Dataset columns (per-row schema):

  query            string, 9 to 3.4k characters
  document         string, 9 to 87.4k characters
  metadata         dict
  negatives        list, 4 to 101 items
  negative_scores  list, 4 to 101 items
  document_score   string, 3 to 10 characters
  document_rank    string, 102 distinct values
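The row reproduced below follows this column order: the query (a short docstring-style description), the positive document (the `calc_equil` source), the objective metadata, and then the list of negative code snippets. A minimal sketch of inspecting one such row, assuming the data is already available as a list of Python dicts named `rows` keyed by these column names (no specific loader or dataset name is implied by this excerpt):

# Minimal sketch: inspect a retrieval row with the columns listed above.
# Assumption: `rows` is a list of dicts keyed by the column names; how the
# rows are loaded (JSON lines, a datasets library, etc.) is not specified here.
def describe_row(row):
    print("query chars:    ", len(row["query"]))
    print("document chars: ", len(row["document"]))
    print("num negatives:  ", len(row["negatives"]))
    print("num neg scores: ", len(row["negative_scores"]))
    print("document_score: ", row["document_score"])
    print("document_rank:  ", row["document_rank"])

rows = [
    {
        "query": "example docstring query",
        "document": "def f():\n    return 1",
        "metadata": {},
        "negatives": ["def g():\n    return 2"] * 4,
        "negative_scores": [0.1] * 4,
        "document_score": "0.9",
        "document_rank": "1",
    }
]

for row in rows:
    describe_row(row)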
Adapted from nicholls_turton.ipynb

sst: sea surface temperature (K)
ft_qv: mixed-layer top qv (kg kg^-1)
use_NT: True or False
def calc_equil(sst, ft_qv, use_NT=False):
    run_main(sst, ft_qv, use_NT)

    # grab csv file
    with open('dumpmodel.csv', 'r') as f:
        df_result = pd.read_csv(f)

    # last time step into named tuple
    out = df_result.iloc[-1]
    steady_state = make_tuple(out.to_dict())

    # obtain steady-state values
    dth = steady_state.deltheta
    dqt = steady_state.delqv
    thetal_m = steady_state.theta
    qt_m = steady_state.qv
    h = steady_state.h
    press = tf.find_press(steady_state.h)  # kPa
    thetal_ft = steady_state.theta + dth
    qt_ft = steady_state.qv + dqt
    zb = steady_state.LCL
    zi = steady_state.h
    we = steady_state.went

    # calculate thetal at z = 3000 m (take qt(z = 3000 m) = qt(z = h), so delta_qt = dqt)
    gamma = 6e-3
    thetal_3000 = thetal_ft + gamma*(3000 - h)
    LTS = thetal_3000 - steady_state.theta

    # calculate delta_Fr
    delta_Frstar = 82.0  # W m^-2
    Frlambda = 7.9       # W m^-2, using with CTL from Gesso
    delta_Fr = delta_Frstar - Frlambda*qt_ft*1000  # convert qt_ft to g kg^-1

    # calculate LWP
    rho = 1.
    LWP = 0.5*rho*(zi - zb)**2

    # put all required variables into output array
    out_array = np.array([thetal_m, qt_m, zi, zb, we, LWP, delta_Fr, LTS, dqt])
    return out_array
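A short usage sketch for `calc_equil` as reproduced above. This assumes the notebook's own helpers are importable exactly as in the source (`run_main`, `make_tuple`, the thermodynamics module `tf`, plus `numpy` as `np` and `pandas` as `pd`); the input values below are placeholders, not results from the notebook:

# Illustrative call only; inputs are placeholders and the helpers above
# (run_main, make_tuple, tf, pd, np) are assumed to be importable.
sst = 297.0     # sea surface temperature (K)
ft_qv = 2.0e-3  # mixed-layer top qv (kg kg^-1)

out = calc_equil(sst, ft_qv, use_NT=True)

# The returned array packs, in order:
# [thetal_m, qt_m, zi, zb, we, LWP, delta_Fr, LTS, dqt]
labels = ["thetal_m", "qt_m", "zi", "zb", "we", "LWP", "delta_Fr", "LTS", "dqt"]
for name, value in zip(labels, out):
    print(f"{name:9s} = {value:.4g}")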
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_main(sst, ft_qv, use_NT):\n\n dtout=10. #minutes\n end_time=8*24. #hours\n del_time=dtout*60. #seconds\n end_time=end_time*3600. #seconds\n #sst=297\n D=5.e-6 #s-1\n U=7 #m/s\n psfc=100. #kPa\n qsfc=tf.qs_tp(sst,psfc)\n ft_intercept = 292 #K\n ft_gamma = 6.e-3 #K/m\n #ft_qv = 2.e-3\n k=0.2 #entrainment efficiency\n Cd = 1.e-3 #drag coefficient\n tspan = np.arange(0.,end_time,del_time)\n vars_init=[285.,400.,8.e-3] #theta (K), height (m) qv (kg/kg) to start\n the_tup=dict(D=D,U=U,sst=sst,ft_intercept=ft_intercept,ft_gamma=ft_gamma,\n qsfc=qsfc,ft_qv=ft_qv,k=k,Cd=Cd,radcool=30.,use_NT=use_NT) # include use_NT\n the_tup=make_tuple(the_tup,'coeffs')\n output=integrate.odeint(dmixed_vars, vars_init, tspan,(the_tup,))\n result=pd.DataFrame.from_records(output,columns=['theta','h','qv'])\n\n # save time/computation by only doing calculations for the last timestep (equilibrium)\n result['time']=tspan[-1]/3600./24. #days\n result['deltheta'] = theta_ft(result['h'].values[-1],ft_intercept,ft_gamma) - result['theta'].iloc[-1]\n result['delqv'] = ft_qv - result['qv'].iloc[-1]\n result['LCL'] = calc_lcl(result.iloc[-1], psfc)\n result['q_flux_0']=calc_sfc_qvap_flux(result.iloc[-1],the_tup)\n result['T_flux_0']=calc_sfc_theta_flux(result.iloc[-1],the_tup)\n result['entflux_theta']=calc_entflux_theta(result.iloc[-1],the_tup)\n \n # decide how to calculate entrainment\n the_vars = [result['theta'].iloc[-1],result['h'].iloc[-1],result['qv'].iloc[-1]]\n if use_NT:\n result['went']=calc_went_NT(the_vars, the_tup, result['deltheta'].iloc[-1], \n result['T_flux_0'].iloc[-1], result['q_flux_0'].iloc[-1])\n else:\n result['went']=calc_went(result.iloc[-1],the_tup)\n\n result['entflux_qv']=calc_entflux_qv(result.iloc[-1],the_tup)\n\n with open('dumpmodel.csv','w') as f:\n result.to_csv(f,index=False)\n \n return None", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0):\n thetal_m = the_vars[0]\n qt_m = the_vars[2]\n zi = the_vars[1]\n dth = deltheta\n \n thetal_ft = thetal_m + dth\n qt_ft = coeffs.ft_qv\n \n dqt = qt_ft - qt_m\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-zi)\n LTS = thetal_3000 - coeffs.sst # lower tropospheric stability\n\n # calculate coefficients\n press=tf.find_press(zi)\n Ad,Bd,issat = tf.calc_ABcoeffs(thetal_ft,qt_ft,press)\n Aw,Bw,issat = tf.calc_ABcoeffs(thetal_m,qt_m,press)\n \n invert= tf.t_uos_thetal(thetal_m,qt_m,press)\n T_0 = invert.temp\n lv=tf.L_t(invert.temp)\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n del_thv_dry = Ad * dth + Bd * dqt\n del_thv_sat = Aw * dth + Bw * dqt\n \n # account for evaporative cooling (increases we)\n ql_max = invert.ql\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n Del_thv = del_thv_dry - Cl * ql_max\n \n # calculate buoyancy integral terms\n rho = 1.\n lcl_press=tf.LCL_thetal(thetal_m,qt_m)\n zb=tf.find_height(lcl_press)\n\n T1 = zb/zi\n T2 = 0.5 * zb**2 / zi**2\n T3 = (zi-zb)/zi\n T4 = 0.5 * (zi**2 - zb**2) / zi**2\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n wtl_0=F0\n wqt_0=Fqv0\n Del_F = delta_Fr/(tc.CPD*rho) # use sensitivity to radiation a la Gesso Fig. 
3\n term1 = wtl_0 * (Ad * (T1-T2) + Aw * (T3-T4))\n term2 = wqt_0 * (Bd * (T1-T2) + Bw * (T3-T4))\n term3 = Del_F * (Ad * T2 + Aw * T4)\n\n Theta_NE = term1 + term2 + term3\n \n # calculate w*\n wstar=(2.5*9.8/T_0*zi*Theta_NE)**(1/3.)\n \n # calculate chi*\n chi_star = Cl * ql_max / (del_thv_dry - del_thv_sat)\n \n # calculate del_m\n Del_m = del_thv_dry + chi_star * (2. - chi_star) * (del_thv_sat - del_thv_dry)\n \n # calculate we\n a2=15.\n Del_thv_NT = Del_thv / (1. + a2 * (1. - Del_m/Del_thv))\n \n A_NT = 0.2\n fac_NT = 2.5\n\n term4 = Del_thv_NT\n term5 = A_NT * fac_NT * (T2 * del_thv_dry + T4 * del_thv_sat)\n denominator = term4 + term5\n\n we = A_NT * fac_NT * Theta_NE / denominator\n \n return we", "def test_set_vT(self):\n s = State(substance=\"water\")\n s.vT = Q_(1.801983936953226, \"m**3/kg\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.vT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.vT[0], Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":", "def test_set_sT(self):\n s = State(substance=\"water\")\n s.sT = Q_(7496.2021523754065, \"J/(kg*K)\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.sT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.sT[0], Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def test_set_uT(self):\n s = State(substance=\"water\")\n s.uT = Q_(2547715.3635084038, \"J/kg\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.uT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.uT[0], Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def prescribed_surface_temperature(x, t, K_medium, 
rho_medium, c_medium, T_medium_initial, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n return (T_external_applied - T_medium_initial)*erfc(x/(2*np.sqrt(k*t))) + T_medium_initial", "def test_set_xT(self):\n s = State(substance=\"water\")\n s.xT = Q_(0.5, \"dimensionless\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(245769.34557103913, \"Pa\")) # type: ignore\n assert np.isclose(s.xT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.xT[0], Q_(0.5, \"dimensionless\")) # type: ignore\n assert np.isclose(s.u, Q_(1534461.5163075812, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(4329.703956664546, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(4056.471547685226, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(2913.7307270395363, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.3656547423394701, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1624328.2430353598, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.5, \"dimensionless\")) # type: ignore\n s.xT = Q_(50, \"percent\"), Q_(400.0, \"K\")\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(245769.34557103913, \"Pa\")) # type: ignore\n assert np.isclose(s.xT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.xT[0], Q_(0.5, \"dimensionless\")) # type: ignore\n assert np.isclose(s.u, Q_(1534461.5163075812, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(4329.703956664546, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(4056.471547685226, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(2913.7307270395363, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.3656547423394701, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1624328.2430353598, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.5, \"dimensionless\")) # type: ignore", "def TM_fluid(layer, kx, om):\n\n h = layer.d\n rho = layer.medium.rho\n K = layer.medium.K\n k = om*np.sqrt(rho/K)\n ky = np.sqrt(k**2-kx**2)\n T = np.zeros((2, 2), dtype=complex)\n T[0, 0] = np.cos(ky*h)\n T[1, 0] = (om**2*rho/ky)*np.sin(ky*h)\n T[0, 1] = -(ky/(om**2*rho))*np.sin(ky*h)\n T[1, 1] = np.cos(ky*h)\n return T", "def test_set_hT(self):\n s = State(substance=\"water\")\n s.hT = Q_(2730301.3859201893, \"J/kg\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.hT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.hT[0], Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def sky_observed(seed=425, th=150, old=False):\n \n # impact parameters\n M = 3e7*u.Msun\n #M = 6e7*u.Msun\n B = 19.95*u.kpc\n V = 190*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.05*u.Myr\n rs = 0*u.pc\n \n old_label = ''\n \n if old:\n old_label = '_old'\n \n # 
impact parameters\n M = 5e7*u.Msun\n B = 19.8*u.kpc\n V = 210*u.km/u.s\n phi = coord.Angle(0*u.deg)\n th = 150\n theta = coord.Angle(th*u.deg)\n Tenc = 0.05*u.Gyr\n T = 2*u.Gyr\n dt = 0.1*u.Myr\n #dt = 1*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 1400\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n alt_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': -45*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xphi0 = np.linspace(-0.1*np.pi, 0.1*np.pi, 1000)\n xphi1 = np.linspace(-0.28*np.pi, -0.1*np.pi, 200)\n xphi2 = np.linspace(0.1*np.pi, 0.32*np.pi, 200)\n xphi = np.concatenate([xphi1, xphi0, xphi2])\n \n xr = 20*u.kpc + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n # closest to impact\n ienc = np.argmin(np.abs(x))\n \n # generate stream model\n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # alternative sky coordinates\n xgal_alt = coord.Galactocentric(stream['x'], **alt_observer)\n xeq_alt = xgal_alt.transform_to(coord.ICRS)\n veq_alt_ = gc.vgal_to_hel(xeq_alt, stream['v'], **vobs)\n veq_alt = [None] * 3\n veq_alt[0] = veq_alt_[0].to(u.mas/u.yr)\n veq_alt[1] = veq_alt_[1].to(u.mas/u.yr)\n veq_alt[2] = veq_alt_[2].to(u.km/u.s)\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # alternative sky coordinates\n xgal0_alt = coord.Galactocentric(stream0['x'], **alt_observer)\n xeq0_alt = xgal0_alt.transform_to(coord.ICRS)\n veq0_alt_ = gc.vgal_to_hel(xeq0_alt, stream0['v'], **vobs)\n veq0_alt = [None] * 3\n veq0_alt[0] = 
veq0_alt_[0].to(u.mas/u.yr)\n veq0_alt[1] = veq0_alt_[1].to(u.mas/u.yr)\n veq0_alt[2] = veq0_alt_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq0.ra.deg[::10], xeq0.dec.deg[::10])\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n # place gap at xi~0\n xioff = xi0[ienc]\n xi0 -= xioff\n \n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n # alternative observer\n R_alt = find_greatcircle(xeq0_alt.ra.deg[::10], xeq0_alt.dec.deg[::10])\n xi0_alt, eta0_alt = myutils.rotate_angles(xeq0_alt.ra, xeq0_alt.dec, R_alt)\n xi0_alt = coord.Angle(xi0_alt*u.deg)\n \n # place gap at xi~0\n xioff_alt = xi0_alt[ienc]\n xi0_alt -= xioff_alt\n \n xi_alt, eta_alt = myutils.rotate_angles(xeq_alt.ra, xeq_alt.dec, R_alt)\n xi_alt = coord.Angle(xi_alt*u.deg)\n xi_alt -= xioff_alt\n \n\n # observed gd1\n g = Table(fits.getdata('/home/ana/projects/GD1-DR2/output/gd1_members.fits'))\n \n vlabel = ['$\\mu_{\\\\alpha_\\star}$ [mas yr$^{-1}$]','$\\mu_{\\delta}$ [mas yr$^{-1}$]', '$V_r$ [km s$^{-1}$]']\n ylims = [[-1.5, 1.5], [-1.5, 1.5], [-30,30]]\n color = '0.35'\n ms = 4\n alpha = 0.7\n \n # plotting\n plt.close()\n fig, ax = plt.subplots(2,4,figsize=(17,8), sharex=True, sharey='col')\n \n plt.sca(ax[0][0])\n plt.plot(xi.wrap_at(wangle), eta, '.', mec='none', color=color, ms=ms, label='Simulated GD-1')\n \n #plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n plt.xlim(-20,20)\n plt.ylim(-5,5)\n \n plt.sca(ax[1][0])\n plt.plot(xi_alt.wrap_at(wangle), eta_alt, '.', mec='none', color=color, ms=ms, label='Simulated GD-1')\n \n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n plt.xlim(-20,20)\n plt.ylim(-5,5)\n \n xeqs = [xeq.ra, xeq.dec, xeq.distance.to(u.kpc)]\n dv = []\n dv_alt = []\n for i in range(3):\n plt.sca(ax[0][i+1])\n \n # interpolate expected kinematics from an unperturbed stream\n vexp = np.interp(xi.wrap_at(wangle), xi0.wrap_at(wangle), veq0[i].value) * veq0[i].unit\n dv += [veq[i]-vexp]\n plt.plot(xi.wrap_at(wangle), dv[i], '.', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n \n plt.sca(ax[1][i+1])\n # interpolate expected kinematics from an unperturbed stream\n vexp_alt = np.interp(xi_alt.wrap_at(wangle), xi0_alt.wrap_at(wangle), veq0_alt[i].value) * veq0_alt[i].unit\n dv_alt += [veq_alt[i]-vexp_alt]\n plt.plot(xi_alt.wrap_at(wangle), dv_alt[i], '.', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n plt.xlabel('$\\phi_1$ [deg]')\n \n # find closest model star to the gd-1 stars\n Ngd1 = len(g)\n p = np.array([g['phi1']+40, g['phi2']])\n q = np.array([xi.wrap_at(wangle).to(u.deg).value, eta])\n idmin = np.empty(Ngd1, dtype='int')\n \n for i in range(Ngd1):\n dist = np.sqrt((p[0,i]-q[0])**2 + (p[1,i]-q[1])**2)\n idmin[i] = np.argmin(dist)\n\n # mask stream, mask spur\n onstream_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>-0.2) & (g['phi2']<0.2))\n spur_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>1) & (g['phi2']<1.4))\n all_mask = np.ones(Ngd1, dtype='bool')\n \n # plot scaled data uncertainties on model pm drawn from a corresponding obs uncertainty\n np.random.seed(seed+1)\n fgaia = np.sqrt(2/5)\n print(2/5, fgaia)\n phi1 = xi[idmin].wrap_at(wangle).to(u.deg).value\n phi2 = eta[idmin]\n pmra = dv[0][idmin] + g['pmra_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n pmdec = dv[1][idmin] + 
g['pmdec_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n \n colors = ['tab:red', 'tab:blue', '0.4']\n labels = ['Stream', 'Spur']\n labels = ['Gaia DR4', '']\n \n for e, mask in enumerate([onstream_mask, spur_mask]):\n plt.sca(ax[0][0])\n plt.plot(phi1[mask], phi2[mask], 'o', color=colors[e], mec='none', alpha=alpha, label=labels[e])\n \n plt.sca(ax[0][1])\n plt.errorbar(phi1[mask], pmra[mask].value, yerr=g['pmra_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n plt.sca(ax[0][2])\n plt.errorbar(phi1[mask], pmdec[mask].value, yerr=g['pmdec_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n print(np.sqrt(np.sum(g['pmra_error'][mask]**2))/np.sum(mask))\n print(np.sqrt(np.sum(g['pmdec_error'][mask]**2))/np.sum(mask))\n\n Nfield = 2\n p2 = np.array([np.array([-32.77,-32.77])+40, [1.167,0]])\n q = np.array([xi.wrap_at(wangle).to(u.deg).value, eta])\n idmin2 = np.empty(Nfield, dtype='int')\n \n for i in range(Nfield):\n dist = np.sqrt((p2[0,i]-q[0])**2 + (p2[1,i]-q[1])**2)\n idmin2[i] = np.argmin(dist)\n \n pmerr = np.array([0.0848, 0.0685])\n \n np.random.seed(seed+2)\n phi1 = xi[idmin2].wrap_at(wangle).to(u.deg).value\n phi2 = eta[idmin2]\n pmra = dv[0][idmin2].value + pmerr*np.random.randn(Nfield)\n pmdec = dv[1][idmin2].value + pmerr*np.random.randn(Nfield)\n \n plt.sca(ax[0][0])\n plt.errorbar(phi1, phi2, color='k', fmt='o', label='HST')\n \n plt.sca(ax[0][1])\n plt.errorbar(phi1, pmra, yerr=pmerr, color='k', fmt='o')\n \n plt.sca(ax[0][2])\n plt.errorbar(phi1, pmdec, yerr=pmerr, color='k', fmt='o')\n \n \n ##############\n # alt observer\n \n # find closest model star to the gd-1 stars\n Ngd1 = len(g)\n p = np.array([g['phi1']+40, g['phi2']])\n q = np.array([xi_alt.wrap_at(wangle).to(u.deg).value, eta_alt])\n idmin = np.empty(Ngd1, dtype='int')\n \n for i in range(Ngd1):\n dist = np.sqrt((p[0,i]-q[0])**2 + (p[1,i]-q[1])**2)\n idmin[i] = np.argmin(dist)\n\n # mask stream, mask spur\n onstream_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>-0.2) & (g['phi2']<0.2))\n spur_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>1) & (g['phi2']<1.4))\n all_mask = np.ones(Ngd1, dtype='bool')\n \n # plot scaled data uncertainties on model pm drawn from a corresponding obs uncertainty\n #np.random.seed(seed+3)\n phi1 = xi_alt[idmin].wrap_at(wangle).to(u.deg).value\n phi2 = eta_alt[idmin]\n pmra = dv_alt[0][idmin] + g['pmra_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n pmdec = dv_alt[1][idmin] + g['pmdec_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n \n colors = ['tab:red', 'tab:blue', '0.4']\n labels = ['Gaia DR4', '']\n \n for e, mask in enumerate([onstream_mask, spur_mask]):\n plt.sca(ax[1][0])\n plt.plot(phi1[mask], phi2[mask], 'o', color=colors[e], mec='none', alpha=alpha, label=labels[e])\n \n plt.sca(ax[1][1])\n plt.errorbar(phi1[mask], pmra[mask].value, yerr=g['pmra_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n plt.sca(ax[1][2])\n plt.errorbar(phi1[mask], pmdec[mask].value, yerr=g['pmdec_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n Nfield = 2\n p2 = np.array([np.array([-32.77,-32.77])+40, [1.167,0]])\n q = np.array([xi_alt.wrap_at(wangle).to(u.deg).value, eta_alt])\n idmin2 = np.empty(Nfield, dtype='int')\n \n for i in range(Nfield):\n dist = np.sqrt((p2[0,i]-q[0])**2 + (p2[1,i]-q[1])**2)\n idmin2[i] = np.argmin(dist)\n \n pmerr = np.array([0.11, 0.08])\n \n np.random.seed(seed+6)\n phi1 = xi_alt[idmin2].wrap_at(wangle).to(u.deg).value\n phi2 = 
eta_alt[idmin2]\n pmra = dv_alt[0][idmin2].value + pmerr*np.random.randn(Nfield)\n pmdec = dv_alt[1][idmin2].value + pmerr*np.random.randn(Nfield)\n \n plt.sca(ax[1][0])\n plt.errorbar(phi1, phi2, color='k', fmt='o', label='HST')\n \n plt.sca(ax[1][1])\n plt.errorbar(phi1, pmra, yerr=pmerr, color='k', fmt='o')\n \n plt.sca(ax[1][2])\n plt.errorbar(phi1, pmdec, yerr=pmerr, color='k', fmt='o')\n \n \n plt.sca(ax[0][0])\n plt.text(0.1,0.85, '$\\\\theta_{roll}$ = 60$^\\circ$', fontsize='small', transform=plt.gca().transAxes)\n\n plt.sca(ax[1][0])\n plt.text(0.1,0.85, '$\\\\theta_{roll}$ = -45$^\\circ$', fontsize='small', transform=plt.gca().transAxes)\n plt.legend(fontsize='small', loc=3, handlelength=0.2)\n \n plt.suptitle('Expected astrometric performance', fontsize='medium')\n plt.tight_layout(rect=[0,0,1,0.94])\n plt.savefig('../plots/astrometric_performance.png')", "def test_set_Tu(self):\n s = State(substance=\"water\")\n s.Tu = Q_(400.0, \"K\"), Q_(2547715.3635084038, \"J/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.Tu[0], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.Tu[1], Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def test_transport_sanity(self):\n T = 400\n cv_mole, W = 21005.045895231186, 28.014\n species_name = \"N2\"\n\n data = ct_properties.ctThermoTransport(\"gri30.cti\", verbose=False)\n data.evaluate_properties()\n i = data.gas.species_index(species_name)\n\n As, Ts, _, poly_mu, poly_kappa, log_poly_mu, log_poly_kappa= ct2foam_utils.fit_ct_transport(data)\n\n mu_s = tr_fitter.sutherland(T, As[i], Ts[i])\n kappa_s=tr_fitter.euken(mu_s, cv_mole, W, R)\n mu_logp, kappa_logp = tr_fitter.eval_log_polynomial(log_poly_mu[i,:], log_poly_kappa[i,:], T)\n mu_p, kappa_p = tr_fitter.eval_polynomial(poly_mu[i,:], poly_kappa[i,:], T)\n\n\n # rough test whether they are in the same scale...\n mu_ref = 2.2217e-5\n kappa_ref = 0.032205\n\n self.assertTrue(np.abs(mu_s-mu_ref)/np.abs(mu_ref) < 0.07)\n self.assertTrue(np.abs(mu_p-mu_ref)/np.abs(mu_ref) < 0.01)\n self.assertTrue(np.abs(mu_logp-mu_ref)/np.abs(mu_ref) < 0.01)\n self.assertTrue(np.abs(kappa_s-kappa_ref)/np.abs(kappa_ref) < 0.05)\n self.assertTrue(np.abs(kappa_p-kappa_ref)/np.abs(kappa_ref) < 0.05)\n self.assertTrue(np.abs(kappa_logp-kappa_ref)/np.abs(kappa_ref) < 0.05)", "def test_virtual_temperature():\n t = 288. 
* units.kelvin\n qv = .0016 * units.dimensionless # kg/kg\n tv = virtual_temperature(t, qv)\n assert_almost_equal(tv, 288.2796 * units.kelvin, 3)", "async def test_thermostat_heatit_z_trm3_no_value(\n hass: HomeAssistant, client, climate_heatit_z_trm3_no_value, integration\n) -> None:\n # When the config parameter that specifies what sensor to use has no value, we fall\n # back to the first temperature sensor found on the device\n state = hass.states.get(CLIMATE_FLOOR_THERMOSTAT_ENTITY)\n assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 22.5", "def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):\r\n #print \"Reaching Surface function...\"\r\n\r\n # First get the As\r\n A_total = self.get_A(surface, areaDict, areaWinDict)\r\n if Aflag == 0:\r\n # If it is the first surface of the space, label the space ID in the log file:\r\n la = str(surface.obj_id)\r\n lb = str(surface.obj_type)\r\n #TM_user.info(\"%s,surface area,%s,%s\" % (la, A_total, lb))\r\n A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)\r\n A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)\r\n T_space = spaces_dict[space.obj_id][1]\r\n T1 = weather[\"t_outside\"]\r\n hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))\r\n transmitted_win = 0\r\n Q_flux = 0\r\n\r\n # need the surface related information, T_space, U, R3\r\n U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below\r\n #print U\r\n R3 = 1/U\r\n # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML\r\n C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids\r\n #print C\r\n\r\n temperature = Temperature()\r\n\r\n #Sub-routines for each wall type based on the returned hc_external\r\n # This hc is different for each surface type so moved under this sub-routine area\r\n #hc = 3.076 sent this to the Temperature Object\r\n if surface.obj_type == \"ExteriorWall\":\r\n transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux\r\n if surface.obj_type == \"Roof\":\r\n transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"InteriorWall\":\r\n transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"UndergroundWall\":\r\n transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No instance of yet to test\r\n if surface.obj_type == \"RaisedFloor\":\r\n # This will eventually need some values when we start using raised floors\r\n transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to test\r\n\r\n return transmitted_win, Q_flux", "def qtf(self, vw, 
th, gp, psi_l, lai, dt):\n\t\t#if the amount of water in tank is less than amount that will be absorbed by plant in timestep dt, then what's left will be absorbed \n\t qtt = th - self.qwf(vw, th, gp, psi_l, lai, dt)\n\t if self.tx*self.ZT*10**6 <= 0:\n\t return 0.\n\t elif self.tx*self.ZT*10**6 <= qtt*dt:\n\t return (self.tx*self.ZT*10**6/dt)\n\t else:\n\t return qtt", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def compSETrueSim(self,taup=None):\r\n \r\n if self.layer_type == 'input':\r\n # Create unit Gaussians for the sampling\r\n zshape = SELayer.sample_shape(self.shape,self.nsamp)\r\n self.wq = np.random.normal(0,1,zshape) \r\n\r\n # Sample the output\r\n self.ztrue = self.sample(nsamp=self.nsamp)\r\n \r\n # Compute the sample mean\r\n tauz = np.mean(np.abs(self.ztrue)**2)\r\n return tauz\r\n \r\n \r\n else:\r\n # Create unit Gaussians for the sampling\r\n pshape = SELayer.sample_shape(self.shape[0],self.nsamp)\r\n zshape = SELayer.sample_shape(self.shape[1],self.nsamp)\r\n self.wp = np.random.normal(0,1,pshape) \r\n self.wq = 
np.random.normal(0,1,zshape) \r\n\r\n # Generate random Gaussian input\r\n pshape = SELayer.sample_shape(self.shape[0], self.nsamp)\r\n self.ptrue = np.random.normal(0,np.sqrt(taup),pshape)\r\n \r\n # Sample from the ouptut\r\n self.ztrue = self.sample(self.ptrue,nsamp=self.nsamp)\r\n \r\n # Compute the sample mean\r\n tauz = np.mean(np.abs(self.ztrue)**2)\r\n return tauz", "def test_mixed_layer():\n pressure = np.array([959., 779.2, 751.3, 724.3, 700., 269.]) * units.hPa\n temperature = np.array([22.2, 14.6, 12., 9.4, 7., -38.]) * units.degC\n mixed_layer_temperature = mixed_layer(pressure, temperature, depth=250 * units.hPa)[0]\n assert_almost_equal(mixed_layer_temperature, 16.4024930 * units.degC, 6)", "def Tsky(self, source):\n\n if not _usePyGSM:\n raise ImportError('PyGSM is not available: cannot access sky temperatures')\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n\n source=source.galactic\n T=healpy.pixelfunc.get_interp_val(self.map,\n source.l.value,\n source.b.value,\n lonlat=True)\n return T*u.K", "def test_isentropic_pressure_tmp_out():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 296. * units.kelvin\n assert_almost_equal(isentprs[1], truetmp, 3)", "def test_set_Ts(self):\n s = State(substance=\"water\")\n s.Ts = Q_(400.0, \"K\"), Q_(7496.2021523754065, \"J/(kg*K)\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.Ts[0], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.Ts[1], Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def get_ptf10iuv(colorplt = False):\n z = 0.0251485\n ebv = 0.0371 # SFD\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n print (\"adopt g band t_max estimated by myself\")\n t_max = 55357.387 \n tb = pd.read_csv('../data/otherSN/Kasliwal2012/PTF10iuv', sep='\\t')\n tb = tb.drop(columns=[\"Unnamed: 4\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Mag'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Mag'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Mag'].values])\n tb = tb.drop(columns=[\"Mag\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n ixz = tb['filter'].values == \"z\"\n ixB = tb['filter'].values == \"B\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 
6422\n tb['wave'].values[ixi] = 7883\n tb['wave'].values[ixz] = 9670\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n tb = tb.sort_values(by = \"mjd\")\n if colorplt==False:\n return tb\n \n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['g', 'r', 'i']))\n tb = tb[ix]\n tb = tb[tb.mjd > 55352.5]\n tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def plotTsneE_3D(datadf,level,descname,v,path_output,Efilter,own_cmap,clustermethod=\"kmeans\",onlyShow=False,selected=False):\n\n if not selected:\n datadf = 
datadf[datadf[\"{}Energy\".format(level)]<=Efilter].sort_values(by=\"{}_{}_klabel\".format(level,descname))\n\n klabels = datadf[\"{}_{}_klabel\".format(level,descname)].astype(str)\n fig = px.scatter_3d(data_frame=datadf,\n z=\"{}Energy\".format(level),\n x=\"{}_{}_tsne1\".format(level,descname),\n y=\"{}_{}_tsne2\".format(level,descname),\n color=klabels,\n color_discrete_sequence=own_cmap,\n size=\"GyRadius\",\n opacity=0.9,\n #symbol=\"symbol\", # Use if needed in Jupyter\n hover_name=datadf.index,\n title=\"{}'s' t-SNE + {}Energy\".format(descname,level),\n #range_z=[-36,-20],\n width= 1200,\n height= 900)\n\n\n if onlyShow:\n fig.show()\n elif selected:\n fig.write_html(\"{}/{}_{}_EtSNE_selected.html\".format(path_output,level,descname))\n else:\n fig.write_html(\"{}/{}_{}_EtSNE.html\".format(path_output,level,descname))", "def test_set_pT(self):\n s = State(substance=\"water\")\n s.pT = Q_(101325.0, \"Pa\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.pT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.pT[0], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None\n assert s.phase == \"gas\"", "def compute_tsky_hot( xv, yv, hv, thot, tcold):\n\n nData = len(yv) \n epsilons = np.full( nData, EPSILON)\n tsys = np.zeros(nData) # initialize arrays\n\n Z = np.zeros(nData)\n oneMZ = np.zeros(nData)\n # For full Temp calibration, a spectrum taken at high elevation away from \n # The galactic plan is used. For this program the cold spectrum must be\n # the spectrum being calibrated. See the M command for comparision\n epsilons = np.full( nData, EPSILON)\n yv = np.maximum( yv, epsilons)\n hv = np.maximum( hv, epsilons)\n # comput the cold/hot ratio\n Z = yv/hv\n oneMZ = np.full( nData, 1.) 
- Z\n oneMZ = np.maximum( oneMZ, epsilons)\n\n # the cold, receiver, temperature is this function\n tsys = ((Z*thot) - tcold)/oneMZ\n \n n6 = int(nData/6)\n n56 = 5*n6\n\n tsysmedian = np.median( tsys[n6:n56])\n\n tsky = np.zeros(nData) # initialize arrays\n S = np.zeros(nData) # initialize arrays\n\n # The system gain S is computed assuming a tsys is the cold load\n S = np.full( nData, tsysmedian+thot)/hv\n # scale the observed instensity in counts to Kelvins.\n tsky = S*yv\n\n return tsky", "def temperature() -> float:", "def func_d23_318(n, series):\n if series == \"3D3\":\n try: \n return np.sqrt((3*os_3D3[str(n)]*wl_3D3[str(n)]*1e-9*hbar*e**2)/(4*np.pi*m_e*c))\n except:\n return 0", "def get_sn2018kzr(colorplt = False):\n ebv = 0.113/3.1\n z = 0.053\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 58480.422+0.1\n \n f = open('../data/otherSN/Mcbrien2019/table1.tex')\n lines = f.readlines()\n f.close()\n lines = lines[:-4]\n \n dates = [x.split(\"&\")[0] for x in lines]\n mjds = [float(x.split(\"&\")[1]) for x in lines]\n phases = [float(x.split(\"&\")[2].replace('$', '').replace('\\t', '')) for x in lines]\n gs = [x.split(\"&\")[3].replace('$', '') for x in lines]\n rs = [x.split(\"&\")[4].replace('$', '') for x in lines]\n iis = [x.split(\"&\")[5].replace('$', '') for x in lines]\n zs = [x.split(\"&\")[6].replace('$', '') for x in lines]\n insts = [x.split(\"&\")[7] for x in lines]\n \n dtg = digital_latex(mjds, phases, gs, insts)\n dtr = digital_latex(mjds, phases, rs, insts)\n dti = digital_latex(mjds, phases, iis, insts)\n \n filt = np.hstack([np.repeat(\"g\", len(dtg[0])),\n np.repeat(\"r\", len(dtr[0])),\n np.repeat(\"i\", len(dti[0]))])\n phase = np.hstack([dtg[1], dtr[1], dti[1]])\n mag = np.hstack([dtg[2], dtr[2], dti[2]])\n emag = np.hstack([dtg[3], dtr[3], dti[3]])\n mjd = np.hstack([dtg[0], dtr[0], dti[0]])\n \n tb = Table(data = [(mjd - t_max) / (1+z), mag, emag, filt],\n names = ['tmax_rf', 'mag', 'emag', 'filter'])\n \n ixr = tb['filter'] == \"r\"\n ixg = tb['filter'] == \"g\"\n ixi = tb['filter'] == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'][ixg] = 4814\n tb['wave'][ixr] = 6422\n tb['wave'][ixi] = 7883\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'], 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb = tb.to_pandas()\n return tb", "def initial_importer(initials, initialZMT=True):\n from .functions import cosd, lna\n ###filling the running variables with values depending on the systemconfiguration in rk4input###\n\n if Base.spatial_resolution == 0:\n dim = 0\n print('0D')\n Vars.T = initials['zmt']\n else:\n dim = 1\n # NS==True corresponds to southpole to northpole representation (180 Degrees)\n if Base.both_hemispheres == True:\n Latrange = 180\n\n # Checking if Temperature and Latitude is set on a latitudal circle (0°,10°,..if step=10)\n # or on a latitudinal belt and therefore between the boundaries (5°,15°,..if step=10)\n\n # circle==True and belt==False says on the latitudinal circle\n if Base.latitudinal_circle == True and Base.latitudinal_belt == False:\n Vars.Lat = np.linspace(-90 + Base.spatial_resolution, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution - 1))\n Vars.Lat2 = np.linspace(-90, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution)) + Base.spatial_resolution / 2\n if initialZMT == True:\n Vars.T = np.array([initials['zmt']] * int(Latrange / Base.spatial_resolution - 1))\n # Checking if the Temperature for each latitude starts with 
the same value or a\n # cosine shifted value range\n if initials['initial_temperature_cosine'] == True:\n Vars.T = Vars.T + initials['initial_temperature_amplitude'] * (cosd(Vars.Lat) - 1)\n\n # circle==False and belt==True say on the latitudinal belt\n if Base.latitudinal_circle == False and Base.latitudinal_belt == True:\n Vars.Lat2 = np.linspace(-90 + Base.spatial_resolution, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution - 1))\n Vars.Lat = np.linspace(-90, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution)) + Base.spatial_resolution / 2\n if initialZMT == True:\n Vars.T = np.array([initials['zmt']] * int(Latrange / Base.spatial_resolution))\n if initials['initial_temperature_cosine'] == True:\n if initials['initial_temperature_noise'] == True:\n z = [0] * len(Vars.Lat)\n for k in range(len(Vars.Lat)):\n z[k] = np.random.normal(0, initials['initial_temperature_noise_amplitude'])\n else:\n z = 0\n Vars.T = Vars.T + initials['initial_temperature_amplitude'] * (cosd(Vars.Lat) - 1) + lna(z)\n\n # Not from southpole to northpole rather equator to pole\n else:\n Latrange = 90\n if Base.latitudinal_circle == True and Base.latitudinal_belt == False:\n Vars.Lat = np.linspace(0, 90 - Base.spatial_resolution, int(Latrange / Base.spatial_resolution))\n Vars.Lat2 = np.linspace(0, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution)) + Base.spatial_resolution / 2\n if initialZMT == True:\n Vars.T = np.array([initials['zmt']] * int(Latrange / Base.spatial_resolution))\n if initials['initial_temperature_cosine'] == True:\n Vars.T = Vars.T + initials['initial_temperature_amplitude'] * (cosd(Vars.Lat) - 1)\n if Base.latitudinal_circle == False and Base.latitudinal_belt == True:\n Vars.Lat2 = np.linspace(0, 90 - Base.spatial_resolution, int(Latrange / Base.spatial_resolution))\n Vars.Lat = np.linspace(0, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution)) + Base.spatial_resolution / 2\n if initialZMT == True:\n Vars.T = np.array([initials['zmt']] * int(Latrange / Base.spatial_resolution))\n if initials['initial_temperature_cosine'] == True:\n Vars.T = Vars.T + initials['initial_temperature_amplitude'] * (cosd(Vars.Lat) - 1)\n\n Vars.t = initials['time']\n if Base.parallelization == True:\n if initialZMT == True:\n Vars.T = np.array([Vars.T] * Base.number_of_parallels)\n Vars.T_global = np.array([initials['gmt']] * Base.number_of_parallels)\n else:\n Vars.T_global = initials['gmt']", "def tf_sugra_tensors(t_v70, compute_masses, t_lhs_vielbein, t_rhs_E):\n tc_28_8_8 = tf.constant(su8.m_28_8_8)\n t_e7_generator_v70 = tf.einsum(\n 'v,vIJ->JI',\n tf.complex(t_v70, tf.constant([0.0] * 70, dtype=tf.float64)),\n tf.constant(e7.t_a_ij_kl[:70, :, :], dtype=tf.complex128))\n t_complex_vielbein0 = tf.linalg.expm(t_e7_generator_v70) @ t_rhs_E\n if compute_masses:\n t_complex_vielbein = t_lhs_vielbein @ t_complex_vielbein0\n else:\n t_complex_vielbein = t_complex_vielbein0\n @tf.function\n def expand_ijkl(t_ab):\n return 0.5 * tf.einsum(\n 'ijB,BIJ->ijIJ',\n tf.einsum('AB,Aij->ijB', t_ab, tc_28_8_8),\n tc_28_8_8)\n #\n t_u_ijIJ = expand_ijkl(t_complex_vielbein[:28, :28])\n t_u_klKL = expand_ijkl(t_complex_vielbein[28:, 28:])\n t_v_ijKL = expand_ijkl(t_complex_vielbein[:28, 28:])\n t_v_klIJ = expand_ijkl(t_complex_vielbein[28:, :28])\n #\n t_uv = t_u_klKL + t_v_klIJ\n t_uuvv = (tf.einsum('lmJK,kmKI->lkIJ', t_u_ijIJ, t_u_klKL) -\n tf.einsum('lmJK,kmKI->lkIJ', t_v_ijKL, t_v_klIJ))\n t_T = tf.einsum('ijIJ,lkIJ->lkij', t_uv, t_uuvv)\n t_A1 
= (-4.0 / 21.0) * tf.linalg.trace(tf.einsum('mijn->ijmn', t_T))\n t_A2 = (-4.0 / (3 * 3)) * (\n # Antisymmetrize in last 3 indices, taking into account antisymmetry\n # in last two indices.\n t_T\n + tf.einsum('lijk->ljki', t_T)\n + tf.einsum('lijk->lkij', t_T))\n t_A1_real = tf.math.real(t_A1)\n t_A1_imag = tf.math.imag(t_A1)\n t_A2_real = tf.math.real(t_A2)\n t_A2_imag = tf.math.imag(t_A2)\n t_A1_potential = (-3.0 / 4) * (\n tf.einsum('ij,ij->', t_A1_real, t_A1_real) +\n tf.einsum('ij,ij->', t_A1_imag, t_A1_imag))\n t_A2_potential = (1.0 / 24) * (\n tf.einsum('ijkl,ijkl->', t_A2_real, t_A2_real) +\n tf.einsum('ijkl,ijkl->', t_A2_imag, t_A2_imag))\n t_potential = t_A1_potential + t_A2_potential\n #\n return t_v70, t_complex_vielbein, t_T, t_A1, t_A2, t_potential", "def test_set_Tv(self):\n s = State(substance=\"water\")\n s.Tv = Q_(400.0, \"K\"), Q_(1.801983936953226, \"m**3/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.Tv[0], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.Tv[1], Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def in_situ_tair_snd(sno0, year0=2016, npr_date=-1, ascat_date=-1):\n if npr_date < 0:\n npr_date = 100*24*3600 + bxy.get_total_sec('%d0101' % year0)\n if ascat_date < 0:\n ascat_date = 100*24*3600 + bxy.get_total_sec('%d0101' % year0)\n snd_name = \"snow\"\n print 'the %d was processing' % sno0\n sno = str(sno0)\n tair_name = \"Air Temperature Observed (degC)\"\n if sno0 in [2065, 2081]:\n if year0 == 2016:\n tair_name = \"Air Temperature Average (degC)\"\n # read measurements\n hr_list = [5, 7, 9, 14, 18, 21]\n t_air_one_year = read_site.in_situ_series(sno, y=year0, hr=hr_list) # [:, :, 0] temperature at 7:00 (local)\n # time_above_zero_0 = data_process.zero_find(t_air_one_year[:, :, 0], w=10, th=-0.1) #\n # time_above_zero_1 = data_process.zero_find(t_air_one_year[:, :, 1], w=10, th=-0.1)\n # time_above_zero_2 = data_process.zero_find(t_air_one_year[:, :, 3], w=10, th=-0.1)\n time_above_zero_list = [data_process.zero_find(t_air_one_year[:, :, i], w=10, th=-0.1)\n for i in range(0, len(hr_list))]\n date_tuple = bxy.time_getlocaltime(time_above_zero_list, ref_time=[2000, 1, 1, 0], t_source='US/Alaska')\n t_value, t_date = read_site.read_measurements\\\n (sno, tair_name, np.arange(1, 365), year0=year0, hr=18, t_unit='sec')\n\n\n tair_zero_day2 = data_process.zero_find(np.array([t_date, -t_value]), w=7, th=0) # in unit of sec\n tair_zero_day1 = data_process.zero_find_gt(np.array([t_date, t_value]), w=7, th=1)\n air_win = 7 # check days during window shown air temperature gt 0 degC\n w, w_valid = data_process.n_convolve3(t_value, air_win)\n air0_index0 = np.where(w>5)\n for ind0 in air0_index0[0]:\n if t_date[ind0] > bxy.get_total_sec('%d0307' % year0):\n tair_zero_day = t_date[ind0] - air_win*24*3600\n break\n # check\n zero_date = bxy.time_getlocaltime([tair_zero_day,tair_zero_day2, npr_date[0], ascat_date[0]],\n ref_time=[2000, 1, 1, 0], 
t_source=\"US/Alaska\")[-2]\n i_zero = np.where(bxy.time_getlocaltime(t_date, ref_time=[2000, 1, 1, 0],\n t_source=\"US/Alaska\")[-2] == zero_date[0])[0][0]\n t_check = t_value[i_zero - 3: i_zero + 4]\n air_0, air00 = read_site.read_measurements(sno, tair_name, 366+np.arange(50, 70), hr=18)\n a_extend = np.array([-3600*24, 3600*24])\n period0, period1 = np.array(sorted([tair_zero_day, npr_date])) + a_extend, \\\n np.array(sorted([tair_zero_day, ascat_date])) + a_extend\n snow_value, snow_date = read_site.read_measurements\\\n (sno, snd_name, np.arange(1, 365), year0=year0, hr=0, t_unit='sec')\n # get the in situ measurements during a period\n snow2date0 = data_process.measurements_slice(np.array([snow_date, snow_value]),\n peroid=period0)\n snow2date1 = data_process.measurements_slice(np.array([snow_date, snow_value]),\n peroid=period1)\n air2date0, air2date1 = data_process.measurements_slice(np.array([t_date, t_value]),\n peroid=period0),\\\n data_process.measurements_slice(np.array([t_date, t_value]),\n peroid=period1)\n return tair_zero_day, snow2date0, snow2date1, air2date0, air2date1", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def test_t0(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 0.0\n solrt = sol(r, t)\n for quant in ['velocity', 'pressure', 'sound_speed', 'density', 'xdet']:\n assert np.all(np.isnan(solrt[quant]))", "def T_c(I, T_amb, V, D, R_list, N_cond=1, T_range=[298,323,348], a_s=0.9, e_s=0.9, I_sun=900.0, temp_factor=1, wind_factor=1, n_iter=10):\n\n # def Q_gen(I, R):\n # w = I * I * R\n # return w\n\n # def Q_rad_in(I_sun, A_s, a_s):\n # w = I_sun * D * a_s\n # return w\n\n # def Q_conv(htcoeff, A_s, T_lin, T_amb):\n # w = htcoeff * A_s * (T_line - T_amb)\n # return w\n\n # def Q_rad_out(e_s, A_s, sigma, T_line, T_amb):\n # w = e_s * D * sigma * (T_line**4 - T_amb**4)\n # return w\n\n def reynolds(V, D, v, Mair=1.103):\n r = V * D / v\n return r\n\n def nusselt(Re, Pr):\n a = 0.62 * ( (Re) ** (1.0/2.0) ) * ( Pr ** (1.0/3.0) )\n b = (1 + (0.4/(Pr**(2.0/3.0) ) ) ) ** (1.0/4.0)\n c = (Re / 282000) ** (5.0/8.0)\n n = 0.3 + (a/b) * ( (1 + c) ** (4.0/5.0) )\n return n\n\n def air_prop(T_amb):\n # temp v k Pr\n air_prop = np.array([[200, 7.59e-6, 18.1e-3, 0.737],\n [250, 11.44e-6, 22.3e-3, 0.720],\n [300, 15.89e-6, 26.3e-3, 0.707],\n [350, 20.92e-6, 30.0e-3, 0.700],\n [400, 26.41e-6, 33.8e-3, 0.690],\n [450, 32.39e-6, 37.3e-3, 0.686],\n [500, 38.79e-6, 40.7e-3, 0.684],\n [550, 45.57e-6, 43.9e-3, 0.683],\n [600, 52.69e-6, 46.9e-3, 0.685]])\n\n v, k, Pr = np.apply_along_axis(lambda x: np.interp(T_amb, air_prop[:,0], x),\n 0, air_prop[:,1:])\n return v, k, Pr\n\n def R_T(R_lo, R_mid, R_hi, T_line, N_cond, T_range=T_range):\n if 273 <= T_line <= 323:\n R = ((R_lo + \n ((R_lo - R_mid)/(T_range[0] - T_range[1]))\n *(T_line - T_range[0]))/N_cond)\n elif T_line > 323:\n R = ((R_mid + \n ((R_mid - R_hi)/(T_range[1] - T_range[2]))\n *(T_line - T_range[1]))/N_cond)\n else:\n R = R_lo\n print('Out of bounds')\n return R\n\n R_lo, R_mid, R_hi = R_list[0], R_list[1], R_list[2]\n temp_factor = 1\n wind_factor = 1\n sigma = 5.6703e-8 # Stefan-Boltzmann constant\n\n T_amb = T_amb*temp_factor\n V = V*wind_factor\n\n v, k, Pr = air_prop(T_amb)\n Re = reynolds(V, D, v)\n htcoeff = nusselt(Re, Pr) * k / D\n\n def T_line(T_init):\n \n R = R_T(R_lo, R_mid, R_hi, T_init, N_cond)\n print R\n\n C4 = e_s * sigma * D * math.pi\n 
C3 = 0.0\n C2 = 0.0\n C1 = htcoeff * D * math.pi\n C0 = - ( I ** 2 * R\n + I_sun * a_s * D\n + htcoeff * D * math.pi * T_amb\n + e_s * D * math.pi * sigma * (T_amb ** 4))\n\n return np.roots([C4, C3, C2, C1, C0])\n\n T_c = T_amb\n \n for i in range(n_iter):\n T_arr = T_line(T_c)\n T_c = np.real(T_arr[np.where((np.real(T_arr) > 0) & ~(np.iscomplex(T_arr)))]).mean()\n print T_c\n\n return T_c", "def composite_solid_prescribed_surface_temperature(x, t, layer_1_props, layer_2_props, T_initial, T_external, num_terms=1):\n\n l = layer_1_props['d']\n kappa_1 = get_kappa(layer_1_props['K'], layer_1_props['rho'], layer_1_props['c'])\n kappa_2 = get_kappa(layer_2_props['K'], layer_2_props['rho'], layer_2_props['c'])\n\n k = np.sqrt(kappa_1/kappa_2)\n sigma = layer_2_props['K']*k/layer_1_props['K']\n alpha = (sigma-1)/(sigma+1)\n\n #translate coordinate system so that boundary is at x = 0\n x = x - l\n\n def v_1_term(n):\n return alpha**n * (erfc(((2*n+1)*l + x)/(2*np.sqrt(kappa_1*t))) - alpha*erfc(((2*n+1)*l - x)/(2*np.sqrt(kappa_1*t))))\n def v_2_term(n):\n return alpha**n * erfc(((2*n+1)*l + k*x)/(2*np.sqrt(kappa_1*t)))\n\n solution = 0\n if x < 0:\n solution = np.sum([v_1_term(n) for n in range(num_terms)])\n else:\n solution = 2/(1+sigma) * np.sum([v_2_term(n) for n in range(num_terms)])\n\n return solution*(T_external - T_initial) + T_initial", "def __init__(self,b,u,v,hbls_old,hbbl_old,Kv_old,Kt_old,srflx,sustr,svstr,f,grid_dict,tstep_mode,dt):\n \n # INPUTS FROM TTTW SYSTEM\n self.b = b #buoyancy field: [Ly,N]\n self.u = u # x-component of velocity [Ly,N]\n self.v = v # y-component of velocity [Ly+1,N]\n self.hbls_old = hbls_old #boundary layer depth from previous time step [Ly]\n self.hbbl_old = hbbl_old # bottom boundary layer depth from previous time step [Ly]\n self.Kv_old = Kv_old # momentum mixing coefficeint from previous time step [Ly,N+1]\n self.Kt_old = Kt_old # tracer mixing coefficient from previous time step [Ly,N+1]\n self.srflx = srflx #solar heat flux [Ly] (degC * (m/s))\n self.sustr = sustr # x-component surface wind stress [Ly] (N/m^2) \n self.svstr = svstr # y-component surface wind stress [Ly+1] (N/m^2)\n self.grid_dict = grid_dict #gridded data\n self.f = f #coriolis parameter\n # KPP-SPECIFIC VARIABLES \n self.hbls = np.zeros([self.b.shape[0]])\n self.hbbl = np.zeros([self.b.shape[0]])\n self.ustar = []\n self.bvf = [] \n self.kmo = []\n self.C_h_MO = []\n self.kbl = []\n self.Cr = [] \n self.Fc = []\n self.ghat = [] #NONLOCAL TERM: TO BE USED IN TIME STEPPING\n self.tstep_mode = tstep_mode# if in time steppign mode, turn on HBL_RATE_LIMIT\n self.dt = dt", "def test_steady_state(self, model):\r\n model.fs.unit.initialize()\r\n solver.solve(model)\r\n\r\n # Expected temperatures\r\n exp_sco2 = np.ones(TIME_STEPS) * 305.2\r\n exp_air = np.ones(TIME_STEPS) * 370.4\r\n exp_wall = np.ones(TIME_STEPS) * 339.7\r\n\r\n self.check_temperatures(\r\n model, np.array(model.fs.time), exp_sco2, exp_air, exp_wall\r\n )", "def eps_MTSI(omg, kx, ky, kz, prt=PlasmaParameters()):\n\n k2 = kz ** 2 + ky ** 2\n\n if k2 == 0:\n raise RuntimeError(\"The wave vector is Zero !!\")\n\n iEps = 1/omg**2\n eEpsz = prt.mi_over_me * ( kz**2 ) / ( (omg - ky * prt.driftSpeed/prt.BohmSpeed)**2 * k2 )\n eEpsy = prt.mi_over_me * ( ky**2 ) / ( ((omg - ky * prt.driftSpeed/prt.BohmSpeed)**2 - prt.electronCyclotronFrequency**2/ (prt.ionPlasmaFrequency/u.rad)**2)* k2 )\n\n return 1 - iEps - eEpsz - eEpsy", "def temperature(self, state: State):\n if self.is_self_play and state.number_of_stones < 16:\n return 
self._temperature\n return None", "def test_temperature_condition_degc(value):\n result = Stc3xTemperatureConditionDegC(value.get('degrees_celsius'))\n assert type(result) is Stc3xTemperatureConditionDegC\n assert type(result.ticks) is int\n assert result.ticks == value.get('ticks')\n assert type(result.degrees_celsius) is float\n assert result.degrees_celsius == value.get('degrees_celsius')\n assert type(result.degrees_fahrenheit) is float\n assert result.degrees_fahrenheit == value.get('degrees_fahrenheit')", "def make_stehle(self):\n\n temp_k = self.temp * e / k # temperature in K\n dens_cm = self.e_dens * 1.e-6 # electronic density in cm-3\n prefix = 'n_' + str(self.n_upper) + '_' + str(self.n_lower) + '_'\n\n # extract raw tabulated tabulated_data\n tab_temp_k = np.array(pystark.nc.variables[prefix + 'tempe'].data) # tabulated electron temperatures (K)\n olam0 = pystark.nc.variables[prefix + 'olam0'].data # line centre wavelength (A)\n num_tab_dens = pystark.nc.variables[prefix + 'id_max'].data\n fainom = pystark.nc.variables[prefix + 'fainom'].data\n tab_dens_cm = np.array(pystark.nc.variables[prefix + 'dense'].data) # tabulated electron densities (cm ** -3)\n f00 = np.array(pystark.nc.variables[prefix + 'f00'].data) # normal Holtsmark field strength (30 kV / m)\n dl12 = np.array(pystark.nc.variables[prefix + 'dl12'].data)\n dl12s = np.array(pystark.nc.variables[prefix + 'dl12s'].data)\n fainu = pystark.nc.variables[\n prefix + 'fainu'].data # Asymptotic value of iStark * (alpha ** 2.5) (\"wings factor in alfa units\")\n pr0 = np.array(pystark.nc.variables[\n prefix + 'pr0'].data) # Ratio of the mean interelectronic distance to the electronic Debye length\n jtot = np.array(pystark.nc.variables[prefix + 'jtot'].data,\n dtype=np.int) # \"number of wave lengths for the couple (T,Ne)\"\n dom = np.array(pystark.nc.variables[prefix + 'dom'].data) # frequency detunings in units (rad / (s*ues)\n d1om = np.array(pystark.nc.variables[prefix + 'd1om'].data)\n o1line = np.array(pystark.nc.variables[prefix + 'o1line'].data)\n o1lines = np.array(pystark.nc.variables[prefix + 'o1lines'].data)\n\n # ensure given temperature + density falls within tabulated values\n # change sligtly the value of the input density\n # dens_cm in order to remain , as far as possible, inside the tabulation\n # JSA: this first step seems bogus!\n\n if np.abs(dens_cm - tab_dens_cm[0]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[0] * 1.001\n\n for id in np.arange(1, num_tab_dens + 1):\n if np.abs(dens_cm - tab_dens_cm[id]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[id] * 0.999\n\n if dens_cm >= 2.0 * tab_dens_cm[num_tab_dens]:\n raise Exception(\n 'Your input density is higher than the largest tabulated value %f' % tab_dens_cm[num_tab_dens])\n\n if dens_cm <= tab_dens_cm[0]:\n raise Exception('Your input density is smaller than the smallest tabulated value %f' % tab_dens_cm[0])\n\n if temp_k >= tab_temp_k[9]:\n raise Exception('Your input temperature is higher than the largest tabulated value %f' % tab_temp_k[9])\n\n if temp_k <= tab_temp_k[0]:\n raise Exception('Your input temperature is lower than the smallest tabulated value %f' % tab_temp_k[0])\n\n normal_holtsmark_field = 1.25e-9 * (dens_cm ** (2. 
/ 3.)) # normal field value in ues\n\n # calculate line centre wavelength and frequency using Rydberg formula\n # JSA: I have made this step clearer and corrected for deuteron mass in the Rydberg constant (though the effect is small)\n # TODO make sure this matches olam0 parameter above -- why were there two variables in the first place?!\n # rydberg_m = Rydberg / (1. + (electron_mass / physical_constants['deuteron mass'][0]))\n # wl_0_angst = 1e10 * (rydberg_m * (1 / n_lower ** 2 - 1 / n_upper ** 2)) ** -1\n\n wl_centre_angst = self.wl_centre * 1e10\n\n c_angst = c * 1e10 # velocity of light in Ansgtroms / s\n angular_freq_0 = 2 * np.pi * c_angst / wl_centre_angst # rad / s\n\n otrans = -2 * np.pi * c_angst / wl_centre_angst ** 2\n\n olines = o1lines / np.abs(otrans)\n oline = o1line / np.abs(otrans)\n\n # Limit analysis_tools to uncorrelated plasmas.\n # check that mean interelectronic distance is smaller than the electronic Debye length (equ. 10)\n PR0_exp = 0.0898 * (dens_cm ** (1. / 6.)) / np.sqrt(temp_k) # = (r0 / debye)\n if PR0_exp > 1.:\n raise Exception('The plasma is too strongly correlated\\ni.e. r0/debye=0.1\\nthe line cannot be computed.')\n\n # fainom_exp=fainom*(F00_exp**1.5)\n # fainum_exp=fainom_exp/( (OPI*2.)**1.5)\n\n # ========================\n # TABULATION Format CDS\n # si on veut ecrire\n # n -np lambda0 kalpha Ne E0 T R0/Debye Dalpha iDoppler iStark\n\n # IN_cds= N+0.01\n # INP_cds = NP+0.01\n\n # ***********************************************************\n # Don't edit the CDS format...\n # ***********************************************************\n\n # Skipped the code in the IF statement starting at line 470, since it\n # isn't used, if (.FALSE.) ...\n\n # ==============================================\n # define an unique detunings grid - domm - for the tabulated\n # profiles ( various temperatures , densities)\n # calculate all the line shapes for this common grid\n # units used at this points are Domega_new= Delta(omega)/F00\n # in rd/(s-1 ues)\n\n max_num_dens = 30 # Maximum number of densities\n max_num_tab_temp = 10\n max_num_detunings = 60 # Maximum number of detunings\n jtot = jtot.astype(np.int)\n domm = np.zeros(100000)\n dom0 = np.zeros(10000)\n tprof = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n tprofs = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n uprof = np.zeros([max_num_dens, 10000])\n uprofs = np.zeros([max_num_dens, 10000])\n\n inc = 0\n domm[inc] = 0.0\n # ---- Look to replace this loop\n for id in np.arange(num_tab_dens + 1): # loop over tab densities\n for j in np.arange(max_num_tab_temp): # loop over tab temperatures (?)\n for i in np.arange(1, jtot[id, j]):\n inc += 1\n dom0[inc] = dom[id, j, i]\n\n inc = np.count_nonzero(dom)\n npik = inc + 1\n # nut=10000\n\n # Calling numpy sort instead of piksrt\n tmp = np.sort(dom0[0:npik])\n dom0[0:npik] = tmp[0:npik]\n # dom0 seems to agree with the FORTRAN version\n\n inc = 0\n domm[0] = 0.0\n # print 'npik',npik\n # ---- Look to replace this loop\n for i in np.arange(1, npik):\n dif = (dom0[i] - dom0[i - 1])\n if dif <= 1.0E-6:\n continue\n if dif / np.abs(dom0[i]) <= 0.1:\n continue\n inc = inc + 1\n domm[inc] = dom0[i]\n\n jdom = inc + 1 # One line after marker 35\n\n for id in np.arange(num_tab_dens):\n for j in np.arange(10):\n if pr0[id, j] > 1.0:\n continue\n\n tprof[id, j, 0] = oline[id, j, 0]\n tprofs[id, j, 0] = olines[id, j, 0]\n\n if jtot[id, j] == 0:\n continue\n\n for i in np.arange(1, jdom + 1):\n skip1 = False\n skip2 = False\n # print 'i',i\n domeg = domm[i]\n 
ij_max = jtot[id, j]\n # print 'domeg,ij_max',domeg,ij_max\n for ij in np.arange(1, ij_max - 1):\n # print 'ij',ij\n test = (domeg - dom[id, j, ij]) * (domeg - dom[id, j, ij - 1])\n # print 'test1:',test\n if test <= 0.0:\n # print 'triggered test1'\n x1 = dom[id, j, ij - 1]\n x2 = dom[id, j, ij]\n x3 = dom[id, j, ij + 1]\n y1 = oline[id, j, ij - 1]\n y2 = oline[id, j, ij]\n y3 = oline[id, j, ij + 1]\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij - 1]\n y2 = olines[id, j, ij]\n y3 = olines[id, j, ij + 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n skip1 = True\n skip2 = True\n break\n\n if skip1 is False:\n test = (domeg - dom[id, j, ij_max - 2]) * (domeg - dom[id, j, ij_max - 1])\n # print 'test2:',test\n # print 'domeg',domeg\n # print 'dom[id,j,ij_max-1]',dom[id,j,ij_max-2]\n # print 'dom[id,j,ij_max]',dom[id,j,ij_max-1]\n if test <= 0.0:\n # print 'triggered test2'\n x1 = dom[id, j, ij_max - 3]\n x2 = dom[id, j, ij_max - 2]\n x3 = dom[id, j, ij_max - 1]\n y1 = oline[id, j, ij_max - 3]\n y2 = oline[id, j, ij_max - 2]\n y3 = oline[id, j, ij_max - 1]\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij_max - 3]\n y2 = olines[id, j, ij_max - 2]\n y3 = olines[id, j, ij_max - 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n skip2 = True\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n continue\n\n if skip2 is False:\n if domeg > dom[id, j, ij_max]:\n # print 'triggered test3'\n tprof[id, j, i] = fainom / (domeg ** 2.5)\n tprofs[id, j, i] = tprof[id, j, i]\n continue\n\n # We can skip writing the intermediate file\n\n\n for id in np.arange(num_tab_dens):\n otest_dens = (dens_cm - tab_dens_cm[id]) * (dens_cm - tab_dens_cm[id + 1])\n if otest_dens <= 0.0:\n dense1 = tab_dens_cm[id]\n dense2 = tab_dens_cm[id + 1]\n id1 = id\n id2 = id + 1\n break\n\n if dens_cm >= tab_dens_cm[num_tab_dens]:\n dense1 = tab_dens_cm[num_tab_dens - 1]\n dense2 = tab_dens_cm[num_tab_dens]\n id1 = num_tab_dens - 1\n id2 = num_tab_dens\n\n for it in np.arange(10):\n otest = (temp_k - tab_temp_k[it]) * (temp_k - tab_temp_k[it + 1])\n if otest <= 0.0:\n it1 = it\n it2 = it + 1\n # pr01 = pr0[id2,it1] # max value of pr0 for T1,T2,dense1,dense2\n tempe1 = tab_temp_k[it]\n tempe2 = tab_temp_k[it + 1]\n break\n\n # interpolation in temperature\n for id in np.arange(id1, id2 + 1):\n for i in np.arange(jdom):\n uprof[id, i] = tprof[id, it1, i] + (temp_k - tempe1) * (tprof[id, it2, i] - tprof[id, it1, i]) / (\n tempe2 - tempe1)\n uprofs[id, i] = tprofs[id, it1, i] + (temp_k - tempe1) * (tprofs[id, it2, i] - tprofs[id, it1, i]) / (\n tempe2 - tempe1)\n\n delta_lambda = np.zeros(jdom)\n delta_nu = np.zeros(jdom)\n wprof_nu = np.zeros(jdom)\n wprofs_nu = np.zeros(jdom)\n\n for i in np.arange(jdom):\n wprof = uprof[id1, i] + (dens_cm - dense1) * (uprof[id2, i] - uprof[id1, i]) / (dense2 - dense1)\n wprofs = uprofs[id1, i] + (dens_cm - dense1) * (uprofs[id2, i] - uprofs[id1, i]) / (dense2 - dense1)\n delta_omega = domm[i] * normal_holtsmark_field\n delta_nu[i] = delta_omega / (2 * np.pi)\n delta_lambda[i] = wl_centre_angst * delta_omega / (angular_freq_0 + delta_omega)\n # print(delta_lambda[i])\n wprof_nu[i] = (wprof / normal_holtsmark_field) * (2. 
* np.pi)\n wprofs_nu[i] = (wprofs / normal_holtsmark_field) * (2. * np.pi)\n # print '%e %e %e %e' %(delta_lambda[i],delta_nu[i],wprof_nu[i],wprofs_nu[i])\n\n delta_lambda2 = np.concatenate((-delta_lambda[::-1], delta_lambda)) + wl_centre_angst # + olam0\n delta_nu2 = np.concatenate((-delta_nu[::-1], delta_nu))\n wprof_nu2 = np.concatenate((wprof_nu[::-1], wprof_nu))\n wprofs_nu2 = np.concatenate((wprofs_nu[::-1], wprofs_nu))\n\n # for some reason, i only get a good agreement with the other models if i take the pure Stark broadened Stehle\n # output and manually convolve it with the Doppler profile -- not sure why...\n ls_sd = wprofs_nu2\n\n # interpolate onto frequency axis\n ls_sd = np.interp(self.freq_axis, delta_nu2 + self.freq_centre, ls_sd)\n\n return ls_sd", "def compute_ground_truth_volume(self, display_opt):\n\n self.meshActor.GetProperty().SetOpacity(0.2)\n self.meshActor.GetProperty().SetColor(1, 0, 0)\n\n clean = vtk.vtkCleanPolyData()\n clean.SetInputData(self.endo_poly)\n\n d3 = vtk.vtkDelaunay3D()\n d3.SetInputConnection(clean.GetOutputPort())\n d3.SetTolerance(0.01)\n d3.SetAlpha(0.0)\n d3.Update()\n\n surfaceFilter = vtk.vtkDataSetSurfaceFilter() # output is triangular mesh\n surfaceFilter.SetInputConnection(d3.GetOutputPort())\n surfaceFilter.Update()\n\n Mass = vtk.vtkMassProperties()\n Mass.SetInputConnection(surfaceFilter.GetOutputPort())\n Mass.Update()\n\n self.ground_truth_vol = Mass.GetVolume()/1000.0\n\n if display_opt:\n\n m = vtk.vtkDataSetMapper()\n m.SetInputConnection(d3.GetOutputPort())\n\n a = vtk.vtkActor()\n a.SetMapper(m)\n\n # set mapper for epi for visualization\n m2 = vtk.vtkDataSetMapper()\n m2.SetInputData(self.epi_poly)\n\n epi_actor = vtk.vtkActor()\n epi_actor.SetMapper(m2)\n epi_actor.GetProperty().SetOpacity(0.3)\n epi_actor.GetProperty().SetColor(1,0,0)\n\n ren = vtk.vtkRenderer()\n ren.SetBackground(0.0, 0.0, 0.0)\n ren.AddActor(epi_actor)\n ren.AddActor(a)\n\n vtk_show(ren)", "def test_set_hx(self):\n s = State(substance=\"water\")\n s.hx = Q_(1624328.2430353598, \"J/kg\"), Q_(0.5, \"dimensionless\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(245769.34557103913, \"Pa\")) # type: ignore\n assert np.isclose(s.xT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.xT[0], Q_(0.5, \"dimensionless\")) # type: ignore\n assert np.isclose(s.u, Q_(1534461.5163075812, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(4329.703956664546, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(4056.471547685226, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(2913.7307270395363, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.3656547423394701, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1624328.2430353598, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.5, \"dimensionless\")) # type: ignore", "def test_set_Th(self):\n s = State(substance=\"water\")\n s.Th = Q_(400.0, \"K\"), Q_(2730301.3859201893, \"J/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.Th[0], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.Th[1], Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) 
# type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def update_temperature(self):\n if self.T < self.Tmin:\n return False\n self.T -= self.alpha\n\n return True", "def sky(seed=425, th=150, old=False):\n \n # impact parameters\n M = 3e7*u.Msun\n B = 19.95*u.kpc\n #B = 20.08*u.kpc\n V = 190*u.km/u.s\n phi = coord.Angle(0*u.deg)\n th = 150\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.05*u.Myr\n rs = 0*u.pc\n \n old_label = ''\n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n if old:\n old_label = '_old_up'\n observer = {'z_sun': -2000.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 50*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0,0,0]*u.km/u.s}\n \n # impact parameters\n M = 3e7*u.Msun\n B = 20.06*u.kpc\n V = 190*u.km/u.s\n phi = coord.Angle(0*u.deg)\n th = 155\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.55*u.Gyr\n dt = 0.05*u.Myr\n #dt = 1*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 1400\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xphi0 = np.linspace(-0.1*np.pi, 0.1*np.pi, 1000)\n xphi1 = np.linspace(-0.28*np.pi, -0.1*np.pi, 200)\n xphi2 = np.linspace(0.1*np.pi, 0.32*np.pi, 200)\n xphi = np.concatenate([xphi1, xphi0, xphi2])\n \n xr = 20*u.kpc + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh# * 0.94\n vy = np.sin(xphi) * Vh #* 0.97\n vz = vx * 0\n # closest to impact\n ienc = np.argmin(np.abs(x))\n \n # generate stream model\n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ 
= gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq0.ra.deg[::10], xeq0.dec.deg[::10])\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n # place gap at xi~0\n xioff = xi0[ienc]\n xi0 -= xioff\n \n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n vlabel = ['$\\mu_{\\\\alpha_\\star}$ [mas yr$^{-1}$]','$\\mu_{\\delta}$ [mas yr$^{-1}$]', '$V_r$ [km s$^{-1}$]']\n ylims = [[-0.5, 0.5], [-0.5, 0.5], [-25,25]]\n color = '0.35'\n ms = 4\n \n # plotting\n plt.close()\n fig, ax = plt.subplots(5,1,figsize=(12,12), sharex=True)\n \n plt.sca(ax[0])\n g = Table(fits.getdata('/home/ana/projects/GD1-DR2/output/gd1_members.fits'))\n plt.scatter(g['phi1']+40, g['phi2'], s=g['pmem']*2, c=g['pmem'], cmap=mpl.cm.binary, vmin=0.5, vmax=1.1)\n \n plt.xlim(-45,45)\n plt.ylim(-10,10)\n plt.gca().set_aspect('equal')\n plt.ylabel('$\\phi_1$ [deg]')\n \n plt.sca(ax[1])\n plt.plot(xi.wrap_at(wangle), eta, 'o', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\phi_1$ [deg]')\n plt.ylim(-10,10)\n plt.gca().set_aspect('equal')\n \n xeqs = [xeq.ra, xeq.dec, xeq.distance.to(u.kpc)]\n for i in range(3):\n plt.sca(ax[i+2])\n \n # interpolate expected kinematics from an unperturbed stream\n vexp = np.interp(xi.wrap_at(wangle), xi0.wrap_at(wangle), veq0[i].value) * veq0[i].unit\n plt.plot(xi.wrap_at(wangle), veq[i]-vexp, 'o', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n\n plt.xlabel('$\\phi_2$ [deg]')\n \n plt.tight_layout()\n plt.savefig('../plots/spur_morphology_sky{}.png'.format(old_label))", "def test_set_sv(self):\n s = State(substance=\"water\")\n s.sv = Q_(3028.9867985920914, \"J/(kg*K)\"), Q_(0.4772010021515822, \"m**3/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.sv[0], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.sv[1], Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore", "def test_set_xh(self):\n s = State(substance=\"water\")\n s.xh = Q_(0.5, \"dimensionless\"), Q_(1624328.2430353598, \"J/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(245769.34557103913, \"Pa\")) # type: ignore\n assert np.isclose(s.xT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.xT[0], Q_(0.5, \"dimensionless\")) # type: ignore\n assert np.isclose(s.u, Q_(1534461.5163075812, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(4329.703956664546, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(4056.471547685226, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(2913.7307270395363, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.3656547423394701, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1624328.2430353598, 
\"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.5, \"dimensionless\")) # type: ignore", "def gensettings(T, Z=1, E=2, n=5e19, yMax=20):\n betaTh = DREAM.Formulas.getNormalizedThermalSpeed(T)\n pMax = yMax * betaTh\n Ec = DREAM.Formulas.getEc(T, n)\n\n ds = DREAMSettings()\n\n ds.collisions.lnlambda = Collisions.LNLAMBDA_THERMAL\n\n ds.eqsys.E_field.setPrescribedData(E)\n ds.eqsys.n_i.addIon(name='Ion', Z=Z, n=n/Z, iontype=IonSpecies.IONS_PRESCRIBED_FULLY_IONIZED) # Imaginary ion with charge Z\n ds.eqsys.n_cold.setPrescribedData(n)\n ds.eqsys.T_cold.setPrescribedData(T)\n ds.eqsys.f_hot.setInitialProfiles(rn0=0, n0=n, rT0=0, T0=T)\n ds.eqsys.n_re.setAvalanche(avalanche=Runaways.AVALANCHE_MODE_NEGLECT)\n ds.eqsys.f_hot.setAdvectionInterpolationMethod(ad_int=FHot.AD_INTERP_QUICK)\n \n ds.hottailgrid.setNxi(20)\n ds.hottailgrid.setNp(100)\n ds.hottailgrid.setPmax(pMax)\n\n ds.runawaygrid.setEnabled(False)\n\n ds.radialgrid.setB0(1)\n ds.radialgrid.setMinorRadius(0.1)\n ds.radialgrid.setWallRadius(0.1)\n ds.radialgrid.setNr(1)\n\n tMax0 = pMax*Ec / E\n ds.timestep.setTmax(.9*tMax0)\n ds.timestep.setNt(nTimeSteps)\n\n ds.other.include('fluid/runawayRate', 'fluid/gammaDreicer')\n\n \"\"\" \n If using MUMPS, computation time can be reduced by 30%:\n ds.solver.setLinearSolver(Solver.LINEAR_SOLVER_MUMPS)\n \"\"\"\n \n return ds", "def temp(self):\n if self.temp_sensor is None:\n return None\n else:\n if self.temp_scale.lower() in ['f', 'fahrenheit']:\n return self.temp_sensor.temp_f\n elif self.temp_scale.lower() in ['c', 'celsius']:\n return self.temp_sensor.temp_c", "def TestTDDFT():\n prm = '''\n Model\tTDHF\n Method\tMMUT\n dt\t0.02\n MaxIter\t100\n ExDir\t1.0\n EyDir\t1.0\n EzDir\t1.0\n FieldAmplitude\t0.01\n FieldFreq\t0.9202\n ApplyImpulse\t1\n ApplyCw\t\t0\n StatusEvery\t10\n '''\n geom = \"\"\"\n H 0. 0. 0.\n H 0. 0. 0.9\n H 2.0 0. 0\n H 2.0 0.9 0\n \"\"\"\n output = re.sub(\"py\",\"dat\",sys.argv[0])\n mol = gto.Mole()\n mol.atom = geom\n mol.basis = 'sto-3g'\n mol.build()\n the_scf = pyscf.dft.RKS(mol)\n the_scf.xc='HF'\n print \"Inital SCF finished. 
E=\", the_scf.kernel()\n aprop = tdscf.tdscf(the_scf,prm,output)\n return", "def __t_fine__(self, adc_temperature):\n var1 = (((adc_temperature >> 3) -\n (self._calibration_t[0] << 1)) * self._calibration_t[1]) >> 11\n var2 = (((\n ((adc_temperature >> 4) - self._calibration_t[0]) *\n ((adc_temperature >> 4) - self._calibration_t[0])) >> 12)\n * self._calibration_t[2]) >> 14\n return var1 + var2", "def use_tonce(self):\r\n return self.config.get_bool(\"gox\", \"use_tonce\")", "def test_t3(self):\n periods = 200\n t3 = qufilab.t3(self.close, periods)\n t3_talib = talib.T3(self.close, periods)\n np.testing.assert_allclose(t3, t3_talib, rtol = self.tolerance)", "def get_sn2005ek(colorplt=False):\n z = 0.016551\n ebv = 0.210\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 53639.9\n print (\"adopt r band t_max from Drout+13\")\n \n # tb = pd.read_csv('/Users/yuhanyao/Desktop/ZTF18abfcmjw/data/Drout2013/table1', sep='\\t')\n # tb = tb.drop(columns=[\"Unnamed: 6\"])\n \n mjds = np.array([53639.3, 53640.3, 53641.3, 53642.2, 53643.2, 53645.3,\n 53646.5, 53648.0, 53649.2, 53650.4, 53651.3, 53652.5,\n 53654.2, 53655.2, 53656.2, 53657.2])\n \n Bmags = np.array([18.25, 18.38, 18.65, np.nan, 19.10, 19.71,\n 20.07, np.nan, 20.67, 20.90, 21.05, np.nan,\n 21.74, np.nan, np.nan, np.nan])\n \n Bmag_uncs = np.array([0.02, 0.03, 0.02, np.nan, 0.05, 0.07, \n 0.07, np.nan, 0.04, 0.04, 0.04, np.nan,\n 0.12, np.nan, np.nan, np.nan])\n \n Vmags = np.array([17.83, 18.03, 17.92, np.nan, 18.24, 18.66,\n 18.93, 19.48, 19.63, 19.86, 19.98, 20.35,\n 20.60, 20.74, 20.88, 21.22])\n \n Vmag_uncs = np.array([0.02, 0.03, 0.01, np.nan, 0.02, 0.02,\n 0.02, 0.06, 0.03, 0.03, 0.04, 0.05, \n 0.08, 0.10, 0.08, 0.13])\n \n Rmags = np.array([17.46, 17.41, 17.60, 17.69, 17.86, 18.18, \n np.nan, 18.83, 19.03, 19.26, 19.48, 19.75,\n 20.08, np.nan, 20.47, np.nan])\n \n Rmag_uncs = np.array([0.01, 0.02, 0.01, 0.02, 0.01, 0.01,\n np.nan, 0.03, 0.02, 0.02, 0.02, 0.04,\n 0.05, np.nan, 0.08, np.nan])\n\n Imags = np.array([17.20, 17.13, 17.18, np.nan, 17.47, 17.71, \n np.nan, 18.13, 18.26, 18.51, 18.61, 18.74, \n 19.01, np.nan, 19.47, np.nan])\n \n Imag_uncs = np.array([0.02, 0.04, 0.02, np.nan, 0.03, 0.02,\n np.nan, 0.06, 0.02, 0.02, 0.02, 0.03,\n 0.05, np.nan, 0.06, np.nan])\n \n mymjds = np.hstack([mjds, mjds, mjds, mjds])\n mymags = np.hstack([Bmags, Vmags, Rmags, Imags])\n myemags = np.hstack([Bmag_uncs, Vmag_uncs, Rmag_uncs, Imag_uncs])\n myfilts = np.hstack([ np.repeat(\"B\", len(Bmags)),\n np.repeat(\"V\", len(Bmags)),\n np.repeat(\"R\", len(Rmags)),\n np.repeat(\"I\", len(Imags)) ])\n ix = ~np.isnan(mymags)\n tb = pd.DataFrame({'mjd': mymjds[ix],\n 'mag': mymags[ix],\n 'emag': myemags[ix],\n \"filter\": myfilts[ix]})\n \n ixB = tb['filter'].values==\"B\"\n ixV = tb['filter'].values==\"V\"\n ixR = tb['filter'].values==\"R\"\n ixI = tb['filter'].values==\"I\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixV] = 5430\n tb['wave'].values[ixR] = 6349\n tb['wave'].values[ixI] = 8797\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['B', 'R', 'I']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n 
flts = tbsub['filter'].values\n if \"R\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"B\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"R\"]\n itb = tbsub[tbsub[\"filter\"].values==\"I\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"BmR\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"RmI\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def temperature(wair,pres,entr=None,temp=None,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,\n chkbnd=False,mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n return temp", "def read_satellite(filename, ftype):\n #ftype = 'l3c'\n #filename = '/gws/nopw/j04/cds_c3s_sst/output/v2.6.0/l3c/AVHRR19_G/2018/03/01/20180301120000-C3S-L3C_GHRSST-SSTskin-AVHRR19_G-ICDR2.0_day-v02.0-fv01.0.nc'\n #ftype = 'l4'\n #filename = '/gws/nopw/j04/cds_c3s_sst/public/data/ICDR_v2/Analysis/L4/v2.0/2018/01/01/20180101120000-C3S-L4_GHRSST-SSTdepth-OSTIA-GLOB_ICDR2.0-v02.0-fv01.0.nc'\n print \"Reading %s file: %s\" % (ftype, filename)\n \n # Read data - L4 or L3C (note L4 mask and L3C quality level have same array name)\n ncin = netCDF4.Dataset(filename)\n if ftype == 'l4':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n sst = ncin.variables['analysed_sst'][:]\n unc = ncin.variables['analysis_uncertainty'][:]\n sea_ice_frac = ncin.variables['sea_ice_fraction'][:]\n ql = ncin.variables['mask'][:]\n sstfill = ncin.variables['analysed_sst']._FillValue\n sstao = ncin.variables['analysed_sst'].add_offset\n sstsf = ncin.variables['analysed_sst'].scale_factor\n elif ftype == 'l3c':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n time_bnds = ncin.variables['time_bnds'][:]\n sst = ncin.variables['sea_surface_temperature'][:]\n sst_depth = ncin.variables['sea_surface_temperature_depth'][:]\n sst_dtime = ncin.variables['sst_dtime'][:]\n sst_depth_dtime = 
ncin.variables['sst_depth_dtime'][:]\n sses_bias = ncin.variables['sses_bias'][:]\n sses_sd = ncin.variables['sses_standard_deviation'][:]\n sst_depth_total_unc = ncin.variables['sst_depth_total_uncertainty'][:]\n l2p_flags = ncin.variables['l2p_flags'][:]\n ql = ncin.variables['quality_level'][:]\n wind_speed = ncin.variables['wind_speed'][:]\n large_scale_cor_unc = ncin.variables['large_scale_correlated_uncertainty'][:]\n synop_cor_unc = ncin.variables['synoptically_correlated_uncertainty'][:]\n uncor_unc = ncin.variables['uncorrelated_uncertainty'][:]\n adj_unc = ncin.variables['adjustment_uncertainty'][:]\n aerosol_dyn_ind = ncin.variables['aerosol_dynamic_indicator'][:]\n sens = ncin.variables['sensitivity'][:]\n tfill = ncin.variables['sst_dtime']._FillValue\n sstfill = ncin.variables['sea_surface_temperature']._FillValue\n sstao = ncin.variables['sea_surface_temperature'].add_offset\n sstsf = ncin.variables['sea_surface_temperature'].scale_factor\n else:\n print 'ftype not recognised or supported'\n \n # Create time field\n # -> If L4 then create a time field set to time in L4 file\n # -> Also add a time fill value to keep coding simple later on\n if ftype == 'l4':\n time = np.empty((7200,3600))\n time[:,:] = time_read\n tfill = -2147483648\n else:\n time = copy.deepcopy(sst_dtime) # Need to make a hard copy\n mask = sst_dtime.mask == False; mask = mask[0,:,:]\n row, col = np.where(mask==True)\n time.data[0, row, col] = time.data[0,row, col] + time_read\n \n # Create output structure\n if ftype == 'l4':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n sst=sst,\n unc=unc,\n sea_ice_frac=sea_ice_frac,\n ql=ql,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n elif ftype == 'l3c':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n time_bnds=time_bnds,\n sst=sst,\n sst_depth=sst_depth,\n sst_dtime=sst_dtime,\n sst_depth_dtime=sst_depth_dtime,\n sses_bias=sses_bias,\n sses_sd=sses_sd,\n sst_depth_total_unc=sst_depth_total_unc,\n l2p_flags=l2p_flags,\n ql=ql,\n wind_speed=wind_speed,\n large_scale_cor_unc=large_scale_cor_unc,\n synop_cor_unc=synop_cor_unc,\n uncor_unc=uncor_unc,\n adj_unc=adj_unc,\n aerosol_dyn_ind=aerosol_dyn_ind,\n sens=sens,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n else:\n print 'ftype not recognised or supported'\n \n return data", "def test_saturation_equivalent_potential_temperature():\n p = 700 * units.mbar\n t = 263.15 * units.kelvin\n s_ept = saturation_equivalent_potential_temperature(p, t)\n # 299.096584 comes from equivalent_potential_temperature(p,t,t)\n # where dewpoint and temperature are equal, which means saturations.\n assert_almost_equal(s_ept, 299.10542 * units.kelvin, 3)", "def zernikeHexapodTrend(mnts='M20'):\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n if mnts == 'M20':\n idxBase = 9\n if mnts == 'M22real':\n idxBase = 29\n if mnts == 'M22imag':\n idxBase = 49\n idx = np.arange(14)\n zernikeName=('Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20')\n for i in range(14):\n pl.figure(figsize=(21,10))\n pl.subplot(2,3,1)\n 
bp.bin_scatter(x,b[:,idxBase+idx[i]],binsize=0.01,fmt='bo',scatter=True)\n pl.xlabel('x decenter')\n pl.ylabel(zernikeName[i+1])\n pl.title(mnts)\n pl.subplot(2,3,2)\n bp.bin_scatter(y,b[:,idxBase+idx[i]],binsize=0.01,fmt='bo',scatter=True)\n pl.xlabel('y decenter')\n pl.ylabel(zernikeName[i+1])\n pl.title(mnts)\n pl.subplot(2,3,3)\n bp.bin_scatter(z,b[:,idxBase+idx[i]],binsize=0.01,fmt='bo',scatter=True)\n pl.xlabel('z-defocus')\n pl.ylabel(zernikeName[i+1])\n pl.title(mnts)\n pl.subplot(2,3,4)\n bp.bin_scatter(thetax,b[:,idxBase+idx[i]],binsize=5,fmt='bo',scatter=True)\n pl.xlabel('x-tilt')\n pl.ylabel(zernikeName[i+1])\n pl.title(mnts)\n pl.subplot(2,3,5)\n bp.bin_scatter(thetay,b[:,idxBase+idx[i]],binsize=5,fmt='bo',scatter=True)\n pl.xlabel('y-tilt')\n pl.ylabel(zernikeName[i+1])\n pl.title(mnts)\n pl.savefig(mnts+'_'+str(i+1)+'_'+zernikeName[i+1]+'.png')\n pl.close()", "def isTemperature(obxDict):\n readingCode = getReadingCode(obxDict)\n return readingCode == '8310-5'", "def test_get_filtered_state_t(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing,\n theta_ll_mvar_diffuse):\n kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)\n kf.fit(theta_ll_mvar_diffuse)\n \n t = kf.T - 1\n result = kf.get_filtered_state(t)\n expected_result = {'P_star_t': kf.P_star_t[t][0],\n 'P_inf_t': kf.P_inf_t[t][0],\n 'xi_t': kf.xi_t[t][0],\n 'q': 0}\n for i in ['P_star_t', 'P_inf_t', 'xi_t']:\n np.testing.assert_array_equal(result[i], expected_result[i])\n assert result['q'] == expected_result['q']", "def T_naught(z, h, OM, OB):\n\n T0 = 28.5 * ((1.0+z)/10.0)**(0.5) * OB/0.042 * h/0.73 * (0.24/OM)**(0.5)\n return T0", "def setup_UT_te(self):\n self.setup_O()\n self.setup_T()\n # diagonalizing T\n ET, LT, RT = eig(self.T, b=self.O, left=True, right=True)\n LT = LT.transpose().conjugate()\n exp_T = np.exp(-1j*ET / self.hbar)\n # order according to absolute value:\n i_sort = np.argsort(-abs(exp_T))\n exp_T = exp_T[i_sort]\n RT = RT[:,i_sort]\n LT = LT[i_sort,:]\n # normalize RL to O and test the decomposition\n RT, LT = self.normalize_RL_to_O(RT, LT)\n # test the quality of the decomposition -------------------------\n # we exclude directions of evals below 10**(-15) by hand\n max_mode = len(np.where(abs(exp_T)>10**(-15))[0])\n ET_red = ET[:max_mode]\n RT_red = RT[:,:max_mode]\n LT_red = LT[:max_mode,:]\n # 1) test of orthogonality on the reduced space\n unity = np.dot(LT_red, np.dot(self.O, RT_red))\n ortho_error = abs(unity - np.diag(np.ones(max_mode))).max()\n print(\"Orthogonality errors\", ortho_error)\n # 1) test difference between the full and the reduced te-operator\n UT_red = np.dot(RT_red, np.dot(np.diag(exp_T[:max_mode]),\n np.dot(LT_red, self.O)))\n UT = np.dot(RT, np.dot(np.diag(exp_T), np.dot(LT, self.O)))\n print(\"Propagator error\", abs(UT_red - UT).max())\n self.UT = UT", "def ft_tc3(\n cls,\n ts: np.ndarray,\n lag: t.Optional[t.Union[str, int]] = None,\n only_numerator: bool = False,\n max_nlags: t.Optional[int] = None,\n detrended_acfs: t.Optional[np.ndarray] = None,\n detrended_ami: t.Optional[np.ndarray] = None,\n ) -> float:\n _lag = _embed.embed_lag(\n ts=ts,\n lag=lag,\n max_nlags=max_nlags,\n detrended_acfs=detrended_acfs,\n detrended_ami=detrended_ami,\n )\n\n ts_shift_1 = ts[: -2 * _lag]\n ts_shift_2 = ts[_lag:-_lag]\n ts_shift_3 = ts[2 * _lag :]\n\n _aux = ts_shift_1 * ts_shift_2\n numen = np.mean(_aux * ts_shift_3)\n\n if only_numerator:\n return numen\n\n denom = np.abs(np.mean(_aux)) ** 1.5\n\n tc3 = numen / denom\n\n return tc3", "def 
check_valid_temperature(var, units):\n check_valid(var, \"standard_name\", \"air_temperature\")\n check_valid(var, \"units\", units)", "def test_no_backg_subt():\n \n test_object = fa.read_in_envision(data_csv=HsHis6_PEX5C_vs_HsPEX5C, platemap_csv=Hs_His6_PEX5C_vs_HsPEX5C_platemap, data_type='plate', size=384)\n test_object.calculate_r_i(correct=True, plot_i=False, thr=80)", "def addtonc(ncfout,key,vd,ofield,ftype=\"timeseries\"):\n nc_out=nc.Dataset(ncfout,'r+')\n if ftype==\"timeseries\":\n diml=['time','height','south_north','west_east'] # Tuple of Dimensions\n if vd['dims']==4:\n dimtup=tuple(diml)\n elif vd['dims']==3:\n dimtup = tuple([c for c in diml if c != \"height\"])\n elif vd['dims']==2:\n dimtup = tuple([c for c in diml if c not in [\"height\",\"time\"]])\n elif ftype==\"roughness\":\n diml=['south_north','west_east']\n dimtup=tuple(diml)\n elif ftype==\"tabfile\":\n diml=['south_north','west_east','sector','wind','stab']\n if vd['dims']==3:\n dimtup=tuple(diml.remove('wind').remove('stab'))\n if vd['dims']==2:\n dimtup=tuple(diml.remove('wind').remove('stab').remove('sector'))\n if key in (\"TKE\", \"ABLAT_CYL\", \"ACCRE_CYL\"):\n outv=nc_out.createVariable(key, 'f4', dimtup, zlib=True,\n complevel=9, fill_value=-999.)\n else:\n outv=nc_out.createVariable(key,'f4',dimtup,zlib=True,complevel=9)\n outv.units=vd['units']\n outv.long_name=vd['name']\n if vd['std_name'] is not None:\n outv.standard_name=vd['std_name']\n if key==\"PRECIP\":\n outv.cell_methods=\"time: sum\"\n outv.grid_mapping=\"crs\"\n outv.coordinates=\"XLAT XLON\"\n outv[:]=ofield[:]\n nc_out.close()\n return(None)", "def test_set_vs(self):\n s = State(substance=\"water\")\n s.vs = Q_(0.4772010021515822, \"m**3/kg\"), Q_(3028.9867985920914, \"J/(kg*K)\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.vs[0], Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.vs[1], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore", "def tempWater(sample):\n sample *= .0009\n sample *= 1000\n celsius = (sample - 20.5128) * 0.0512\n return round(celsius,2)", "def tsz_spectrum(self, nu):\n x = NU_SCALE * nu # Frequency/temperature\n #g_nu = ( x*(np.exp(x) + 1.) / (np.exp(x) - 1.) ) - 4. # tSZ spectral dependence\n g_nu = x**2. * np.exp(x) * (x/np.tanh(x/2.) - 4.) 
/ (np.exp(x) - 1.)**2.\n return g_nu", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def get_iPTF14gqr(colorplt=False):\n z = 0.063\n # ebv = 0.082\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_exp = 56943.74 # \n t_max = 56950.26 # g band max light + 3\n \n tb = Table(fits.open('../data/otherSN/De2018/tables1.fit')[1].data)\n tb.rename_column('MJD' , 'mjd')\n tb['texp_rf'] = (tb['mjd'] - t_exp) / (1+z)\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n # tb = tb[tb[\"Filt\"]==\"g \"]\n tb = tb[~np.isnan(tb['e_mag'])]\n tb.rename_column('Filt' , 'filter')\n tb.rename_column('e_mag' , 'emag')\n tb.rename_column('mag' , 'mag0')\n \n ixg = tb['filter']==\"g \"\n ixB = tb['filter']==\"B \"\n ixV = tb['filter']==\"V \"\n ixr = tb['filter']==\"r \"\n ixi = tb['filter']==\"i \"\n ixUVW1 = tb['filter']==\"UVW1\"\n ixUVW2 = tb['filter']==\"UVW2\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'][ixUVW2] = 2079\n tb['wave'][ixUVW1] = 2614\n tb['wave'][ixB] = 4359\n tb['wave'][ixg] = 4814\n tb['wave'][ixV] = 5430\n tb['wave'][ixr] = 6422\n tb['wave'][ixi] = 7883\n \n tb['mag0_abs'] = tb['mag0'] - dis_mod\n \n tb = tb.to_pandas()\n tb[\"texp_rf\"] = tb[\"Phase\"]\n tb = tb.drop(columns=[\"recno\", \"Phase\", \"l_mag\"])\n \"\"\"\n ix = np.any([tb['Tel'].values==\"P60 \",\n tb[\"filter\"].values=='g '], axis=0)\n tb = tb[ix]\n \"\"\"\n tb = add_datecol(tb)\n tb = add_physcol(tb)\n tt = tb[\"tmax_rf\"].values\n epochs = [\" \" for x in range(len(tt))]\n epochs = np.array(epochs)\n \"\"\"\n ix = (tt>-5.6)&(tt<-5.55)\n epochs[ix] = \"epoch 01\"\n \"\"\"\n ix = (tt>-5.55)&(tt<-5.50)\n epochs[ix] = \"epoch 02\"\n \n ix = (tt>-5.50)&(tt<-5.45)\n epochs[ix] = \"epoch 03\"\n \n ix = (tt>-5.2)&(tt<-5.0)\n epochs[ix] = \"epoch 04\"\n ix = (tt>-5.0)&(tt<-4.7)\n epochs[ix] = \"epoch 05\"\n \n ix = (tt>-4.7)&(tt<-4.5)\n epochs[ix] = \"epoch 06\"\n ix = (tt>-4.5)&(tt<-3.5)\n epochs[ix] = \"epoch 07\"\n ix = (tt>-3.5)&(tt<-2.5)\n epochs[ix] = \"epoch 08\"\n ix = (tt>-1.5)&(tt<-1)\n epochs[ix] = \"epoch 09\"\n ix = (tt>-1)&(tt<-0.82)\n epochs[ix] = \"epoch 10\"\n ix = (tt>-0.82)&(tt<-0.6)\n epochs[ix] = \"epoch 11\"\n ix = (tt>-0.5)&(tt<0.5)\n epochs[ix] = \"epoch 12\"\n ix = (tt>0.5)&(tt<1.5)\n epochs[ix] = \"epoch 13\"\n ix = (tt>1.5)&(tt<2.5)\n epochs[ix] = \"epoch 14\"\n ix = (tt>3.5)&(tt<4.5)\n epochs[ix] = \"epoch 15\"\n ix = (tt>4.5)&(tt<5)\n epochs[ix] = \"epoch 16\"\n ix = (tt>5)&(tt<5.6)\n epochs[ix] = \"epoch 17\"\n ix = (tt>5.6)&(tt<5.8)\n epochs[ix] = \"epoch 18\"\n ix = (tt>6)&(tt<7)\n epochs[ix] = \"epoch 19\"\n ix = (tt>7)&(tt<8)\n epochs[ix] = \"epoch 20\"\n ix = (tt>8)&(tt<9)\n epochs[ix] = \"epoch 21\"\n tb[\"epoch\"] = epochs\n\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['g ', 'r ', 'i ']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r \" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g \"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r \"]\n itb = tbsub[tbsub[\"filter\"].values==\"i \"]\n if len(gtb)!=0:\n gmjds = 
gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def test_temperatures_units(self):\n self.assertEqual(str(self.TmaxUnits), 'K')", "def test_tte3(self):\n filename = str(self.temp_j2k_filename)\n xtx3_setup(filename)\n self.assertTrue(True)", "def tdmSpectrum(channelWidth, nchan):\n if ((channelWidth >= 15e6/2. and nchan>240) or # 1 pol TDM, must avoid 240-chan FDM\n (channelWidth >= 15e6)):\n# (channelWidth >= 15e6 and nchan<96) or # 2 pol TDM (128 chan)\n# (channelWidth >= 30e6 and nchan<96)): # 4 pol TDM (64 chan)\n return True\n else:\n return False", "def true_lrv_ma3_t_free(sigma: float): \n return (148 / 3) * (sigma ** 2)", "def test_filt_stmag(self):\n sun = Sun.from_builtin('E490_2014')\n V = get_bandpass('johnson v')\n wave, fluxd = sun.filt(V, unit=u.STmag)\n assert np.isclose(fluxd.value, -26.76, atol=0.003)", "def temperatures():\n\n return station_9281", "def test_3d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n db.close()\n\n date = pd.to_datetime('2015-04-01')\n self.full_iv.get_data()\n df_date0 = self.full_iv.df_all.query('date == %r' % date)\n df_date1 = df_iv.query('date == %r' % date)\n df_date = pd.concat([df_date0, df_date1])\n \"\"\":type: pd.DataFrame\"\"\"\n\n x = df_date['dte']\n y = df_date['strike']\n z = df_date['impl_vol']\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n # noinspection PyUnresolvedReferences\n ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)\n # ax.plot_wireframe(x, y, z, rstride=1, cstride=1)\n plt.show()", "def dewT_2_q_magnus(ds, var):\n A1, B1, C1 = 17.625, 243.04, 610.94\n vpsl = C1 * np.exp(A1 * (ds[var['dew']] - 273.15) / (B1 + (ds[var['dew']] - 273.15)))\n wsl = eps0 * vpsl / (ds[var['pressure']] - vpsl)\n ds[var['spec_h']] = wsl / (1 + wsl)\n return ds", "def ferry_data_QC(ferry, TH_abs, TH_u, TH_d):\n if type(ferry) is not xr.core.dataset.Dataset:\n raise ValueError('Ferry is not defined')\n # QC1: make nan all Absolute velocities that are greater than 6.5 m/s\n abs_u = ferry.eastward_absolute_water_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n\n abs_v = ferry.northward_absolute_water_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n 
(abs(ferry.northward_absolute_water_velocity) < TH_abs))\n\n abs_w = ferry.vertical_absolute_water_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n # Get bottom track velocity for reference\n # and also clean for TH in abs velocity\n east_btv = ferry.eastward_bottom_tracking_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n\n north_btv = ferry.northward_bottom_tracking_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n\n vert_btv = ferry.vertical_bottom_tracking_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n # Estimate u_true = abs_u + east_bt_v\n u_true = abs_u + east_btv\n v_true = abs_v + north_btv\n w_true = abs_w + vert_btv\n U = np.sqrt(u_true**2 + v_true**2)\n # QC2: check that u_true and v_true are less than 4 m/s\n u_true = u_true.where((u_true < TH_u) & (v_true < TH_u) & (U < TH_u))\n v_true = v_true.where((u_true < TH_u) & (v_true < TH_u) & (U < TH_u))\n w_true = w_true.where((u_true) < TH_u & (v_true < TH_u) & (U < TH_u))\n U = U.where((u_true) < TH_u & (v_true < TH_u) & (U < TH_u))\n # Add true velocity data to the dataset\n ferry['u_true'] = u_true\n ferry['v_true'] = v_true\n ferry['w_true'] = w_true\n ferry['Horizontal_speed'] = U\n # Remove first 5 rows of depth\n ferryQC = ferry.isel(depth=slice(TH_d, None))\n goodQC = True\n return(ferryQC, goodQC)", "def test_temperatures_value(self):\n self.assertEqual(self.TminValue, 450.0)", "def cooled_surface_temp(T:np.ndarray) -> float:\n \n return T.dot(cs_temp_weights)", "def hexapodZernikeTrend(mnts='M20'):\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n if mnts == 'M20':\n idxBase = 9\n if mnts == 'M22real':\n idxBase = 29\n if mnts == 'M22imag':\n idxBase = 49\n idx = np.arange(14)\n zernikeName=('Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20')\n for i in range(14):\n pl.figure(figsize=(21,10))\n pl.subplot(2,3,1)\n bp.bin_scatter(b[:,idxBase+idx[i]],x,nbins=20,fmt='bo',scatter=True)\n pl.ylabel('x-decenter')\n pl.xlabel(zernikeName[i+1])\n pl.ylim(-0.1,0.1)\n pl.title(mnts)\n pl.subplot(2,3,2)\n bp.bin_scatter(b[:,idxBase+idx[i]],y,nbins=20,fmt='bo',scatter=True)\n pl.ylabel('y-decenter')\n pl.xlabel(zernikeName[i+1])\n pl.title(mnts)\n pl.ylim(-0.1,0.1)\n pl.subplot(2,3,3)\n bp.bin_scatter(b[:,idxBase+idx[i]],z,nbins=20,fmt='bo',scatter=True)\n pl.ylabel('z-defocus')\n pl.xlabel(zernikeName[i+1])\n pl.title(mnts)\n pl.ylim(-0.1,0.1)\n pl.subplot(2,3,4)\n bp.bin_scatter(b[:,idxBase+idx[i]],thetax,nbins=20,fmt='bo',scatter=True)\n pl.ylabel('x-tilt')\n pl.xlabel(zernikeName[i+1])\n pl.title(mnts)\n pl.ylim(-40,40)\n pl.subplot(2,3,5)\n bp.bin_scatter(b[:,idxBase+idx[i]],thetay,nbins=20,fmt='bo',scatter=True)\n pl.ylabel('y-tilt')\n pl.xlabel(zernikeName[i+1])\n pl.title(mnts)\n pl.ylim(-40,40)\n pl.savefig(zernikeName[i+1]+mnts+'_'+str(i+1)+'.png')\n 
pl.close()", "def runTE(T, E):\n ds = gensettings(T=T, E=E)\n ds.save('settings.h5')\n\n do = DREAM.runiface(ds, quiet=True)\n\n rrFull = do.other.fluid.runawayRate[:,0]\n rr = rrFull[-1]\n # Connor-Hastie runaway rate\n rrCH = do.other.fluid.gammaDreicer[-1,0]\n\n return rr, rrFull, rrCH", "def get_allsky(self):\n band = self.get_band()\n septon = self.is_septon()\n if band == '10_90' or band == '30_90' or septon:\n allsky = True\n else:\n allsky = False\n return allsky", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def trop_hght(T,z):\n dT = -np.gradient(T,z)\n for i in range(len(dT)):\n if dT[i]<2.0:\n z_2km=(z>z[i]) & (z<=(z[i]+2000))\n if (np.mean(dT[z_2km])<2) and (np.mean(dT[z_2km])<0) and (z[i]>8000):\n ztrop = z[i]\n return ztrop\n return 0", "async def test_temp_unit_fix(\n hass: HomeAssistant,\n client,\n climate_radio_thermostat_ct101_multiple_temp_units,\n climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints,\n integration,\n) -> None:\n state = hass.states.get(\"climate.thermostat\")\n assert state\n assert state.attributes[\"current_temperature\"] == 18.3\n\n state = hass.states.get(\"climate.z_wave_thermostat\")\n assert state\n assert state.attributes[\"current_temperature\"] == 21.1", "def thermodynamic_temperature(frequency, T_cmb=None):\n nu = frequency.to(si.GHz, spectral())\n\n if T_cmb is None:\n from astropy.cosmology import default_cosmology\n\n T_cmb = default_cosmology.get().Tcmb0\n\n def f(nu, T_cmb=T_cmb):\n x = _si.h * nu / _si.k_B / T_cmb\n return x**2 * np.exp(x) / np.expm1(x) ** 2\n\n def convert_Jy_to_K(x_jybm):\n factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(\n astrophys.Jy\n )\n return x_jybm / factor\n\n def convert_K_to_Jy(x_K):\n factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(\n si.K\n )\n return x_K / factor\n\n return Equivalency(\n [(astrophys.Jy / si.sr, si.K, convert_Jy_to_K, convert_K_to_Jy)],\n \"thermodynamic_temperature\",\n {\"frequency\": frequency, \"T_cmb\": T_cmb},\n )", "def Tol3d(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Tol3d(self, *args)", "def layer(zs, ps, Ts, qvap, qliq, IFORM=1):\n assert IFORM == 0 or IFORM == 1\n assert len(zs) == 2\n assert len(ps) == 2\n assert len(Ts) == 2\n dz = zs[1] - zs[0]\n assert dz > 0\n pave = 0.5 * sum(ps)\n Tave = 0.5 * sum(Ts)\n Rave = (1-qvap)*Rdry + qvap*Rwat\n ρave = 100*pave / Tave / Rave\n # Calculate column number density of water from specific humidity\n H2O = (qvap # Specific humidity [kg/kg]\n * ρave # Density of water vapor → [kg/m³]\n / 0.018 # 0.018 kg of water is 1 mol → [mol/m³]\n * avogadro # Number density → [molecules/m³]\n * dz # Column number density → [molecules/m²]\n * 1.0e-4 # MonoRTM wants cm² → [molecules/cm²]\n )\n # Cloud amout in mm contained in column\n CLW = (qliq # Specific CLW [kg/kg]\n * ρave # Density of CLW [kg/m³]\n * dz # Column CLW [kg/m²], corresponds to [mm]\n )\n if CLW == 0: CLW = None\n # Broadening gas amount must be given as column density (see __doc__) ↓cm²\n broadening = mixing_ratio_Ar * dz * (pave*100) / Tave / boltzmann * 1.0e-4\n # Give species 1 (H2O), 2 (CO2), 7 (O2) and 22 (N2)\n row1 = [H2O, mixing_ratio_CO2, 0., 0., 0., 0., mixing_ratio_O2]\n row2 = [ 0., 0., 0., 0., 0., 0., 0., 0.]\n row3 = [ 0., 0., 0., 0., 0., 0., mixing_ratio_N2, None]\n # Select Record matching IFORM parameter\n Record211 = Record211_IFORM0 if IFORM == 0 else Record211_IFORM1\n return [Record211(PAVE=pave, TAVE=Tave, ALTZB=zs[0]/1000, PZB=ps[0],\n 
TZB=Ts[0], ALTZT=zs[1]/1000, PZT=ps[1], TZT=Ts[1],\n CLW=CLW), # z in km\n Record212_first(WKL=row1, WBROADL=broadening),\n Record212_other(WKL=row2),\n Record212_other(WKL=row3)\n ]", "def computeTm(self):\n #first step is finding the derivative series of the well\n x = self.temperatures\n if self.fluorescence == None:\n self.Tm = None\n return\n y = self.fluorescence\n \n xdiff = np.diff(x)\n dydx = -np.diff(y)/xdiff\n #the derivative series, has one less index since there is one fewer differences than points\n seriesDeriv = pandas.Series(dydx, x[:-1])\n \n #now that we have the derivative series, we can find the Tm\n lowestPoint = 0\n lowestPointIndex = None\n \n #gets number of signchanges between max and min of the curve, used to determin if the curve\n #is complex or not\n lowestPoint2 = 1\n lowestIndex2 = None\n highestPoint = 0\n highestIndex = None\n previous = None\n for i, value in enumerate(self.fluorescence[:-1]):\n if value > highestPoint:\n highestPoint = value\n highestIndex = i\n if highestIndex == 0 :\n highestPoint = 0\n highestIndex = None\n for i, value in enumerate(self.fluorescence[:-1]):\n if value<lowestPoint2:\n lowestPoint2 = value\n lowestIndex2 = i\n for i, value in enumerate(self.fluorescence[:-1]):\n if i < lowestIndex2:\n continue\n if value > highestPoint:\n highestPoint = value\n highestIndex = i\n else:\n for i, value in enumerate(self.fluorescence[:-1]):\n if i > highestIndex:\n break\n if value<lowestPoint2:\n lowestPoint2 = value\n lowestIndex2 = i\n signChange = False\n for ind in seriesDeriv.index[lowestIndex2+1:highestIndex]:\n \n if previous:\n if seriesDeriv[ind] + SIGN_CHANGE_THRESH < 0 and previous - SIGN_CHANGE_THRESH > 0:\n signChange = True\n if seriesDeriv[ind] - SIGN_CHANGE_THRESH > 0 and previous + SIGN_CHANGE_THRESH < 0:\n signChange = True\n # if seriesDeriv[ind] == 0:\n # signChangeCount += 1\n previous = seriesDeriv[ind]\n\n \n #finding the lowest point and its index on the derivative series\n #only search for Tm up to 90degrees, since last part is hard to predict\n #and often gives false positives\n ignoreNum = int(len(seriesDeriv.index)*0.125)\n for ind in seriesDeriv.index[:-ignoreNum]:\n if seriesDeriv[ind]<lowestPoint:\n lowestPoint = seriesDeriv[ind]\n lowestPointIndex = ind\n \n #TODO working, tms not steep enough added to complex\n #if the slope is not steep enough, tm remains saved, but curve is grouped with the\n #complex curves (now known as the unreliable group)\n #if lowestPoint > -0.000001 / (normalisationFactor / saturation max point of all curves thing):\n # print self.name, 'lowestpoint too small', lowestPoint\n # self.complex = True\n\n #if lowest point is the first index, then no curve fit is required\n if lowestPointIndex == seriesDeriv.index[0]:\n tm = lowestPointIndex\n self.Tm = tm\n \n #set complex to true if curve was complex\n if signChange:\n self.complex = True\n return\n \n #could not find any Tm\n if lowestPointIndex == None:\n self.Tm = None\n \n #if no tm, the curve hopefully be picked up as a monotonic/in the noise/saturated/outlier\n #however, if this does not happen, the curve remains as complex\n self.complex = True\n return \n \n #the indices in the series either side of the lowest index\n #note the first list is indexed e.g. 
list[i] where i is the section using .index\n leftIndex = [ind for ind in seriesDeriv.index][[ind for ind in seriesDeriv.index].index(lowestPointIndex)-1]\n rightIndex = [ind for ind in seriesDeriv.index][[ind for ind in seriesDeriv.index].index(lowestPointIndex)+1]\n \n \n #matrices used to fit a parabola to the 3 points\n Y=[seriesDeriv[leftIndex],\n seriesDeriv[lowestPointIndex],\n seriesDeriv[rightIndex]]\n \n A=[[leftIndex**2, leftIndex, 1],\n [lowestPointIndex**2, lowestPointIndex, 1],\n [rightIndex**2, rightIndex, 1]]\n \n #solve for b, in the form Y=Ab\n (a,b,c) = np.linalg.solve(A,Y)\n \n #initialise tm to left most point of relevant curve\n tm=seriesDeriv[leftIndex]\n tmValue=0\n #make tm the lowest point on the fitted parabola rounded to nearest 0.01\n for x in np.arange(leftIndex,rightIndex,0.01):\n point = (a*(x**2) + b*x + c)\n if tmValue > point:\n tmValue = point\n tm = x\n self.Tm = tm\n \n #again check for complex shape before returning\n if signChange:\n self.complex = True\n\n\n averagePoint = (lowestPoint2 +highestPoint) / 2\n i = lowestIndex2\n while self.fluorescence[i]<averagePoint:\n i += 1;\n\n # estimates tm by another method and if the difference is too large the curve is considred complex\n if (self.temperatures[i] -self.Tm)**2 > 5**2:\n self.complex=True\n return", "def sat_vap_dens(nz, T, SWVD, plot=False):\r\n rho_v = np.zeros(nz)\r\n rho_v_dT = np.zeros(nz)\r\n if SWVD == \"Libbrecht\":\r\n rho_v = (\r\n np.exp(-T_ref_L / T) / (f * T) * (a0 + a1 * (T - 273) + a2 * (T - 273) ** 2)\r\n ) # [kg/m^3] Water vapor density\r\n rho_v_dT = (\r\n np.exp(-T_ref_L / T)\r\n / (f * T ** 2)\r\n * (\r\n (a0 - a1 * 273 + a2 * 273 ** 2) * (T_ref_L / T - 1)\r\n + (a1 - a2 * 2 * 273) * T_ref_L\r\n + a2 * T ** 2 * (T_ref_L / T + 1)\r\n )\r\n ) # [kg/m^3/K]\r\n elif SWVD == \"Calonne\":\r\n x = (L_Cal * mH2O) / (rho_i * kB)\r\n rho_v = rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n rho_v_dT = x / T ** 2 * rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n elif SWVD == \"Hansen\":\r\n\r\n rho_v = (\r\n (10.0 ** (c1 / T + c2 * np.log(T) / np.log(10) + c3 * T + c4 * T ** 2 + c5))\r\n * c6\r\n / R_v\r\n / T\r\n )\r\n rho_v_dT = (\r\n rho_v\r\n * np.log(10)\r\n * (-c1 / T ** 2 + c2 / (T * np.log(10)) + c3 + 2 * c4 * T)\r\n - rho_v / T\r\n )\r\n else:\r\n raise ValueError(\"Saturation water vapor density not available\")\r\n if plot:\r\n fig1 = plt.plot(T, rho_v)\r\n plt.title(\"Water vapor density with respect to temperature\")\r\n plt.show(fig1)\r\n fig2 = plt.plot(T, rho_v_dT)\r\n plt.title(\"Derivative of water vapor density with respect to temperature\")\r\n plt.show(fig2)\r\n return rho_v, rho_v_dT", "def low_temperature(self):\r\n raise NotImplementedError" ]
[ "0.60827553", "0.5971266", "0.57040036", "0.5694853", "0.56882703", "0.5666581", "0.5637644", "0.5630418", "0.56049395", "0.55450636", "0.54949796", "0.5493195", "0.54648805", "0.54205227", "0.5363305", "0.53159505", "0.5269913", "0.52530104", "0.524438", "0.52441275", "0.5217888", "0.5202955", "0.5199603", "0.51980907", "0.5187931", "0.51861656", "0.51783586", "0.51737976", "0.51603997", "0.51466423", "0.51452756", "0.5134955", "0.51174957", "0.5114482", "0.5111926", "0.5102505", "0.5097587", "0.5087861", "0.50866026", "0.50761205", "0.50623685", "0.5060205", "0.50410193", "0.5040623", "0.503999", "0.50393665", "0.50364757", "0.5034297", "0.5027351", "0.5015674", "0.50124055", "0.50123525", "0.50055265", "0.5004005", "0.5000046", "0.49916458", "0.49875435", "0.49819043", "0.4981053", "0.49809742", "0.49708813", "0.49619997", "0.49612546", "0.4957336", "0.4956074", "0.49528268", "0.49481606", "0.4947688", "0.4945293", "0.4933763", "0.4930148", "0.4929877", "0.49291027", "0.49287608", "0.49240822", "0.49227047", "0.49203748", "0.49202663", "0.4919242", "0.49188498", "0.49091616", "0.49080643", "0.4901558", "0.48995745", "0.48984653", "0.48960838", "0.4895196", "0.48837832", "0.48800397", "0.48745677", "0.48740897", "0.48728904", "0.4866333", "0.48661923", "0.48632511", "0.48614392", "0.485826", "0.48546523", "0.4854313", "0.4852167" ]
0.65542036
0
Send an event to sensu via pysensu_yelp with the given information.
def send_event(instance_config, status, output):
    # This function assumes the input is a string like "mumble.main"
    monitoring_overrides = instance_config.get_monitoring()
    if 'alert_after' not in monitoring_overrides:
        monitoring_overrides['alert_after'] = '2m'
    monitoring_overrides['check_every'] = '1m'
    monitoring_overrides['runbook'] = monitoring_tools.get_runbook(
        monitoring_overrides,
        instance_config.service,
        soa_dir=instance_config.soa_dir,
    )
    check_name = (
        'check_marathon_services_replication.%s' % instance_config.job_id
    )
    monitoring_tools.send_event(
        service=instance_config.service,
        check_name=check_name,
        overrides=monitoring_overrides,
        status=status,
        output=output,
        soa_dir=instance_config.soa_dir,
        cluster=instance_config.cluster,
    )
    _log(
        service=instance_config.service,
        line='Replication: %s' % output,
        component='monitoring',
        level='debug',
        cluster=instance_config.cluster,
        instance=instance_config.instance,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command_where(self, bot, update):\n\n bot.sendChatAction(update.message.chat_id, action='typing')\n\n foursquare = ext.get_foursquare_location(self.config['foursquare'])\n venue = foursquare['venue']\n location = venue['location']\n\n msg = 'Myles Braithwaite checked in to *{venue[name]}* {ago}.'\n self.send_message(bot, update, msg.format(**foursquare))\n\n if location.get('address', None):\n self.send_venue(bot, update, location['lat'], location['lng'],\n venue['name'], location['address'],\n foursquare_id=venue['id'])\n else:\n self.send_location(bot, update, location['lat'], location['lng'])", "def yelp(n=1):\n tweet = get_tweet(YELP_NAME, n) # Yelp checkin info aggregated to twitter.\n yelp_info = {\n 'biz-name': parse_yelp_name(tweet.text),\n 'biz-uri': parse_yelp_uri(tweet.text),\n 'location': 'San Francisco, CA',\n 'date': tweet.created_at.strftime('%A, %B %d'),\n 'time': tweet.created_at.strftime('%H:%M'),\n 'tip': \"\",\n }\n return jsonify(yelp_info)", "def send_venue(self, bot, update, lat, lon, title, address, **kwargs):\n\n return bot.sendVenue(update.message.chat_id, lat, lon, title, address,\n reply_markup=ReplyKeyboardMarkup(self.keyboard),\n resize_keyboard=True,\n **kwargs)", "def send_event(event: dict):\n\n eventbridge.put_events(Entries=[event])", "def handler(event, context):\n LOGGER.debug(event)\n LOGGER.debug(context)\n records = get_users()\n artists = get_artists(records)\n if not artists:\n return {'message': 'Nothing to search :/'}\n\n spotify_authorization = authorize()\n spotify_responses = get_new_music_from_spotify(artists, spotify_authorization)\n\n for record in records:\n email_body = build_email_body_for_user(record['artists'], spotify_responses)\n send_email(email_body, record['email'])\n\n return {'message': 'all done :)'}", "def _send_event(self, name: EventName, payload):\n self.send_custom_event('Custom.Mindstorms.Gadget', name.value, payload)", "def _send_event(self, name: EventName, payload):\n self.send_custom_event('Custom.Mindstorms.Gadget', name.value, payload)", "def receive_notification(self, *args, **kwargs):\n\t\tprint(f\"{self.__location} is now hearing \\\"{args[0]}\\\" on {args[1]}\")", "def _send_event(self, title, text, tags, type, aggregation_key, severity='info'):\n event_dict = {\n 'timestamp': int(time.time()),\n 'source_type_name': self.SOURCE_TYPE_NAME,\n 'msg_title': title,\n 'event_type': type,\n 'alert_type': severity,\n 'msg_text': text,\n 'tags': tags,\n 'aggregation_key': aggregation_key,\n }\n self.event(event_dict)", "def _send_event(self, title, text, tags, type, aggregation_key, severity='info'):\n event_dict = {\n 'timestamp': int(time()),\n 'source_type_name': self.SOURCE_TYPE_NAME,\n 'msg_title': title,\n 'event_type': type,\n 'alert_type': severity,\n 'msg_text': text,\n 'tags': tags,\n 'aggregation_key': aggregation_key,\n }\n self.event(event_dict)", "def send_event(self, event_name, body):\n\n logger.info(\"Function call: send_event\")\n if not event_name:\n return self.__handle_error('Seems you not pass event slug')\n if not body:\n return self.__handle_error('Seems you not pass body')\n\n return self.__handle_result(self.__send_request('/events/name/{}'.format(event_name, ), 'POST', body))", "def handler(event,context):\n tweet = setup_and_get_tweet()\n send_tweet(tweet)", "def notify(self, event):\n\n self.send_json(event[\"payload\"])", "def write_to_splunk(**kwargs):\n event = helper.new_event(**kwargs)\n ew.write_event(event)", "def send_notification (event):\n Publisher.sendMessage (event)", 
"async def woot(message):\n r = await http.get(\"http://api.woot.com/2/events.json\", params=[\n ('key', api_key()),\n ('eventType', 'Daily'),\n ])\n data = r.json()\n offers = map(lambda e: e['Offers'], data)\n offers = reduce(lambda x, y: x + y, offers)\n return \"\\n\".join(map(lambda e: \"\\u2022 **{title}:** (${price:.2f}) [{remaining}%] <{url}>\".format(\n title=e['Title'],\n teaser=strip_html(e['Teaser']),\n price=sum(map(lambda x: x['SalePrice'], e['Items'])),\n url=re.sub(\"\\\\?.*$\", \"\", e['Url']),\n remaining=e['PercentageRemaining'],\n ), offers))", "def delegate_about_event():\n\n regs = Registration.objects.all()\n\n template = 'notifications/sprints_about_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Details of the individual events'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='madhusudancs@gmail.com')", "def track(self, distinct_id, event_name, added_properties={}, time = int(time.time())):\n\t\tproperties = {\n\t\t\t'distinct_id' : distinct_id,\n\t\t\t'time' : time,\n\t\t\t'token' : self.token\n\t\t}\n\t\tproperties.update(added_properties)\n\t\tevent = {\n\t\t\t'event' : event_name,\n\t\t\t'properties' : properties\n\t\t}\n\t\tevent_json = json.dumps(event, separators=(',', ':'))\n\t\tdata = {\n\t\t\t'data':base64.b64encode(event_json),\n\t\t\t'verbose':1,\n\t\t\t'ip':0,\n\t\t\t'api_key': self.api_key\n\t\t}\n\n\n\t\tencoded_data = urllib.urlencode(data)\n\t\t#print encoded_data\n\t\trequest = urllib2.Request(ENDPOINT, encoded_data)\n\t\tprint request\n\n\t\tresponse = urllib2.urlopen(request).read()\n\t\tprint response", "def track_generic(self, event_name: str, data: t.Dict[str, t.Any]):\n event_content = {'event_name': event_name, 'properties': data}\n logger.debug(f'[Analytics Client] Tracked event: {event_content}')\n if not self.config.no_emit:\n self.queue.put(event_content)", "def _send_to_endpoint(self, events):\n raise NotImplementedError('Please implement _send_to_endpoint().')", "def emit(self, **kwargs):\n try:\n alert_type = next(_ for _ in self.alert_types if _.name == kwargs[\"event_type\"])\n except StopIteration:\n self.logger.error(INVALID_ALERT_TYPE, kwargs[\"event_type\"])\n return\n\n self.logger.critical(kwargs)\n\n alert = Alert(alert_type)\n for key, value in six.iteritems(kwargs):\n setattr(alert, key, value)\n\n send_alert_to_subscribed_integrations(alert)", "async def send_event_conf(self, booked_action_id: int):\n async with self.pg.acquire() as conn:\n data = await conn.fetchrow(\n \"\"\"\n SELECT a.user_id, e.id as event_id,\n full_name(ub.first_name, ub.last_name) AS buyer_name,\n full_name(uh.first_name, uh.last_name) AS host_name,\n event_link(cat.slug, e.slug, e.public, $2) AS event_link, e.name, e.short_description,\n cat.name AS cat_name, cat.slug AS cat_slug,\n cat.ticket_extra_title AS ticket_extra_title,\n e.location_name, e.location_lat, e.location_lng,\n e.start_ts, e.duration, e.timezone, cat.company, co.currency, a.extra\n FROM actions AS a\n JOIN users AS ub ON a.user_id = ub.id\n JOIN events AS e ON a.event = e.id\n JOIN users AS uh ON e.host = uh.id\n JOIN categories AS cat ON e.category = cat.id\n JOIN companies co on cat.company = co.id\n WHERE a.id = $1\n \"\"\",\n booked_action_id,\n self.settings.auth_key,\n )\n tickets = await conn.fetch(\n \"\"\"\n SELECT id, user_id, trim(BOTH FROM extra_info) as extra_info, price\n FROM tickets\n WHERE booked_action = $1 and user_id is not 
null\n \"\"\",\n booked_action_id,\n )\n # use max to get the price as they should all be the same\n ticket_count, total_ticket_price, extra_donated, price = await conn.fetchrow(\n 'select count(*), sum(price), sum(extra_donated), max(price) from tickets where booked_action=$1',\n booked_action_id,\n )\n total_price = total_ticket_price and total_ticket_price + (extra_donated or 0)\n start, duration = start_tz_duration(data)\n ctx = {\n 'event_link': data['event_link'],\n 'event_name': data['name'],\n 'event_short_description': data['short_description'],\n 'event_start': start,\n 'event_duration': duration or 'All day',\n 'event_location': data['location_name'],\n 'ticket_price': display_cash_free(price, data['currency']),\n 'buyer_name': data['buyer_name'],\n 'host_name': data['host_name'],\n 'ticket_extra_title': data['ticket_extra_title'] or 'Extra Information',\n 'category_name': data['cat_name'],\n is_cat(data['cat_slug']): True,\n }\n lat, lng = data['location_lat'], data['location_lng']\n if lat and lng:\n ctx.update(\n static_map=static_map_link(lat, lng, settings=self.settings),\n google_maps_url=f'https://www.google.com/maps/place/{lat},{lng}/@{lat},{lng},13z',\n )\n\n ctx_buyer = {\n **ctx,\n 'ticket_count': ticket_count,\n 'ticket_count_plural': ticket_count > 1,\n 'extra_donated': extra_donated and display_cash_free(extra_donated, data['currency']),\n 'total_price': display_cash_free(total_price, data['currency']),\n }\n if data['extra']:\n action_extra = json.loads(data['extra'])\n ctx_buyer['card_details'] = '{brand} {card_expiry} - ending {card_last4}'.format(**action_extra)\n\n buyer_user_id = data['user_id']\n buyer_emails = None\n other_emails = []\n for ticket_id, user_id, extra_info, _ in tickets:\n if buyer_user_id == user_id:\n ctx_buyer.update(\n ticket_id=ticket_id_signed(ticket_id, self.settings), extra_info=extra_info,\n )\n buyer_emails = [UserEmail(user_id, ctx_buyer, ticket_id)]\n else:\n ctx_other = dict(**ctx, ticket_id=ticket_id_signed(ticket_id, self.settings), extra_info=extra_info)\n other_emails.append(UserEmail(user_id, ctx_other, ticket_id))\n\n if not buyer_emails:\n # unusual case where the buyer is not an attendee\n user_id = await self.pg.fetchval('select user_id from actions where id=$1', booked_action_id)\n buyer_emails = [UserEmail(user_id, ctx_buyer)]\n\n await self.send_emails.direct(\n data['company'], Triggers.ticket_buyer, buyer_emails, attached_event_id=data['event_id']\n )\n\n if other_emails:\n await self.send_emails.direct(\n data['company'], Triggers.ticket_other, other_emails, attached_event_id=data['event_id']\n )\n return len(other_emails) + 1", "def track(self, event_name: AnalyticsEventTypes, data: t.Dict[str, t.Any]):\n event_content = {'event_name': event_name.value, 'properties': data}\n logger.debug(f'[Analytics Client] Tracked event: {event_content}')\n if not self.config.no_emit:\n self.queue.put(event_content)", "async def server_event_trigger(self, event):\n event_data = event[\"event_data\"]\n await self.send_json(event_data)", "def write_point(datum):\n measurement = {\n \"measurement\": \"weather\",\n \"tags\": {\n \"location\": LOCATION\n },\n \"time\": datetime.now().isoformat(),\n \"fields\": datum\n }\n CHANNEL.basic_publish(exchange='',\n routing_key='scribe',\n body=json.dumps(measurement))", "def send_email(email_dict, appointment_id):\n event_identifier = g_cal.send_invite_through_gcal(email_dict)\n models.Appointments.objects.filter(id=appointment_id).update(event_identifier=event_identifier)", "def 
log_event(event):\r\n tracker.send(event)", "def post_event(\n api_key=None,\n app_key=None,\n title=None,\n text=None,\n date_happened=None,\n priority=None,\n host=None,\n tags=None,\n alert_type=None,\n aggregation_key=None,\n source_type_name=None,\n):\n _initialize_connection(api_key, app_key)\n if title is None:\n raise SaltInvocationError(\"title must be specified\")\n if text is None:\n raise SaltInvocationError(\"text must be specified\")\n if alert_type not in [None, \"error\", \"warning\", \"info\", \"success\"]:\n # Datadog only supports these alert types but the API doesn't return an\n # error for an incorrect alert_type, so we can do it here for now.\n # https://github.com/DataDog/datadogpy/issues/215\n message = 'alert_type must be one of \"error\", \"warning\", \"info\", or \"success\"'\n raise SaltInvocationError(message)\n\n ret = {\"result\": False, \"response\": None, \"comment\": \"\"}\n\n try:\n response = datadog.api.Event.create(\n title=title,\n text=text,\n date_happened=date_happened,\n priority=priority,\n host=host,\n tags=tags,\n alert_type=alert_type,\n aggregation_key=aggregation_key,\n source_type_name=source_type_name,\n )\n except ValueError:\n comment = (\n \"Unexpected exception in Datadog Post Event API \"\n \"call. Are your keys correct?\"\n )\n ret[\"comment\"] = comment\n return ret\n\n ret[\"response\"] = response\n if \"status\" in response.keys():\n ret[\"result\"] = True\n ret[\"comment\"] = \"Successfully sent event\"\n else:\n ret[\"comment\"] = \"Error in posting event.\"\n return ret", "def launch_request_handler(handler_input: HandlerInput) -> Response:\n day = events.get_date()\n text = events.for_day(day)\n log.info(f\"launch: events for {day} = {text}\")\n return (\n handler_input.response_builder.speak(text)\n .set_card(SimpleCard(f\"Hillbrook events for {day.strftime('%A')}:\\n{text}\"))\n .set_should_end_session(True)\n .response\n )", "async def send(self, event_type: str, data: str) -> dict:\n return await self._do_request(\"post\", send_address, self._auth,\n data=dict(eventType=event_type, data=data))", "def handle_event(event_data):\n # define variable of data\n message = event_data.get('event')\n channel = message.get('channel')\n msg = message.get('text').lower()\n userid = message.get('user')\n username = convert_unicode(sc.api_call('users.info', user=userid)).get('user').get('profile').get('display_name')\n text = None\n print(msg)\n\n if \"tasks\" in msg or \"task\" in msg:\n ret_data = fb.display_list('Business', False)\n ret_data = filter(lambda x:username in [names.strip() for names in x[2].split(',')], ret_data)\n text = \"Click <http://team8tasks.serveo.net|here> to go to the Task Website\\n\"\n ongoing_tasks = return_tasks(ret_data, 'ongoing')\n overdue_tasks = return_tasks(ret_data, 'overdue')\n completed_tasks = return_tasks(ret_data, 'completed')\n sc.api_call('chat.postMessage', channel=channel, text=text, as_user=True, attachments=[{'text': ongoing_tasks, 'mrkdwn_in': [\"text\"], 'color': '#03572C'}, {'text': overdue_tasks, 'mrkdwn_in': [\"text\"], 'color': '#ff6666'}, {'text': completed_tasks, 'mrkdwn_in': [\"text\"]}])\n return\n elif \"hello\" in msg or \"hi\" in msg or \"hey\" in msg:\n text = \"Hello <@\" + userid + \">! What's up?\"\n elif \"no u\" in msg:\n text = \"no u\"\n else:\n text = 'Sorry I do not know what that command means. 
Try \"tasks\" to list your tasks.'\n\n sc.api_call('chat.postMessage', channel=channel, text=text, as_user=True)", "def send_event(agent_name, agent_version):\n client_name, client_version = _get_client_info()\n payload = {\n 'v': '1',\n 'tid': GA_INSTANCE,\n 'aip': '1',\n 'cid': str(uuid4()),\n 't': 'event',\n 'ec': 'Client name \"{}\", version \"{}\", interpreter \"{}\"'.format(\n client_name, client_version, _get_platform_info()\n ),\n 'ea': 'Start launch',\n 'el': 'Agent name \"{}\", version \"{}\"'.format(\n agent_name, agent_version\n )\n }\n headers = {'User-Agent': 'Universal Analytics'}\n try:\n return requests.post(url=GA_ENDPOINT, data=payload, headers=headers)\n except requests.exceptions.RequestException as err:\n logger.debug('Failed to send data to Google Analytics: %s',\n str(err))", "def handle_dataevent(bot, event):\n event.reply(event.tojson())", "def notify(google=False, bing=False):\n if google:\n base_url = 'http://www.google.com/webmasters/sitemaps/ping'\n params = {'sitemap': _sitemap_url}\n ping_sitemap(base_url, params)\n if bing:\n base_url = 'http://www.bing.com/webmaster/ping.aspx'\n params = {'siteMap': _sitemap_url}\n ping_sitemap(base_url, params)\n\n if not (google or bing):\n print(\"\\n* Specify service(s) to ping.\")\n print(\"* type: 'invoke --help notify'\")\n print(\"* for the list of available options.\\n\")", "def HI(update, context):\n update.message.reply_text(\"\"\"WELCOME TO THE IoT LAB!\"\"\")", "async def event_handler(self, response):\n data = ujson.loads(response.data)\n if isinstance(data, dict):\n if data['event'] == 'subscribed':\n print('Subscribed to channel: {0}, for pair: {1}, on channel ID: {2}'.format(data['channel'], data['pair'], data['chanId']))\n self.channel_mapping[data['chanId']] = (data['channel'], data['pair'])\n elif data['event'] == 'info':\n print('Exchange: {0} Websocket version: {1}'.format(self.id, data['version']))\n elif isinstance(data, list):\n if isinstance(data[1], str):\n print('Heartbeat on channel {0}'.format(data[0]))\n else:\n # Published data, time stamp and send to appropriate queue\n timestamp = self.microseconds() / 1000\n datetime = self.iso8601(timestamp)\n if self.channel_mapping[data[0]][0] == 'book':\n pair_id = self.channel_mapping[data[0]][1]\n await self.queues['orderbooks'][pair_id].put((data, timestamp, datetime))", "def resolve_incident():\n \n headers = {\n 'Accept': 'application/vnd.pagerduty+json;version=2',\n 'Authorization': 'Token token={0}'.format(API_ACCESS_KEY),\n 'Content-type': 'application/json',\n }\n \n payload = json.dumps({\n \"service_key\": \"\", # Enter service key here\n \"incident_key\": \"\", # Enter incident key here\n \"event_type\": \"resolve\",\n \"description\": \"Andrew fixed the problem.\", # Enter personalized description\n \"details\": {\n \"fixed at\": \"2010-06-10 06:00\"\n }\n })\n \n r = requests.post('https://events.pagerduty.com/generic/2010-04-15/create_event.json',\n headers=headers,\n data=payload,\n )\n\n print r.status_code\n print r.text", "def send(self, event, message):\n pass", "def trigger(self, data):\n if settings.PUSHER_ENABLED:\n\n # get the current timestamp\n pusher_start_ts = int(time.time())\n # use pusher to send the data to clients!\n self.pusher.trigger(self.channel, self.event, data)\n\n pusher_completed_ts = int(time.time())\n log_msg = 'PSHR_NOW=\"%s\", PSHR_NOW_TS=%s, PSHR_LOG=Send, PSHR_CHANNEL=%s, ' \\\n 'PSHR_EVENT=%s, PSHR_DATA_ID=\"%s\", PSHR_ITEM=Object, PSHR_VALUE=%s, ' \\\n 'PSHR_START_TS=%s, PSHR_END_TS=%s, 
PSHR_DELTA_MS=%s, ' % (\n str(timezone.now()),\n int(time.time()),\n self.channel,\n self.event,\n str(data.get('id')),\n str(data), str(\n pusher_start_ts), str(pusher_completed_ts),\n str(int(\n pusher_completed_ts - pusher_start_ts)))\n logger.debug(log_msg)\n\n else:\n # print to console if its disable to remind us\n logger.info(\n 'settings.PUSHER_ENABLED == False ... pusher.trigger() blocked. object not sent.')\n\n # Should we write out to a local kinda-json-formatted text file?\n if settings.PUSHER_OUTPUT_TO_FILE:\n file_path = 'tmp/pusher_events__%s.%s.txt' % (self.channel, self.event)\n logger.info(\"Writing out pusher events to: `%s`\" % file_path)\n\n try:\n # Open the file and append our event data in JSON format.\n with open(file_path, mode='a+', encoding='utf-8') as outfile:\n outfile.write(json.dumps(data))\n outfile.write('\\n')\n\n except Exception as e:\n logger.error(e)\n client.captureException()", "async def send_event_update(self, action_id):\n async with self.pg.acquire() as conn:\n data = await conn.fetchrow(\n \"\"\"\n SELECT a.company AS company_id, e.id AS event_id, e.name AS event_name,\n full_name(u.first_name, u.last_name, u.email) AS sender_name,\n a.extra->>'subject' AS subject, a.extra->>'message' AS message,\n event_link(cat.slug, e.slug, e.public, $2) AS event_link,\n cat.name AS cat_name, cat.slug AS cat_slug\n FROM actions AS a\n JOIN users AS u ON a.user_id = u.id\n JOIN events AS e ON a.event = e.id\n JOIN categories AS cat ON e.category = cat.id\n WHERE a.id=$1\n \"\"\",\n action_id,\n self.settings.auth_key,\n )\n user_tickets = await conn.fetch(\n \"\"\"\n SELECT DISTINCT user_id, id AS ticket_id\n FROM tickets\n WHERE status='booked' AND event=$1 AND user_id IS NOT NULL\n \"\"\",\n data['event_id'],\n )\n\n ctx = {\n 'event_link': data['event_link'],\n 'event_name': data['event_name'],\n 'subject': data['subject'],\n 'message': data['message'],\n 'category_name': data['cat_name'],\n is_cat(data['cat_slug']): True,\n }\n users = [UserEmail(id=user_id, ctx=ctx, ticket_id=ticket_id) for user_id, ticket_id in user_tickets]\n await self.send_emails.direct(\n data['company_id'], Triggers.event_update, users, attached_event_id=data['event_id']\n )", "def find_events(handler_input):\n\n length = 0\n\n events_list = requests.get(\"http://3.17.148.9:8080/events\")\n\n # check for response code from server\n if events_list.status_code == 200:\n events_list = events_list.content\n details = json.loads(events_list.decode('utf-8'))\n length = len(details)\n\n # store count of every event\n events = dict()\n\n # generate response text\n response_text = \"\"\n\n for i in range(length):\n cat = details[i]['event_category']\n if cat not in events:\n events[cat] = 1\n else:\n events[cat] += 1\n \n for event, count in events.items():\n response_text += str(count) + \" \" + event+\", \"\n\n speech_text = \"I found {} events.\".format(response_text)\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"I found {} events.\".format(response_text), speech_text)).set_should_end_session(False)\n return handler_input.response_builder.response", "def onPing(self, payload):", "def handler(event, context):\n\n print('request: {}'.format(json.dumps(event)))\n search_text = event['arguments']['address']\n\n here_api_key = os.environ['HERE_API_KEY']\n geocoder_api = herepy.GeocoderApi(here_api_key)\n response_here = geocoder_api.free_form(search_text)\n \n response_location = response_here.items[0]\n response_address = response_location['address'] \n 
response_location = response_location['position']\n \n address = {\n 'street': response_address['label'],\n 'city': response_address['city'],\n 'state': response_address['state'],\n 'country': response_address['countryCode'],\n 'latitude': response_location['lat'],\n 'longitude': response_location['lng']\n }\n \n print('response: {}'.format(json.dumps(address))) \n return address", "def create_event(\n service_key=None, description=None, details=None, incident_key=None, profile=None\n):\n trigger_url = \"https://events.pagerduty.com/generic/2010-04-15/create_event.json\"\n\n if isinstance(details, str):\n details = salt.utils.yaml.safe_load(details)\n if isinstance(details, str):\n details = {\"details\": details}\n\n ret = salt.utils.json.loads(\n salt.utils.pagerduty.query(\n method=\"POST\",\n profile_dict=__salt__[\"config.option\"](profile),\n api_key=service_key,\n data={\n \"service_key\": service_key,\n \"incident_key\": incident_key,\n \"event_type\": \"trigger\",\n \"description\": description,\n \"details\": details,\n },\n url=trigger_url,\n opts=__opts__,\n )\n )\n return ret", "def send_event():\n range = request.args.get('range', '60')\n time = arrow.utcnow().replace(minutes=-int(range))\n data = Event.query.filter(Event.timestamp > time).order_by(Event.timestamp.desc()).all()\n return jsonify(results=[i.serialize for i in data])", "def notify(self, info, context=None):\n\n info[\"project\"] = self.project\n info[\"service\"] = self.service\n self.client.info(context or self.context,\n \"profiler.%s\" % info[\"service\"],\n info)", "def report_handler(bot, new_report):\n event_count = report[2]\n \n # Count events and take report & time\n if event_count == 0:\n event_count = new_report.count(\"|\")\n else:\n event_count += new_report.count(\"|\")\n\n timestamp = datetime.now()\n reporttime = timestamp.strftime(\"[%H:%M]\")\n\n #Console log\n print(timestamp.strftime(\"[%d %b, %H:%M]\") + \" -- \" + report)\n\n update_report(new_report, reporttime, event_count)\n \n bot.say(\"Understood.\")\n \n update_topic(bot, new_report, sopel.tools.target.Channel(CHANNEL))", "def send_tweet(self):\n \n ## Check the quality/score\n quality = self.sunsetwx_response['features'][0]['properties']['quality']\n score = self.sunsetwx_response['features'][0]['properties']['quality_percent']\n \n ## For great ones... compose a status\n if quality == 'Great':\n \n local_time_str = self.time_converted.strftime(\"%I:%M %p\")\n if self.type == 'sunrise':\n time_of_day_str = 'tomorrow morning'\n elif self.type == 'sunset':\n time_of_day_str = 'this evening'\n status = f'Looks like there will be a great {self.type} in {self.location} {time_of_day_str}! 
Check it out at {local_time_str}.'\n \n ## Post about the great ones\n api.update_status(status=status)\n \n ## Update the log regardless\n self.update_log_record(datetime.today().strftime(\"%Y-%m-%d\"), score)", "def report(events):\n # Parse events into the database\n for event in events:\n event_obj = db.models.Event(\n user=request.user,\n seen_at=datetime.datetime.fromtimestamp(event['timestamp']),\n beacon_id=event['id'],\n beacon_distance=event['distance'],\n )\n db.session.add(event_obj)\n db.session.commit()\n\n # Let the client know what happened\n return '+OK RECEIVED {event_count} EVENTS'.format(\n event_count=len(events)\n )", "def tell_sophie(message):\n \n d = {'token': cf.get('pushover', 'apikey'),\n 'user': cf.get('pushover', 'userkey'),\n 'message': message }\n requests.post('https://api.pushover.net/1/messages.json', json=d)", "def apigw_event():\n\n return {\n \"queryStringParameters\": {\n \"snotel_site\": \"322:CO:SNTL\",\n \"days\": \"30\",\n \"element_code\": \"WDIRV\"\n }\n }", "def post_data_to_sentinel(self, data, table_name, fields):\n __method_name = inspect.currentframe().f_code.co_name\n if fields:\n for event in data:\n for field in fields:\n event[field] = [event.get(field)]\n data = json.dumps(data)\n status_code = self.azuresentinel.post_data(data, table_name)\n if status_code in consts.SENTINEL_ACCEPTABLE_CODES:\n self.applogger.info(\n '{}(method={}) : {} : Successfully posted the data in the table=\"{}\"'.format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n table_name,\n )\n )\n else:\n self.applogger.error(\n '{}(method={}) : {} : Data is not posted in the table=\"{}\" status_code=\"{}\"'.format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n table_name,\n status_code,\n )\n )\n raise Exception(\n \"Error Occurred while posting data into Microsoft Sentinel Log Analytics Workspace.\"\n )", "def button(update, context) -> None:\n query = update.callback_query\n status, item_id, is_waitlist = query.data.split('|||')\n invitation = EventRegistration.objects.get(pk=item_id)\n text = invitation.process_invite(status, is_waitlist)\n safe_message_send(context.bot, query.message.chat_id, text=text)\n query.answer(text)", "def collect_events(helper, ew): # pylint: disable=no-self-argument,invalid-name,too-many-statements\n\n class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):\n \"\"\"Handles incoming requests from the browser\"\"\"\n\n SESSION_KEY = helper.context_meta['session_key']\n SSL_VERIFY = False\n\n def handle_request(self):\n \"\"\"Parses incoming POST, saves as checkpoint and sends data to Splunk\"\"\"\n try:\n content_type = self.headers.get('content-type')\n\n if content_type != 'application/json':\n self.write_empty_response(400)\n return\n\n content_len = int(self.headers.get('content-length', 0))\n\n # If content was provided, then parse it\n if content_len > 0:\n message = json.loads(self.rfile.read(content_len))\n else:\n self.write_empty_response(400)\n return\n\n helper.log_info(f'Incoming POST from {self.client_address[0]}: {message}')\n\n aspect_type = message['aspect_type']\n object_id = message['object_id']\n object_type = message['object_type']\n # make owner_id a str to avoid issues with athlete_checkpoint dict\n owner_id = str(message['owner_id'])\n\n athlete_checkpoint = helper.get_check_point(\"webhook_updates\") or {}\n\n # We only care about activity updates. 
New activities are pulled in automatically as strava_api input restarts.\n if aspect_type == 'update' and object_type == 'activity':\n if owner_id not in athlete_checkpoint:\n athlete_checkpoint[owner_id] = []\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n else:\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n helper.log_debug(f'webhooks_updates checkpoint: {helper.get_check_point(\"webhook_updates\")}')\n\n # Send data to Splunk\n data = json.dumps(message)\n event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n ew.write_event(event)\n\n # Strava API expects a 200 response\n self.write_empty_response(200)\n\n # Restart strava_api inputs to pull in the data unless it's a delete, as the input doesn't do anything with that anyway.\n if aspect_type != 'delete':\n self.restart_input('strava_api', self.SESSION_KEY)\n helper.log_info(f'Reloading Strava API input to retrieve updated activity {object_id} for athlete {owner_id}.')\n\n except Exception as ex:\n helper.log_error(f'Something went wrong in handle request: {ex}')\n\n def do_GET(self): # pylint: disable=invalid-name\n \"\"\"Responds to incoming GET request from Strava with challenge token\"\"\"\n parsed_url = urlparse(self.path)\n parsed_query = parse_qs(parsed_url.query)\n\n helper.log_info(f'Incoming request from {self.client_address[0]} - {self.path}')\n\n # Strava webhook expects a reply with the hub.challenge parameter\n challenge = parsed_query['hub.challenge'][0]\n request_verify_token = parsed_query['hub.verify_token'][0]\n\n # Respond with hub.challenge parameter if verify_token is correct\n if request_verify_token == verify_token:\n self.write_response(200, {\"hub.challenge\": challenge})\n else:\n self.write_empty_response(400)\n\n def do_POST(self): # pylint: disable=invalid-name\n \"\"\"Used for incoming POST request\"\"\"\n self.handle_request()\n\n def restart_input(self, modinput, session_key):\n \"\"\"Restarts modinput, used to trigger the Strava Activities input to pull in update.\"\"\"\n rest_url = f'https://localhost:8089/services/data/inputs/{modinput}/_reload'\n headers = {'Authorization': f'Splunk {session_key}'}\n\n response = requests.get(rest_url, headers=headers, verify=self.SSL_VERIFY)\n try:\n response.raise_for_status()\n except Exception as ex:\n helper.log_error(f'Something went wrong in input function: {ex}')\n\n def write_response(self, status_code, json_body):\n \"\"\"Craft response header with status code and json_body\"\"\"\n self.send_response(status_code)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n self.write_json(json_body)\n\n def write_empty_response(self, status_code):\n \"\"\"Craft empty response with status code.\"\"\"\n self.send_response(status_code)\n self.end_headers()\n\n def write_json(self, json_dict):\n \"\"\"Write json_dict to string and encode it.\"\"\"\n content = json.dumps(json_dict)\n\n if isinstance(content, unicode):\n content = content.encode('utf-8')\n\n self.wfile.write(content)\n\n def create_webhook(client_id, client_secret, verify_token, callback_url):\n \"\"\"Creates webhook, raises error if one already exists\"\"\"\n url = 'https://www.strava.com/api/v3/push_subscriptions'\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'verify_token': verify_token,\n 'callback_url': callback_url}\n 
response = helper.send_http_request(url, \"POST\", payload=payload, use_proxy=False)\n\n try:\n response.raise_for_status()\n except Exception:\n if 'already exists' in response.text:\n webhook_details = get_webhook(client_id, client_secret)\n helper.log_info(webhook_details)\n if 'GET to callback URL does not return 200' in response.text:\n helper.log_error(f'Error: Strava can\\'t reach {callback_url}')\n if 'not verifiable' in response.text:\n helper.log_error(f'Error: Strava can\\'t verify {callback_url}. URL incorrect or server not using public CA certificate.')\n else:\n helper.log_error(f'{response.status_code} Error: {response.text}')\n else:\n response = response.json()\n helper.log_info(f\"Webhook created successfully: ID {response['id']}\")\n\n def get_webhook(client_id, client_secret):\n \"\"\"Gets webhook details\"\"\"\n url = 'https://www.strava.com/api/v3/push_subscriptions'\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret}\n response = helper.send_http_request(url, \"GET\", payload=payload, use_proxy=False)\n\n try:\n response.raise_for_status()\n except Exception as ex:\n helper.log_error(f'Something went wrong: {ex}')\n return False\n else:\n return response.json()\n\n # Get global arguments\n port = int(helper.get_arg('port'))\n verify_token = helper.get_arg('verify_token')\n cert_file = helper.get_arg('cert_file')\n callback_url = helper.get_arg('callback_url')\n key_file = helper.get_arg('key_file')\n client_id = helper.get_global_setting('client_id')\n client_secret = helper.get_global_setting('client_secret')\n\n # Setup HTTP Server instance\n try:\n httpd = HTTPServer(('', port), SimpleHTTPRequestHandler)\n sslctx = ssl.SSLContext()\n sslctx.check_hostname = False\n sslctx.load_cert_chain(certfile=cert_file, keyfile=key_file)\n httpd.socket = sslctx.wrap_socket(httpd.socket, server_side=True)\n except Exception as err:\n helper.log_error(err)\n raise\n\n helper.log_info(f'Starting HTTPS web server on port {port}.')\n thread = Thread(target=httpd.serve_forever)\n thread.start()\n\n # Get webhook details. If it doesn't exist, create it.\n get_webhook = get_webhook(client_id, client_secret)\n if get_webhook:\n helper.log_info(f'Existing webhook: {get_webhook}')\n else:\n create_webhook(client_id, client_secret, verify_token, callback_url)", "def send_out_of_stock_notification(event: OutOfStock):\n pass", "def notify(self, title, message, config):\n notification = {\n 'application': config.get('application'),\n 'event': title,\n 'description': message,\n 'url': config.get('url'),\n 'priority': config.get('priority'),\n 'providerkey': config.get('provider_key'),\n }\n\n if isinstance(config['api_key'], list):\n config['api_key'] = [config['api_key']]\n notification['apikey'] = config['api_key']\n\n try:\n response = requests.post(PROWL_URL, data=notification)\n except RequestException as e:\n raise PluginWarning(repr(e))\n\n request_status = ET.fromstring(response.content)\n error = request_status.find('error')\n if error is not None:\n raise PluginWarning(error.text)\n else:\n success = request_status.find('success').attrib\n logger.debug(\n 'prowl notification sent. Notifications remaining until next reset: {}. 
'\n 'Next reset will occur in {} minutes',\n success['remaining'],\n success['resetdate'],\n )", "def send(self):\n event = gdata.calendar.CalendarEventEntry()\n event.title = atom.Title(text=self.title)\n event.content = atom.Content(text='')\n event.where.append(gdata.calendar.Where(value_string=self.location))\n # Set start time in 6 minutes\n start_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',\n time.gmtime(time.time() + 6 * 60))\n # Set end time in an hour\n end_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',\n time.gmtime(time.time() + 3600))\n event.when.append(gdata.calendar.When(start_time=start_time,\n end_time=end_time))\n minutes = 5\n for a_when in event.when:\n if len(a_when.reminder) > 0:\n # Adding reminder in 5 minutes before event (start_time)\n a_when.reminder[0].minutes = 5\n else:\n a_when.reminder.append(\n gdata.calendar.Reminder(minutes=minutes))\n # Insert new event\n new_event = self.calendar_service.InsertEvent(event,\n self.calendar_link)\n return new_event", "def post(self):\n json_body = self.request.body\n if not json_body:\n # TODO(davidbyttow): Log error?\n return\n\n json_body = unicode(json_body, 'utf8')\n logging.info('Incoming: ' + json_body)\n\n context, events = robot_abstract.ParseJSONBody(json_body)\n for event in events:\n try:\n self._robot.HandleEvent(event, context)\n except:\n logging.error(traceback.format_exc())\n\n json_response = robot_abstract.SerializeContext(context,\n self._robot.version)\n logging.info('Outgoing: ' + json_response)\n\n # Build the response.\n self.response.headers['Content-Type'] = 'application/json; charset=utf-8'\n self.response.out.write(json_response.encode('utf-8'))", "def lambda_handler(event, context):\n if event.get('zipcode') and event.get('country') and event.get('job'):\n data = get_current_temperature(event['zipcode'], event['country'])\n send_to_badash(event['job'], data)\n else:\n print('Error: no zipcode and/or country and/or job supplied!')\n exit(-1)", "def emit_to(key, event, data, endpoint=''):\n\n socket = None\n\n if key is User:\n key = key.id\n if key in sockets.user:\n socket = sockets.user.get(key)\n elif key in sockets.all:\n socket = sockets.all.get(key)\n\n if socket:\n socket.send_packet(dict(\n type=\"event\",\n name=event,\n args=data,\n endpoint=endpoint))\n else:\n print \"not found\"", "def send(self, events, validation_hit=False, postpone=False, date=None):\n\n # check for any missing or invalid parameters among automatically collected and recommended event types\n self._check_params(events)\n self._check_date_not_in_future(date)\n\n if postpone is True:\n # build event list to send later\n for event in events:\n event[\"_timestamp_micros\"] = self._get_timestamp(time.time())\n self._event_list.append(event)\n else:\n # batch events into sets of 25 events, the maximum allowed.\n batched_event_list = [\n events[event : event + 25] for event in range(0, len(events), 25)\n ]\n # send http post request\n self._http_post(\n batched_event_list, validation_hit=validation_hit, date=date\n )", "def notify(self, data):\n\n if 'personId' in data.keys():\n person_id = data['personId']\n if data['type'] == EventTimeLine.PERSON_CREATION:\n self._registry[person_id] = {\n 'name': data['name'],\n 'address': data['address'],\n 'status': data['status'],\n 'version': 1\n }\n\n if data['type'] == EventTimeLine.PERSON_STATUS_CHANGE:\n p = self._registry[person_id]\n p['status'] = data['newStatus']\n p['version'] += 1\n\n if data['type'] == EventTimeLine.PERSON_MOVE:\n p = self._registry[person_id]\n 
p['address'] = data['newAddress']\n p['version'] += 1", "def emit_event(self, event, item_list=None):\n sender = '%s!%s' % (self.get_url_name(), event)\n if item_list is None:\n item_list = self.get_resource_items()\n resource_event.send(sender=sender, resource=self, event=event, item_list=item_list)", "def buildEvent(data):", "def NotifyPushEvent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def notify_STL(self, title, message, *related_gear):\n\n department_email = \"{}@climbingclubuw.org\".format(self.name)\n stl_emails = [staffer.exc_email for staffer in self.stls.all()]\n email_body = (\n \"Hi {} STL! \\n\"\n \"\\n\"\n \"This is an automated message to let you know that:\\n\"\n \"{}\"\n \"\\n\".format(self.name, message)\n )\n for gear in related_gear:\n email_body += \" {}\\n\".format(gear)\n\n email_body += \"From your dearest robot <3\"\n\n send_mail(title, email_body, department_email, stl_emails, fail_silently=False)", "def handler(event, context):\n DATASERVICE = os.environ.get('DATASERVICE', None)\n\n if DATASERVICE is None:\n return 'no dataservice url set'\n\n # The consent code lambda ARN\n consentcode_func = os.environ.get('FUNCTION', None)\n\n # User must give a function that will process individual entities\n if consentcode_func is None:\n return 'no lambda specified'\n\n lam = boto3.client('lambda')\n\n study = event.get('study', None)\n # If there is no study in the event, we should re-call this function for\n # each event in the dataservice\n if study is None:\n map_to_studies(lam, context.function_name, DATASERVICE)\n\n # Call functions for each sample in the study\n elif study and consentcode_func:\n try:\n map_one_study(study, lam, consentcode_func, DATASERVICE)\n except (DataserviceException, DbGapException) as err:\n # There was a problem trying to process the study, notify slack\n msg = f'Problem invoking for `{study}`: {err}'\n attachments = [{\n 'fallback': msg,\n 'text': msg,\n 'color': 'danger'\n }]\n send_slack(attachments=attachments)", "def handler(event, context):\n if event['body'] is None:\n return respond(400, {'message': 'Bad request'})\n\n body = json.loads(event['body'])\n\n if body['long_url'] is None:\n return respond(400, {'message': 'Bad request'})\n\n try:\n info = SHORTLY.create(body['long_url'])\n return respond(200, info)\n except shortly.InternalError:\n return respond(500, {'message': 'Internal server error'})", "def process_telemetry(self, telemetry):\n _id = telemetry['id']\n\n if _id not in self.sondes:\n try:\n # This is a new sonde. 
Send the email.\n msg = 'Sonde launch detected:\\n'\n msg += '\\n'\n msg += 'Callsign: %s\\n' % _id\n msg += 'Type: %s\\n' % telemetry['type']\n msg += 'Frequency: %s MHz\\n' % telemetry['freq']\n msg += 'Position: %.5f,%.5f\\n' % (telemetry['lat'], telemetry['lon'])\n msg += 'Altitude: %dm\\n' % round(telemetry['alt'])\n msg += '\\n'\n msg += 'https://tracker.habhub.org/#!qm=All&q=RS_%s\\n' % _id\n\n msg = MIMEText(msg, 'plain', 'UTF-8')\n msg['Subject'] = 'Sonde launch detected: ' + _id\n msg['From'] = self.mail_from\n msg['To'] = self.mail_to\n msg[\"Date\"] = formatdate()\n\n s = smtplib.SMTP(self.smtp_server)\n s.sendmail(msg['From'], msg['To'], msg.as_string())\n s.quit()\n\n self.log_info(\"E-mail sent.\")\n except Exception as e:\n self.log_error(\"Error sending E-mail - %s\" % str(e))\n\n self.sondes[_id] = { 'last_time': time.time() }", "def _send_events(self, payloads, combined_events=False):\n try:\n udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n if not combined_events:\n for payload in payloads:\n print payload\n udp_socket.sendto(payload, self.statsd_addr)\n else:\n # send multiple events per packet\n payload = self.combine_key.join(payloads)\n udp_socket.sendto(payload, self.statsd_addr)\n except Exception:\n self.logger.exception(\"Error sending statsd event\")", "def ask(self):\n self.term = str(input(\"What are you looking for? (Coffee, Restaurants, Museums, Bars) \"))\n if self.term.lower() == 'quit':\n sys.exit()\n self.destination = str(input(\"Where are you looking to go? (Neighborhood, City or City, State) \"))\n if self.destination.lower() == 'quit':\n sys.exit()\n \n \n #Request/JSON\n self.request = self.session.get(\"http://api.yelp.com/v2/search\", params={'term': self.term,'location': self.destination})\n self.request = self.request.json()\n \n #Dataframing\n self.menu = json_normalize(self.request['businesses'])\n self.menu.index = list(range(1, 21))\n self.menu = self.menu[['name', 'categories', 'location.address', 'location.city', 'location.coordinate.latitude', \\\n 'location.coordinate.longitude', 'review_count', 'rating', 'snippet_text']]\\\n .sort_values(['rating'], ascending=False).sort_index()", "def find_events(handler_input):\n \n slots = handler_input.request_envelope.request.intent.slots\n \n selected_event = slots['event_cat'].resolutions.resolutions_per_authority[0].values[0].value.name\n \n events_list = requests.get(\"http://3.17.148.9:8080/events\")\n length = 0\n\n if events_list.status_code == 200:\n events_list = events_list.content\n details = json.loads(events_list.decode('utf-8'))\n length = len(details)\n\n events = dict()\n response_text = \"\"\n for i in range(length):\n if details[i][\"event_category\"].lower() == selected_event:\n cat = details[i]['event']\n if cat not in events:\n events[cat] = 1\n else:\n events[cat] += 1\n \n for event, count in events.items():\n response_text += str(count) + + event+\", \"\n\n speech_text = \"I found {}\".format(response_text)\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"I found {}\".format(response_text), speech_text)).set_should_end_session(False)\n return handler_input.response_builder.response", "def report_custom_info_event(self, description: str = None, title: str = None, properties: Dict[str, str] = {}):\n self._results_builder.report_custom_info_event(description=description, title=title,\n properties=properties, entity_selector=self.selector)", "def on_event():\n\n event = request.get_json()\n \n token_status, token_text = 
validate_token()\n\n if token_status != 0:\n return json.jsonify({'text': token_text})\n\n if event['type'] == 'ADDED_TO_SPACE' and event['space']['type'] == 'ROOM':\n text = 'Thanks for adding me to \"%s\"! For help type @bot help' % event['space']['displayName']\n \n elif event['type'] == 'MESSAGE':\n\n room_name = event['space']['name'].split('/')[1]\n commands = ['list', 'add', 'remove', 'help']\n\n try:\n param = event['message']['text'].split()[1:][0]\n except:\n text = _help()\n return json.jsonify({'text': text})\n\n if param in commands:\n\n if param == 'list':\n text = _list(room_name)\n\n elif param == 'add':\n text = _add(event, room_name)\n\n elif param == 'remove':\n text = _remove(event, room_name)\n\n elif param == 'help':\n text = _help()\n return json.jsonify({'text': text})\n \n else:\n text = send_msg(event, room_name)\n\n else:\n return\n \n return json.jsonify({'text': text})", "def add_event_from_info(db, event_info, event_id, tag):\n\n if 'description' not in event_info.keys():\n return False\n\n if len(event_info['description']) < MIN_CHARS_DESC:\n if VERBOSE:\n print('Failure: event description too short \\\n (>={} chars needed)'.format(MIN_CHARS_DESC))\n return False\n\n if 'name' in event_info.keys():\n ename = event_info['name']\n else:\n ename = None\n\n if 'venue' in event_info.keys():\n if 'name' in event_info['venue'].keys() and event_info['venue']['name']:\n lname = event_info['venue']['name']\n else:\n lname = None\n\n if 'lon' in event_info['venue'].keys() and event_info['venue']['lon']:\n lon = event_info['venue']['lon']\n else:\n lon = None\n\n if 'lat' in event_info['venue'].keys() and event_info['venue']['lat']:\n lat = event_info['venue']['lat']\n else:\n lat = None\n\n if 'address_1' in event_info['venue'].keys() \\\n and event_info['venue']['address_1']:\n address_1 = event_info['venue']['address_1']\n else:\n address_1 = None\n\n if 'zip' in event_info['venue'].keys() and event_info['venue']['zip']:\n zipcode = event_info['venue']['zip']\n else:\n zipcode = None\n\n if 'city' in event_info['venue'].keys() and event_info['venue']['city']:\n city = event_info['venue']['city']\n else:\n city = None\n\n if 'state' in event_info['venue'].keys() \\\n and event_info['venue']['state']:\n state = event_info['venue']['state']\n else:\n state = None\n else:\n lname = lon = lat = address_1 = zipcode = city = state = None\n\n if 'time' in event_info.keys() and event_info['time']:\n start_time = event_info['time']\n else:\n start_time = None\n\n if 'duration' in event_info.keys() and event_info['duration']:\n duration = event_info['duration']\n else:\n duration = None\n\n if 'description' in event_info.keys() and event_info['description']:\n description = event_info['description']\n else:\n description = None\n\n # taglist = []\n # for t in TAGS:\n # if t in description.lower() or t in ename.lower():\n # taglist.append(t)\n #\n # if len(taglist) > 0:\n # print(ename, taglist)\n # else:\n # return\n\n cursor = db.cursor()\n\n cursor.execute(\"\"\"SELECT eid\n FROM Events\n WHERE mid = %s\n \"\"\",\n (event_id, ))\n\n result = cursor.fetchone()\n\n if result:\n print('Event already in database.')\n return\n\n cursor.execute(\"\"\"SELECT eid\n FROM Events\n WHERE ename = %s\n \"\"\",\n (ename, ))\n if result:\n print('Event already in database.')\n return\n\n loc_query = \\\n \"\"\"\n INSERT\n INTO Locations(lname, lat, lon, address_1, zip, city, state)\n VALUES (%s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n\n cursor.execute(loc_query, (\n lname,\n lon,\n lat,\n 
address_1,\n zipcode,\n city,\n state,\n ))\n\n db.commit()\n\n print('Inserted into Locations.')\n\n cursor.execute('SELECT LAST_INSERT_ID()')\n\n lid = cursor.fetchone()\n\n start_date = str(datetime.fromtimestamp(start_time / 1000))\n\n if start_date and duration:\n end_date = str(datetime.fromtimestamp((start_time + duration) / 1000))\n else:\n end_date = None\n\n ev_query = \\\n \"\"\"\n INSERT\n INTO Events(ename, start_date, end_date,\n num_attending, lid, description, mid)\n VALUES (%s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n\n cursor.execute(ev_query, (\n ename.encode('ascii', 'ignore'),\n start_date,\n end_date,\n 0,\n lid,\n description.encode('ascii', 'ignore'),\n event_id,\n ))\n\n db.commit()\n\n print('Inserted into Events.')\n\n cursor.execute('SELECT LAST_INSERT_ID()')\n\n eid = cursor.fetchone()\n\n # for tag in taglist:\n # category = None\n # for c in CATEGORIES:\n # if tag in CATEGORIES[c]:\n # category = c\n\n et_query = \\\n \"\"\"\n INSERT\n INTO EventTags(eid, tag, category)\n VALUES (%s, %s, %s)\n \"\"\"\n\n cursor.execute(et_query, (eid, tag, tag))\n\n db.commit()\n\n print('Inserted into EventTags.')\n\n if VERBOSE:\n print('Finished.')\n return True", "def post_volunteers(data):\r\n logging.info(\"STARTING FUNCTION\")\r\n \r\n # This was needed when want to decode http body data from googe cloud\r\n # when using cloud functions. This can be ignored\r\n # data = ast.literal_eval(request.data.decode('utf-8'))\r\n \r\n # Determine which mode we are in\r\n service_name = data['service_name']\r\n event_name = data['event_name']\r\n loc_name = data['location_name']\r\n \r\n logging.info(f'We are doing service: {service_name}')\r\n \r\n # Get mapping ids from api\r\n service_name_to_id = get_service_mapping()\r\n event_to_id = get_event_mapping()\r\n\r\n # Get Service id\r\n service_id = service_name_to_id[service_name]\r\n # Get event id\r\n event_id = event_to_id[event_name]\r\n # Get location id\r\n location_id = get_location_id(event_id, loc_name)\r\n # Get event period and event time ids\r\n event_period_id, event_time_to_id = get_event_times(event_id)\r\n\r\n # loop through and find correct service\r\n for i in range(n_future_plans):\r\n \r\n # Get plan times\r\n try:\r\n upcoming_plan_id, service_time_ids_to_time = get_future_plans(service_id, i)\r\n except IndexError:\r\n logging.exception(f'Ran out of services to compare with event times {event_time_to_id}')\r\n raise\r\n\r\n # Check this is the correct service plan(times should match with event time)\r\n if not all([event_time in service_time_ids_to_time.values() for event_time in event_time_to_id.keys()]):\r\n logging.warning(f'Not correct service times: {service_time_ids_to_time }')\r\n continue\r\n \r\n\t\t# Get volunteers attending at the event\r\n volunteers = get_volunteers(\r\n\t\t\tservice_id,\r\n\t\t\tupcoming_plan_id,\r\n\t\t\tlocation_id,\r\n\t\t\tevent_id,\r\n\t\t\tevent_period_id,\r\n\t\t\tservice_time_ids_to_time,\r\n\t\t\tevent_time_to_id\r\n\t\t)\r\n break\r\n else:\r\n raise LookupError(f'Can not find future service with event times {event_time_to_id}')\r\n \r\n # Open a requests session, this is where we post the volunteers to \r\n # PCO api, as no post api exists at the moment for check-in\r\n bulk_check_url = bulk_check_url_fmt.format(event_period_id)\r\n with requests.Session() as s:\r\n # This is where we make the first request to generate an csrf-token\r\n r = s.get(login_url, headers=headers)\r\n\r\n # Using Beautiful Soup to scrape the tokens from the page source\r\n soup = 
BeautifulSoup(r.content, 'html.parser') \r\n\r\n # Here we are populating header with the csrf-token to allow for future posts\r\n headers[\"x-csrf-token\"] = soup.find('meta', attrs={'name': 'csrf-token'})['content']\r\n\r\n # Here we login by submitting the post request with the same session passing the url, login_data, and headers\r\n r = s.post(login_url, data=login_data, headers=headers)\r\n\r\n # Posting all volunteers in check in.\r\n for payload in volunteers:\r\n check_person = s.post(bulk_check_url, data=payload, headers=headers)\r\n if check_person.status_code != 200:\r\n logging.error(f'ERROR CODE: {check_person.status_code}, when posting payload {payload}')\r\n \r\n logging.info('FINISHED FUNCTION')\r\n return f'Done!'", "def post(self):\n policies = json.loads(self.request.get('policies'))\n request = json.loads(self.request.get('request_json'))\n response = json.loads(self.request.get('response_json'))\n\n maybe_notify_backend('LEASED', response['hostname'], policies)\n maybe_notify_lessee(request, response)", "def trigger_service(call):\n event = call.data.get(ATTR_EVENT)\n value1 = call.data.get(ATTR_VALUE1)\n value2 = call.data.get(ATTR_VALUE2)\n value3 = call.data.get(ATTR_VALUE3)\n if event is None:\n return\n\n try:\n import pyfttt as pyfttt\n pyfttt.send_event(key, event, value1, value2, value3)\n except requests.exceptions.RequestException:\n _LOGGER.exception(\"Error communicating with IFTTT\")", "def handler(event, context):\n alert_message = json.loads(event['Records'][0]['Sns']['Message'])\n alarm_name = alert_message['AlarmName']\n reason = alert_message['NewStateReason']\n new_state = alert_message['NewStateValue']\n color = \"good\" if new_state == 'OK' else \"danger\"\n\n region = os.getenv('AWS_DEFAULT_REGION')\n alert_url = f'https://console.aws.amazon.com/cloudwatch/home?region={region}#alarm:alarmFilter=ANY;name={alarm_name}'\n link = f\"<{alert_url}|{alarm_name}>\"\n\n secrets = json.loads(get_secret()['SecretString'])\n default_slack_channel = secrets['slack_alert_channel']\n alarm_description = json.loads(alert_message.get('AlarmDescription', '{}'))\n slack_channel = alarm_description.get(\"slack_channel\", default_slack_channel)\n description = alarm_description.get(\"description\")\n slack_message = '\\n'.join(\n [f\"New state: {new_state}\", f\"Description: {description}\", reason]\n )\n\n attachments = [{\n \"fallback\": f\"{link} {slack_message}\",\n \"title\": alarm_name,\n \"title_link\": alert_url,\n \"text\": slack_message,\n \"color\": color\n }]\n\n slack_url = secrets['slack_webhooks'][slack_channel]\n\n post_message_to_url(slack_url, {\"attachments\": attachments})", "def storm():\n sourcetype = 'generic_single_line'\n source = 'webhook'\n\n post_data = flask.request.data\n if not post_data:\n post_data = flask.request.form.keys()[0]\n\n event_params = {\n 'event_text': post_data, 'sourcetype': sourcetype, 'source': source}\n\n return _send_log(event_params)", "def main_handler(event):\n\n address = \"\"\n try:\n address = get_address(event)\n except ValueError:\n # Value error is raised if no permissions to address\n return ask_permissions()\n except Exception:\n # Some other error when getting location\n speech_output = \"Sorry, error occurred when retrieving device address.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n nearest_stations = get_nearest_stations(3, address)\n station = nearest_stations[0]\n\n speech_output = f\"On station {station['name']} is 
{station['bikesAvailable']} \" \\\n \"bikes available. Do you want to hear more nearby stations?\"\n\n session_attributes = {\n \"previousIntent\": \"mainHandler\",\n \"nextStations\": build_next_stations(nearest_stations[1:3])\n }\n\n response = build_speechlet_response(CARD_TITLE, speech_output, False)\n return build_response(response, session_attributes)", "def tapahtumat(bot, update):\n\n text = \"\"\n events = get_events()\n for calendar_name, calendar_events in events.items():\n text += f\"\\n*{calendar_name}*\\n\"\n for event in calendar_events:\n if len(event) == 1:\n text += f\"{event[0]}\\n\"\n else:\n text += f\"{'.'.join(event[0].split('-')[::-1])} [{event[1]}]({event[2]})\\n\"\n bot.send_message(update.effective_chat.id, text, parse_mode=\"MARKDOWN\")", "def main():\n credentials = get_credentials()\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n max = 7\n events = getEvents(credentials, now, max)\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])\n #addEvent(credentials)", "def on_data(data: dict):\n if 'symbol' in data and 'messageType' in data:\n if data['symbol'] == 'AAPL' and data['messageType'] == 'tradingstatus':\n print(\"APPL initial message received\")\n event1.set()\n if data['symbol'] == 'KPTI' and data['messageType'] == 'tradingstatus':\n print(\"KPTI initial message received\")\n event2.set()", "def dispatch_event(event):\n queue = connect_to_sqs() \n logging.info('Writing event to SQS:' + str(json.dumps(event.params)))\n\n visitor = event.params['visitors'][0]['visitor_id']\n attributes = event.params['visitors'][0]['attributes']\n snapshot = event.params['visitors'][0]['snapshots'][0]\n\n response = queue.send_message(MessageBody=json.dumps({visitor: (attributes, snapshot)}))", "def ping(event, context):\n logger.info(\"Ping requested.\")\n return _get_response(200, \"PONG!\")", "def callback(ch, method, properties, body):\n logging.info(\" [x] salary scheduling %r\" % (body, ))\n send_task('skype_messaging.notify_devs', ['Schedule Salary Update : %r' % body])\n data = json.loads(body)\n subcon_id = data['subcon_id']\n eta = datetime.strptime(data['scheduled_date'], '%Y-%m-%d %H:%M:%S')\n tz = timezone(data['timezone'])\n eta = tz.localize(eta)\n ph_tz = timezone('Asia/Manila')\n eta = eta.astimezone(ph_tz)\n logging.info(' [x] sending task %s @ %s' % (subcon_id, eta))\n send_task('ScheduleActivation.StaffSalaryUpdate', args=[subcon_id,], eta=eta)", "def sendPing(self, payload=None):", "def post(self):\n\n # we need a unique tx number so we can look these back up again\n # as well as for logging\n # FIXME: how can we guarantee uniqueness here?\n tx = int(time.time() * 100000) + random.randrange(10000, 99999)\n\n log.info(\"EVENTS [{}]: Creating events\".format(tx))\n\n try:\n user = self.jbody[\"user\"]\n if not EMAIL_REGEX.match(user):\n user += \"@\" + self.domain\n event_type_id = self.jbody.get(\"eventTypeId\", None)\n category = self.jbody.get(\"category\", None)\n state = self.jbody.get(\"state\", None)\n note = self.jbody.get(\"note\", None)\n except KeyError as err:\n raise exc.BadRequest(\n \"Missing Required Argument: {}\".format(err.message)\n )\n except ValueError as err:\n raise exc.BadRequest(err.message)\n\n if not event_type_id and (not category and not state):\n raise exc.BadRequest(\n \"Must specify an event type id or both category and state\"\n )\n\n if event_type_id:\n event_type = 
self.session.query(EventType).get(event_type_id)\n else:\n event_type = self.session.query(EventType).filter(\n and_(\n EventType.category == category,\n EventType.state == state\n )\n ).one()\n\n if event_type is None:\n self.write_error(400, message=\"Bad event type\")\n return\n\n category = event_type.category\n state = event_type.state\n\n hostnames = (\n [self.jbody.get(\"hostname\", None)]\n if self.jbody.get(\"hostname\", None) else []\n )\n\n if \"hostnames\" in self.jbody:\n hostnames.extend(self.jbody.get(\"hostnames\"))\n\n log.info(\n \"EVENTS [{}]: Will create event {} {}\".format(\n tx, category, state\n )\n )\n\n log.info(\n \"EVENTS [{}]: Hostnames specified: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # If a host query was specified, we need to talk to the external\n # query server to resolve this into a list of hostnames\n if \"hostQuery\" in self.jbody:\n query = self.jbody[\"hostQuery\"]\n log.info(\"EVENTS [{}]: Running query {}\".format(tx, query))\n response = PluginHelper.request_get(params={\"query\": query})\n if response.json()[\"status\"] == \"ok\":\n hostnames.extend(response.json()[\"results\"])\n log.info(\n \"EVENTS [{}]: Hostnames after query: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # If a quest Id was given, look up the labors in that quest and\n # get all the hostnames for those labors.\n if \"questId\" in self.jbody:\n log.info(\"EVENTS [{}]: Looking up quest {}\".format(\n tx, self.jbody[\"questId\"])\n )\n quest = self.session.query(Quest).filter_by(\n id=self.jbody[\"questId\"]\n ).scalar()\n if not quest:\n raise exc.NotFound(\"No such Quest {} found\".format(id))\n for labor in quest.labors:\n hostnames.append(labor.host.hostname)\n\n log.info(\n \"EVENTS [{}]: Hostnames after quest expansion: {}\".format(\n tx, \", \".join(hostnames)\n )\n )\n\n # We need to create a list of hostnames that don't have a Host record\n new_hosts_needed = set(hostnames)\n hosts = (\n self.session.query(Host).filter(Host.hostname.in_(hostnames)).all()\n )\n\n for host in hosts:\n new_hosts_needed.remove(str(host.hostname))\n\n # if we need to create hosts, do them all at once\n if new_hosts_needed:\n log.info(\"EVENTS [{}]: Creating hosts {}\".format(\n tx, \", \".join(new_hosts_needed)\n ))\n Host.create_many(self.session, new_hosts_needed)\n hosts = (\n self.session.query(Host).filter(\n Host.hostname.in_(hostnames)\n ).all()\n )\n\n if not hosts:\n raise exc.BadRequest(\"No hosts found with given list\")\n\n try:\n if len(hosts) > 1:\n # if we are supposed to create many events,\n # we want to do them as a giant batch\n log.info(\"EVENTS [{}]: Creating multiple events\".format(tx))\n events_to_create = []\n for host in hosts:\n events_to_create.append({\n \"host_id\": host.id,\n \"user\": user,\n \"event_type_id\": event_type.id,\n \"note\": note,\n \"tx\": tx\n })\n Event.create_many(self.session, events_to_create, tx)\n else:\n # if we are just creating one event, do it the simple way\n log.info(\"EVENTS [{}]: Creating 1 event\".format(tx))\n event = Event.create(\n self.session, hosts[0], user, event_type, note=note\n )\n\n except IntegrityError as err:\n raise exc.Conflict(err.orig.message)\n except exc.ValidationError as err:\n raise exc.BadRequest(err.message)\n\n log.info(\"EVENTS [{}]: Flushing and committing\".format(tx))\n self.session.flush()\n log.info(\"EVENTS [{}]: Flushed\".format(tx))\n self.session.commit()\n log.info(\"EVENTS [{}]: Committed\".format(tx))\n\n if len(hosts) == 1:\n json = event.to_dict(self.href_prefix)\n 
json[\"href\"] = \"/api/v1/events/{}\".format(event.id)\n self.created(\n \"/api/v1/events/{}\".format(event.id), json\n )\n else:\n # if we created many events, we need to look them up by the TX\n # number to figure out what they were since the were created in bulk\n created_events = self.session.query(Event).filter(Event.tx == tx).all()\n self.created(\n data={\n \"events\": (\n [event.to_dict(self.href_prefix) for event in created_events]\n ),\n \"totalEvents\": len(created_events)\n }\n )\n\n log.info(\"EVENTS [{}]: Created event {} {} for {}\".format(\n tx, category, state,\n \", \".join(hostnames)\n ))", "def FoodCheckIn(sc, event):\n channel = sc.api_call('channels.info', channel=event['channel'])\n food = event['text'][9:]\n if food:\n if 'pizza' in food:\n sc.api_call('reactions.add', as_user='true', channel=event['channel'],\n timestamp=event['ts'], name='pizza')\n user = sc.api_call('users.info', user=event['user'])\n db = pymysql.connect(host='localhost', user='pizzabot', db='pizzachat')\n cursor = db.cursor()\n query = 'INSERT INTO foodlist (who, what) VALUES (%s, %s)'\n cursor.execute(query, (user['user']['name'], food.encode('utf-8')))\n db.commit()\n db.close()", "def collect_events(helper, ew): # pylint: disable=no-self-argument,invalid-name,too-many-statements,too-many-branches\n\n def clear_checkbox(session_key, stanza):\n \"\"\" Sets the 'reindex_data' value in the REST API to 0 to clear it. Splunk then automatically restarts the input.\"\"\"\n url = f'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/data/inputs/strava_api/{stanza}'\n headers = {'Authorization': f'Splunk {session_key}'}\n payload = 'reindex_data=0'\n helper.send_http_request(url, \"POST\", headers=headers, payload=payload, verify=False, use_proxy=False)\n\n def get_activities(ts_activity, access_token):\n \"\"\"Gets all activities, 30 per page as per Strava's default.\"\"\"\n params = {'after': ts_activity, 'access_token': access_token}\n url = \"https://www.strava.com/api/v3/activities\"\n response = return_json(url, \"GET\", parameters=params)\n return response\n\n def get_activity(activity, token):\n \"\"\"Gets specific activity.\"\"\"\n url = f'https://www.strava.com/api/v3/activities/{activity}?include_all_efforts=true'\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response\n\n def get_activity_stream(token, activity, types, series_type='time', resolution='high'):\n \"\"\"Gets the activity stream for given activity id.\"\"\"\n types = ','.join(types)\n params = {'access_token': token}\n url = f'https://www.strava.com/api/v3/activities/{activity}/streams/{types}&series_type={series_type}&resolution={resolution}&key_by_type='\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response\n\n def get_athlete(token):\n \"\"\"Gets details on currently logged in athlete.\"\"\"\n url = \"https://www.strava.com/api/v3/athlete\"\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response\n\n def get_epoch(timestamp):\n \"\"\"Converts Strava datetime to epoch timestamp\"\"\"\n timestamp_dt = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%SZ\")\n epoch = calendar.timegm(timestamp_dt.timetuple())\n return epoch\n\n def get_token(client_id, client_secret, token, renewal):\n \"\"\"Get or refresh access token from Strava API.\"\"\"\n url = \"https://www.strava.com/api/v3/oauth/token\"\n\n if renewal:\n payload = {\n 'client_id': 
client_id,\n 'client_secret': client_secret,\n 'refresh_token': token,\n 'grant_type': 'refresh_token'}\n message = \"Successfully refreshed Strava token.\"\n else:\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'code': token,\n 'grant_type': 'authorization_code'}\n message = \"Successfully authenticated with Strava using access code.\"\n\n response = return_json(url, \"POST\", payload=payload)\n helper.log_info(message)\n return response\n\n def kvstore_save_athlete(session_key, athlete_id, firstname, lastname, weight, ftp): # pylint: disable=too-many-arguments\n \"\"\"Stores athlete's id, first name, last name, weight and ftp into strava_athlete KV Store collection.\"\"\"\n url = 'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/storage/collections/data/strava_athlete/batch_save'\n headers = {'Content-Type': 'application/json', 'Authorization': f'Splunk {session_key}'}\n payload = [{\"_key\": athlete_id, \"id\": athlete_id, \"firstname\": firstname, \"lastname\": lastname, \"fullname\": firstname + \" \" + lastname, \"weight\": weight, \"ftp\": ftp}]\n helper.send_http_request(url, \"POST\", headers=headers, payload=payload, verify=False, use_proxy=False)\n\n def parse_data(data, activity_id, activity_start_date):\n \"\"\"Gets raw JSON data, parses it into events and writes those to Splunk.\"\"\"\n data_dict = {}\n final_dict = {}\n for i in data:\n data_dict[i['type']] = i['data']\n\n counter = 1\n nrange = len(data_dict['time'])\n for item in range(1, nrange + 1):\n final_dict[item] = {}\n\n for key, value in data_dict.items():\n counter = 1\n for i in value:\n final_dict[counter][key] = i\n final_dict[counter]['activity_id'] = activity_id\n\n if 'time' in key:\n final_dict[counter]['time'] = final_dict[counter]['time'] + activity_start_date\n final_dict[counter]['time'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(final_dict[counter]['time']))\n\n if 'latlng' in key:\n final_dict[counter]['lat'] = final_dict[counter]['latlng'][0]\n final_dict[counter]['lon'] = final_dict[counter]['latlng'][1]\n final_dict[counter].pop('latlng')\n counter += 1\n\n result_list = [value for key, value in final_dict.items()]\n\n for event in result_list:\n write_to_splunk(index=helper.get_output_index(), sourcetype='strava:activities:stream', data=json.dumps(event))\n\n helper.log_info(f'Added activity stream {activity_id} for {athlete_id}.')\n return True\n\n def return_json(url, method, **kwargs):\n \"\"\"Gets JSON from URL and parses it for potential error messages.\"\"\"\n response = helper.send_http_request(url, method, use_proxy=False, **kwargs)\n\n try:\n response.raise_for_status()\n except requests.HTTPError as ex:\n # status code 429 means we hit Strava's API limit, wait till next 15 minute mark (+5 seconds) and try again\n if ex.response.status_code == 429:\n # Get the 15m/24h API limits for this user\n api_usage_15m = response.headers['X-RateLimit-Usage'].split(\",\")[0]\n api_usage_24h = response.headers['X-RateLimit-Usage'].split(\",\")[1]\n api_limit_15m = response.headers['X-RateLimit-Limit'].split(\",\")[0]\n api_limit_24h = response.headers['X-RateLimit-Limit'].split(\",\")[1]\n\n timestamp_now = int(time.time())\n modulus_time = timestamp_now % 900\n sleepy_time = 0 if modulus_time == 0 else (900 - modulus_time + 5)\n helper.log_warning(f'Strava API rate limit hit. Used {api_usage_15m}/15min (limit {api_limit_15m}), {api_usage_24h}/24h (limit {api_limit_24h}). 
Sleeping for {sleepy_time} seconds.')\n time.sleep(sleepy_time)\n response = return_json(url, method, **kwargs)\n helper.log_debug(f'429 detail: {response}')\n return response\n if ex.response.status_code in (400, 401):\n helper.log_error(f'{ex.response.status_code} Error: Strava API credentials invalid or session expired. Make sure Client ID & Client Secret have been added to the Configuration -> Add-On Parameters tab and your access code is valid.')\n sys.exit(1)\n if ex.response.status_code == 404:\n helper.log_warning(f'404 Error: no stream data for url {url}, can happen for manually added activities.')\n return False\n if ex.response.status_code == 500:\n helper.log_warning(f'500 Error: no data received from Strava API for url {url}, it might be corrupt or invalid. Skipping activity.')\n return False\n # In case there's any other error than the ones described above, log the error and exit.\n helper.log_error(f'Error: {ex}')\n sys.exit(1)\n\n # Must have been a 200 status code\n return response.json()\n\n def set_athlete(response):\n \"\"\"Creates dict with athlete details, including token expiry.\"\"\"\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_token': response['refresh_token'],\n 'expires_at': response['expires_at'],\n 'ts_activity': 0}\n return athlete\n\n def write_to_splunk(**kwargs):\n \"\"\"Writes activity to Splunk index.\"\"\"\n event = helper.new_event(**kwargs)\n ew.write_event(event)\n\n # get configuration arguments\n client_id = helper.get_global_setting('client_id')\n client_secret = helper.get_global_setting('client_secret')\n access_code = helper.get_arg('access_code')\n start_time = helper.get_arg('start_time') or 0\n types = ['time', 'distance', 'latlng', 'altitude', 'velocity_smooth', 'heartrate', 'cadence', 'watts', 'temp', 'moving', 'grade_smooth']\n\n # stanza is the name of the input. This is a unique name and will be used as a checkpoint key to save/retrieve details about an athlete\n stanza = list(helper.get_input_stanza())[0]\n athlete = helper.get_check_point(stanza)\n helper.log_debug(f'Athlete: {athlete}')\n\n # if reindex_data checkbox is set, update the start_time to be the one specified and clear the checkbox.\n if helper.get_arg('reindex_data'):\n if int(helper.get_arg('reindex_data')) == 1:\n athlete.update({'ts_activity': start_time})\n helper.save_check_point(stanza, athlete)\n # the clear_checkbox function will restart this input as soon as the change is made, so no further code required.\n clear_checkbox(helper.context_meta['session_key'], stanza)\n\n # if athlete is set, get details & tokens - otherwise fetch tokens with get_token()\n if athlete:\n athlete_id = athlete['id']\n athlete_name = athlete['name']\n expires_at = athlete['expires_at']\n refresh_token = athlete['refresh_token']\n else:\n expires_at = False\n refresh_token = False\n\n # Check if expires_at token is set and renew token if token expired. 
Otherwise fetch token with initial access code.\n if expires_at:\n if time.time() >= expires_at:\n response = get_token(client_id, client_secret, refresh_token, renewal=True)\n helper.log_debug(f\"Access token: {response['access_token']}, refresh token: {response['refresh_token']}\")\n athlete.update({'access_token': response['access_token'], 'refresh_token': response['refresh_token'], 'expires_at': response['expires_at']})\n else:\n response = get_token(client_id, client_secret, access_code, renewal=False)\n athlete = set_athlete(response)\n athlete_id = athlete['id']\n athlete_name = athlete['name']\n\n helper.save_check_point(stanza, athlete)\n\n access_token = athlete['access_token']\n athlete_detail = get_athlete(access_token)\n athlete_firstname = athlete_detail['firstname']\n athlete_lastname = athlete_detail['lastname']\n athlete_weight = ''\n athlete_ftp = ''\n if athlete_detail['resource_state'] == 3:\n athlete_weight = athlete_detail['weight']\n athlete_ftp = athlete_detail['ftp']\n\n helper.log_debug(\"Saving athlete's details to KV Store.\")\n kvstore_save_athlete(helper.context_meta['session_key'], str(athlete_id), athlete_firstname, athlete_lastname, str(athlete_weight), str(athlete_ftp))\n\n # For backwards compatibility with upgrades from pre-2.5.0, which uses athlete['ts_newest_activity']. If there, clean them up.\n if 'ts_newest_activity' in athlete:\n helper.log_info(f\"Found existing timestamp {athlete['ts_newest_activity']}! Will remove it now.\")\n ts_activity = athlete['ts_newest_activity']\n athlete.update({'ts_activity': ts_activity})\n athlete.pop('ts_newest_activity')\n athlete.pop('get_old_activities')\n athlete.pop('ts_oldest_activity')\n helper.save_check_point(stanza, athlete)\n else:\n ts_activity = athlete['ts_activity'] or start_time\n\n # webhook_updates contains updated activities that came in via webhook.\n webhook_updates = helper.get_check_point('webhook_updates') or {}\n\n if str(athlete_id) in webhook_updates:\n for activity in webhook_updates[str(athlete_id)][:]:\n helper.log_info(f'Received update via webhook for activity {activity} from athlete {athlete_id}')\n response = get_activity(activity, access_token)\n ts_activity = get_epoch(response['start_date'])\n\n # Store the event in Splunk\n write_to_splunk(index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=json.dumps(response))\n\n # Get stream data for this activity and write to Splunk\n stream_data = get_activity_stream(access_token, activity, types)\n if stream_data:\n parse_data(stream_data, activity, ts_activity)\n\n # Remove from dict and save dict\n webhook_updates[str(athlete_id)].remove(activity)\n helper.save_check_point('webhook_updates', webhook_updates)\n helper.log_info(f'Got all webhook events for athlete {athlete_id}')\n\n helper.log_info(f'Checking if there are new activities for {athlete_name} ({athlete_id})')\n\n while True:\n\n response_activities = get_activities(ts_activity, access_token)\n\n # if all activities retrieved, set get_old_activities, save checkpoint and end loop to finish\n if len(response_activities) == 0: # pylint: disable=no-else-break\n helper.log_info(f'All done, got all activities for {athlete_name} ({athlete_id})')\n break\n else:\n # Get more details from each activity\n for event in response_activities:\n activity_id = event['id']\n response = get_activity(activity_id, access_token)\n\n # response = False for a 500 Error, which is likely an invalid Strava API file. 
In that case skip the activity and continue.\n if response:\n data = json.dumps(response)\n\n # Get start_date (UTC) and convert to UTC timestamp\n ts_activity = get_epoch(event['start_date'])\n\n # Store the event in Splunk\n write_to_splunk(index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n helper.log_info(f'Added activity {activity_id} for {athlete_id}.')\n\n # Get stream data for this activity\n stream_data = get_activity_stream(access_token, activity_id, types)\n if stream_data:\n parse_data(stream_data, activity_id, ts_activity)\n\n # Save the timestamp of the last event to a checkpoint\n athlete.update({'ts_activity': ts_activity})\n helper.save_check_point(stanza, athlete)", "def send_event(self, event):\n cmd = \"event \" + event\n self.mgen_pipe.Send(cmd)", "def hello_world(\n event: Dict[str, Any],\n context,\n):\n body_str = event.get(\"body\", \"{}\")\n body_str = body_str if body_str else \"{}\"\n body_obj = json.loads(body_str)\n wiki_search_term = body_obj.get(\"searchTerm\", \"\")\n if not body_obj or not wiki_search_term:\n # https://docs.aws.amazon.com/apigateway/latest/developerguide/handle-errors-in-lambda-integration.html\n response = {\n \"statusCode\": 400,\n \"headers\": {\"Content-Type\": \"application/json\"},\n \"body\": json.dumps({\"message\": \"Wikipedia search term was not provided\"}),\n }\n else:\n summary = wikipedia.summary(wiki_search_term)\n response = {\n \"statusCode\": 200,\n \"headers\": {\"Content-Type\": \"application/json\"},\n \"body\": json.dumps(summary),\n }\n # https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format\n return response", "def scrape_event(self, body):\n\n content = body.find('div', {'id': 'main-content'})\n\n title = self.scrape_title(body)\n description = self.scrape_description(content)\n location = self.scrape_location(content)\n location_details = self.scrape_location_details(content)\n admission = self.scrape_admission(content)\n admission_details = self.scrape_admission_details(content)\n # sponsor = self.scrape_sponsor(content)\n related_url = self.scrape_related_url(content)\n invited_audience = self.scrape_invited_audience(content)\n categories = self.scrape_categories(content)\n image = self.scrape_image(content)\n date_times = self.scrape_dates(content)\n\n cost = admission_details\n\n if admission_details == '\"\"':\n cost = admission\n\n event_list = []\n\n for date_time in date_times:\n date, start_time = self.date_time_to_tuple(date_time[0])\n end_time = ''\n\n # If the date_time tuple shows that it is an all day event\n if date_time[1]:\n start_time = '8:00'\n end_time = '20:00'\n event_dict = {\n 'Title': title,\n \"Description\": description,\n 'Date From': date,\n 'Start Time': start_time,\n 'End Time': end_time,\n 'Location': location,\n 'Cost': cost,\n 'Event Website': related_url,\n 'Photo URL': image,\n \"Invited Audience\": invited_audience,\n \"Event Types\": categories,\n \"Location Details\": location_details\n }\n event_list.append(event_dict)\n return event_list", "def send(self, count: int):\n return self.analytics.send(self.anal_name, count)", "def send_alert(self, level, title, additional_data):\n\n full_title = \"{}: {}\".format(self.build_descriptive(), title)\n\n # Make sure that additional_data is a string ... 
OR, make it JSON.\n if not isinstance(additional_data, str):\n additional_data = json.dumps(additional_data)\n\n self.logger.debug(\"Sending Alert -> {} :: {} :: {}\".format(level, full_title, additional_data))\n\n # Send the alert to each of the systems, The alert system(s) will be responsible for filtering.\n for system in self.alert_systems.values():\n system.send_alert(self.__class__.__name__, level, full_title, additional_data)", "def handler(event, context):\r\n \r\n # define logging default level\r\n log_level = logging.INFO\r\n logging.basicConfig(level=log_level)\r\n\r\n # parse the stringified json sent from javascript\r\n event = json.loads(event['body'])\r\n\r\n # extract street name for query\r\n street_name = event['street_name']\r\n \r\n # enable queries with quote sign like d'Astier\r\n street_name = re.sub(\"(')\",\"''\",street_name)\r\n\r\n # query to prepare examples of towns corresponding to street name\r\n sql_query = query_streets.format(street_name)\r\n df = pd.DataFrame(perform_query(sql_query))\r\n \r\n if len(df)!=0:\r\n df['street'] = df[0].apply(lambda x: x['VarCharValue'])\r\n df['zip'] = df[1].apply(lambda x: x['VarCharValue'])\r\n df['town'] = df[2].apply(lambda x: x['VarCharValue'])\r\n\r\n df_results = df.drop(columns=[0,1,2])\r\n \r\n results_string = \"\"\r\n for i, r in df_results.iterrows():\r\n results_string += \"<li>{} - {} ({})</li>\" \\\r\n .format(r['street'],r['town'],r['zip'][0:-3].zfill(2))\r\n \r\n if len(df)==100:\r\n results_string = \"<p>Ci-dessous 100 exemples de résultats</p>\" + results_string\r\n \r\n else:\r\n results_string = \"\"\r\n \r\n # query to prepare results on maps\r\n fr_codes_dict = fr_department_codes\r\n sql_query = query_departments.format(street_name)\r\n dep_rows = perform_query(sql_query)\r\n\r\n results_dict = {}\r\n for row in dep_rows:\r\n k = row[1]['VarCharValue']\r\n v = row[0]['VarCharValue']\r\n results_dict[k] = int(v)\r\n\r\n # initialize results dictionary\r\n final_result_dict = init_results_dict(fr_codes_dict)\r\n \r\n # map region names with region codes to construct result dict\r\n for k1,v1 in results_dict.items():\r\n for k2,v2 in fr_codes_dict.items():\r\n if k1==k2:\r\n final_result_dict[v2]=v1\r\n \r\n # total number of streets for request \r\n total_street_nbr = 0\r\n for k,v in final_result_dict.items():\r\n total_street_nbr += v\r\n \r\n print(\"Total: {} for {}\".format(total_street_nbr,street_name))\r\n \r\n # dict to send back to JS\r\n subreports = { \"nbr_streets_per_region\":final_result_dict, \\\r\n \"total_street_nbr\":\"<p>Il y a {} résultats qui contiennent {}.</p>\" \\\r\n .format(total_street_nbr,street_name), \\\r\n \"town_zip_list\":results_string }\r\n \r\n return {\r\n 'statusCode': 200,\r\n 'body': json.dumps(subreports),\r\n 'headers': {\r\n \"Access-Control-Allow-Origin\" : \"*\", # Required for CORS support to work\r\n \"Access-Control-Allow-Methods\": \"*\"\r\n },\r\n }", "def test_updateEvent(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n eventprev = dict(start = '2015-08-21T01:23:00.000Z',\n end = '2015-08-21T01:25:00.000Z',\n date = '2015-08-21T00:00:00.000Z')\n eventcurr = dict(start = '2015-08-21T02:23:00.000Z',\n end = '2015-08-21T02:25:00.000Z',\n date = '2015-08-21T00:00:00.000Z')\n eventnext = dict(start = '2015-08-21T03:23:00.000Z',\n end = '2015-08-21T03:25:00.000Z',\n date = '2015-08-21T00:00:00.000Z')\n i=0\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n 
))\n\n uid = str('alex_' + eventprev['start'] + eventprev['end'])\n uidcurr = str('alex_' + eventcurr['start'] + eventcurr['end'])\n uidnext = str('alex_' + eventnext['start'] + eventnext['end'])\n invuid = '00000000000000000000000'\n\n rv = self.json_post('/createEvent/alex', eventprev)\n assert uid in str(rv.data)\n\n rv = self.json_post('/createEvent/alex', eventnext)\n assert uidnext in str(rv.data)\n\n rv = self.json_post('/createEvent/alex', eventcurr)\n assert uidcurr in str(rv.data)\n\n rv = self.json_post('/updateEvent/bbbb', {'uid': uid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': invuid})\n assert 'Event not found' in str(rv.data)\n\n rv = self.json_get('/getSuggestions/bbbb', {'uid': uid,\n 'query': 'Homewood Campus, Baltimore'})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getSuggestions/bbbb', {'uid': uid,\n 'query': 'Homewood Campus, Baltimore'})\n assert 'Invalid username' in str(rv.data)\n\n # Set prev event\n rv = self.json_get('/getSuggestions/alex', {'uid': uid,\n 'query': 'Homewood Campus, Baltimore'})\n sugId = json.loads(rv.data)['uid']\n placeId = json.loads(rv.data)['business'][1]['id']\n assert 'business' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': uid,\n 'choice': '1',\n 'suggestionId': sugId})\n assert 'yelpId' in str(rv.data)\n\n rv = self.json_post('/ratePlace/alex', {'uid': placeId,\n 'rating': 5})\n assert 'ratings' in str(rv.data)\n\n rv = self.json_post('/ratePlace/alex', {'uid': placeId,\n 'rating': 4})\n assert 'ratings' in str(rv.data)\n\n # Reset prev event\n rv = self.json_get('/getSuggestions/alex', {'uid': uid,\n 'query': 'Homewood Campus, Baltimore'})\n sugId = json.loads(rv.data)['uid']\n assert 'business' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': uid,\n 'choice': '0',\n 'suggestionId': sugId})\n assert 'yelpId' in str(rv.data)\n\n # Set next event\n rv = self.json_get('/getSuggestions/alex', {'uid': uidnext,\n 'query': 'Homewood Campus, Baltimore'})\n sugId = json.loads(rv.data)['uid']\n assert 'business' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': uidnext,\n 'choice': '2',\n 'suggestionId': sugId})\n assert 'yelpId' in str(rv.data)\n\n # Set curr event\n rv = self.json_get('/getSuggestions/alex', {'uid': uidcurr,\n 'query': 'Towson, MD'})\n print(rv.data)\n sugId = json.loads(rv.data)['uid']\n assert 'business' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': uidnext,\n 'choice': '0',\n 'suggestionId': sugId})\n assert 'yelpId' in str(rv.data)\n\n rv = self.json_get('/getEventFromId/alex', {'uid': invuid})\n assert 'Event not found' in str(rv.data)\n\n rv = self.json_get('/getSuggestions/alex', {'uid': invuid,\n 'query': 'Homewood Campus, Baltimore'})\n assert 'Event not found' in str(rv.data)", "def process_event(self, event):\n message = 'From: ' + self.fromaddr + \\\n '\\nTo: ' + join(self.toaddrs, ', ') + \\\n '\\nSubject: ' + event.data.match.expand(self.subject) + \\\n '\\n\\n' + \\\n event.data.match.expand(self.body) + '\\n' \n\n try:\n server = SMTP(self.smtphost)\n server.sendmail(self.fromaddr, self.toaddrs, message)\n server.quit()\n\n except Exception, e:\n print >> stderr, \"Could not send mail:\", e\n\n return 1", "def write_out_event(\n self,\n stamp: datetime,\n sensor_vals: Dict[str, Union[float, str, None]],\n label_vals: Dict[str, Optional[str]]\n ):\n\n # Form the dictionary to write:\n event_dict = {\n self.stamp_field: stamp,\n **sensor_vals,\n 
**label_vals\n }\n\n self.out_data.write_row_dict(event_dict)", "def publish_event(self, params, event_id):\n\n event = self.data[self.data[\"event_id\"] == event_id].iloc[-1]\n\n if event[\"num_assoc\"] >= params[\"ndef_min\"]:\n\n json_data = event.to_dict()\n publish_mqtt.run(\"event\", json_data, params)" ]
[ "0.55040216", "0.5488861", "0.54437023", "0.5373623", "0.5324502", "0.5313765", "0.5313765", "0.523546", "0.5173672", "0.5173648", "0.5173168", "0.5152679", "0.51510435", "0.5126581", "0.5120982", "0.5093802", "0.50590044", "0.50430846", "0.50288856", "0.5026668", "0.5018201", "0.50166905", "0.5010588", "0.5006409", "0.49564782", "0.49396685", "0.4929026", "0.49117327", "0.49068385", "0.49025732", "0.48889807", "0.48849028", "0.48732555", "0.48598093", "0.48487455", "0.4847889", "0.4847259", "0.48404348", "0.4827585", "0.48210868", "0.4820389", "0.48147157", "0.48058876", "0.48023134", "0.47974348", "0.47964758", "0.47959325", "0.4793107", "0.4790446", "0.4744579", "0.4726194", "0.47255057", "0.4719929", "0.47184384", "0.4718284", "0.469751", "0.46963614", "0.469299", "0.46918166", "0.46885008", "0.46877202", "0.46829528", "0.4682137", "0.4679078", "0.4678256", "0.4675021", "0.46535537", "0.46472123", "0.46457627", "0.46379936", "0.46289575", "0.46265915", "0.46214578", "0.46190885", "0.46145374", "0.46095183", "0.46082658", "0.46019125", "0.4596981", "0.45962843", "0.45960268", "0.4590889", "0.45907348", "0.4589697", "0.45892546", "0.458412", "0.45789942", "0.457872", "0.45785657", "0.45705652", "0.45687994", "0.4568204", "0.45619345", "0.45402268", "0.45382708", "0.4535759", "0.453508", "0.4534777", "0.45337182", "0.45328084", "0.45291907" ]
0.0
-1
Check a set of namespaces to see if their number of available backends is too low, emitting events to Sensu based on the fraction available and the thresholds defined in the corresponding yelpsoa config.
def check_smartstack_replication_for_instance( instance_config, expected_count, smartstack_replication_checker, ): crit_threshold = instance_config.get_replication_crit_percentage() log.info('Checking instance %s in smartstack', instance_config.job_id) smartstack_replication_info = \ smartstack_replication_checker.get_replication_for_instance(instance_config) log.debug('Got smartstack replication info for %s: %s' % (instance_config.job_id, smartstack_replication_info)) if len(smartstack_replication_info) == 0: status = pysensu_yelp.Status.CRITICAL output = ( 'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml ' 'is valid!\n' ) % instance_config.job_id log.error(output) else: expected_count_per_location = int(expected_count / len(smartstack_replication_info)) output = '' output_critical = '' output_ok = '' under_replication_per_location = [] for location, available_backends in sorted(smartstack_replication_info.items()): num_available_in_location = available_backends.get(instance_config.job_id, 0) under_replicated, ratio = is_under_replicated( num_available_in_location, expected_count_per_location, crit_threshold, ) if under_replicated: output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\n' % ( instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio, ) else: output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\n' % ( instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio, ) under_replication_per_location.append(under_replicated) output += output_critical if output_critical and output_ok: output += '\n\n' output += 'The following locations are OK:\n' output += output_ok if any(under_replication_per_location): status = pysensu_yelp.Status.CRITICAL output += ( "\n\n" "What this alert means:\n" "\n" " This replication alert means that a SmartStack powered loadbalancer (haproxy)\n" " doesn't have enough healthy backends. Not having enough healthy backends\n" " means that clients of that service will get 503s (http) or connection refused\n" " (tcp) when trying to connect to it.\n" "\n" "Reasons this might be happening:\n" "\n" " The service may simply not have enough copies or it could simply be\n" " unhealthy in that location. There also may not be enough resources\n" " in the cluster to support the requested instance count.\n" "\n" "Things you can do:\n" "\n" " * You can view the logs for the job with:\n" " paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\n" "\n" " * Fix the cause of the unhealthy service. Try running:\n" "\n" " paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n" "\n" " * Widen SmartStack discovery settings\n" " * Increase the instance count\n" "\n" ) % { 'service': instance_config.service, 'instance': instance_config.instance, 'cluster': instance_config.cluster, } log.error(output) else: status = pysensu_yelp.Status.OK log.info(output) send_event(instance_config=instance_config, status=status, output=output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check(self):\n BadNamespaces = list()\n\n for namespace in pm.listNamespaces():\n BadNamespaces.append(namespace)\n\n if not BadNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = namespace\n for namespace in BadNamespaces:\n self.addError(\"namespace %s exist\" % namespace)\n self.errorMessage = \"%s namespace\" % (len(BadNamespaces))", "def test_create_egress_network_policy_for_all_namespaces(self):\n pass", "def test_list_egress_network_policy_for_all_namespaces(self):\n pass", "def run(self):\n\n syn_cookies = Setup.syn_cookies\n cpu_orange_threshold = Setup.parse_options()['cpu_orange_threshold']\n cpu_red_threshold = Setup.parse_options()['cpu_red_threshold']\n network_orange_threshold = Setup.parse_options()['network_orange_threshold']\n network_red_threshold = Setup.parse_options()['network_red_threshold']\n ram_orange_threshold = Setup.parse_options()['network_orange_threshold']\n ram_red_threshold = Setup.parse_options()['ram_orange_threshold']\n interval = Setup.parse_options()['interval']\n time_period = Setup.parse_options()['time_period']\n\n # Check which resources should be monitored\n if cpu_orange_threshold > 0:\n resource = \"cpu\"\n print(\"CPU is being monitored, orange threshold set at %0.2f, red threshold set to %0.2f\"\n % (cpu_orange_threshold, cpu_red_threshold))\n resource_orange_threshold = float(cpu_orange_threshold)\n resource_red_threshold = float(cpu_red_threshold)\n elif network_orange_threshold > 0:\n resource = \"network\"\n print(\"Network usage is being monitored, orange threshold set at %0.2f, red threshold set to %0.2f\"\n % (network_orange_threshold, network_red_threshold))\n resource_orange_threshold = float(network_orange_threshold)\n resource_red_threshold = float(network_red_threshold)\n elif ram_orange_threshold > 0:\n resource = \"memory\"\n print(\"Memory is being monitored, orange threshold set at %0.2f , red threshold set to %0.2f\"\n % (ram_orange_threshold, ram_red_threshold))\n resource_orange_threshold = float(ram_orange_threshold)\n resource_red_threshold = float(ram_red_threshold)\n else:\n resource = \"cpu\"\n resource_orange_threshold = float(self.calculate_thresholds()['orange_cpu_threshold'])\n resource_red_threshold = float(self.calculate_thresholds()['red_cpu_threshold'])\n print(\"CPU is being monitored, orange threshold set at %0.2f, red threshold set to %0.2f\"\n % (resource_orange_threshold, resource_red_threshold))\n stats = self.get_system_load(interval, time_period, resource)\n print(\"System monitor engaged\")\n while True:\n system_load = 100 * float(get_mean(stats))\n print \"System load is %0.2f\" % system_load\n # If system load below orange threshold change status to green\n if system_load < resource_orange_threshold and Setup.system_status != 'green':\n Setup.system_status = 'green'\n print(\"ALERT: System status green\")\n # If system load exceeds orange threshold change status to orange\n elif system_load >= resource_orange_threshold \\\n and system_load < resource_red_threshold and Setup.system_status != 'orange':\n Setup.system_status = 'orange'\n print(\"ALERT: System status updated to orange\")\n if syn_cookies == 0:\n print(\"Turning on SYN Cookies\")\n self.turn_on_syn_cookies()\n syn_cookies = 1\n # If system load exceeds red threshold change system status to red\n elif system_load > resource_red_threshold and Setup.system_status != 'red':\n Setup.system_status = 'red'\n print(\"WARNING: System status updated to Red\")\n else:\n print(\"No conditions met\")\n 
print(\"Status: %s, System_load: %0.2f, Orange_threshold: %0.2f, Red_threshold: %0.2f\" %\n (Setup.system_status, system_load, resource_orange_threshold, resource_red_threshold))\n stats = self.update_system_load(interval, stats, resource)", "def check(self):\n illegalNamespaces = list()\n\n progStandard = re.compile(\"^[A-Z]{4}[0-9]{2}_[0-9]{3}$\")\n progShot = re.compile(\"^SH[0-9]{4}_[0-9]{3}$\")\n\n for namespaces in pm.namespaceInfo(listOnlyNamespaces=True, internal=False, recurse=True):\n for namespace in namespaces.split(\":\"):\n if not progStandard.match(namespace) and not progShot.match(namespace) not in [\"UI\", \"shared\"]:\n illegalNamespaces.append(namespace)\n\n if not illegalNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = illegalNamespaces\n for illegalNamespace in illegalNamespaces:\n self.addError(\"%s is a illegal namespace\" % illegalNamespace)\n self.errorMessage = \"%s illegal namespace\" % (\n len(illegalNamespaces))", "def _validate_namespaces(self, input_namespaces):\r\n output_namespaces = []\r\n if input_namespaces == []:\r\n return output_namespaces\r\n elif '*' in input_namespaces:\r\n if len(input_namespaces) > 1:\r\n warning = 'Warning: Multiple namespaces are '\r\n warning += 'ignored when one namespace is \"*\"\\n'\r\n sys.stderr.write(warning)\r\n return output_namespaces\r\n else:\r\n for namespace in input_namespaces:\r\n if not isinstance(namespace, unicode):\r\n namespace = unicode(namespace)\r\n namespace_tuple = self._tuplefy_namespace(namespace)\r\n if namespace_tuple is None:\r\n warning = 'Warning: Invalid namespace ' + namespace\r\n warning += ' will be ignored\\n'\r\n sys.stderr.write(warning)\r\n else:\r\n if namespace_tuple not in output_namespaces:\r\n output_namespaces.append(namespace_tuple)\r\n else:\r\n warning = 'Warning: Duplicate namespace ' + namespace\r\n warning += ' will be ignored\\n'\r\n sys.stderr.write(warning)\r\n return output_namespaces", "def checkbands() :\n dontThrowException = False \n success = s.checkConfig(dontThrowException)\n return success", "def test_100_services(self):\n services = {\n self.keystone_sentry: ['keystone'],\n self.cinder_sentry: ['cinder-api',\n 'cinder-scheduler',\n 'cinder-volume']\n }\n if self.is_liberty_or_newer():\n services[self.keystone_sentry] = ['apache2']\n else:\n services[self.keystone_sentry] = ['keystone']\n ret = u.validate_services_by_name(services)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)", "def test_django_multi_settings(self):\r\n\r\n backends = self._reload_backends().values()\r\n\r\n self.assertEqual(len(backends), 2)\r\n\r\n event_count = 10\r\n for _ in xrange(event_count):\r\n tracker.send({})\r\n\r\n self.assertEqual(backends[0].count, event_count)\r\n self.assertEqual(backends[1].count, event_count)", "def test(request, backend_usages, application, client, setup, expect_ok, expect_not_found):\n\n request.getfixturevalue(setup)\n\n assert len(backend_usages) == 2\n\n analytics = application.threescale_client.analytics\n\n for path in expect_ok:\n hits_before = hits(application, analytics)\n\n response = client.get(path)\n assert response.status_code == 200, f\"For path {path} expected status_code 200\"\n\n hits_after = resilient.stats_service_usage(\n application.threescale_client, application[\"service_id\"], \"hits\", \"total\", hits_before+1)\n\n assert hits_before + 1 == hits_after, f\"For path {path} expected hits to be increased by 1\"\n\n for path in expect_not_found:\n hits_before = hits(application, 
analytics)\n\n response = client.get(path)\n assert response.status_code == 404, f\"For path {path} expected status_code 400\"\n\n hits_after = hits(application, analytics)\n assert hits_before == hits_after, f\"For path {path} expected hits to be same before and after\"", "def test_100_services(self):\n u.log.debug('Checking system services...')\n swift_storage_services = ['swift-account',\n 'swift-account-auditor',\n 'swift-account-reaper',\n 'swift-account-replicator',\n 'swift-container',\n 'swift-container-auditor',\n 'swift-container-replicator',\n 'swift-container-updater',\n 'swift-object',\n 'swift-object-auditor',\n 'swift-object-replicator',\n 'swift-object-updater',\n 'swift-container-sync']\n service_names = {\n self.keystone_sentry: ['keystone'],\n self.glance_sentry: ['glance-registry',\n 'glance-api'],\n self.swift_proxy_sentry: ['swift-proxy'],\n self.swift_storage_sentry: swift_storage_services\n }\n\n if self._get_openstack_release() >= self.trusty_liberty:\n service_names[self.keystone_sentry] = ['apache2']\n\n ret = u.validate_services_by_name(service_names)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)", "def check_thresholds(config):\n ranger = range(0, 100)\n if config[\"warning\"] not in ranger or config[\"critical\"] not in ranger:\n unknown_exit(SERVICE, \"Bad args: Bogus warn/crit thresholds\")", "def get_expected_alerting_messages(duthost, containers_in_namespaces):\n expected_alerting_messages = []\n\n for container_name in containers_in_namespaces.keys():\n logger.info(\"Generating the expected alerting messages for container '{}'...\".format(container_name))\n critical_group_list, critical_process_list, succeeded = duthost.get_critical_group_and_process_lists(container_name)\n pytest_assert(succeeded, \"Failed to get critical group and process lists of container '{}'\".format(container_name))\n\n namespace_ids = containers_in_namespaces[container_name]\n for namespace_id in namespace_ids:\n namespace_name = \"host\"\n if namespace_id != DEFAULT_ASIC_ID:\n namespace_name = NAMESPACE_PREFIX + namespace_id\n\n for critical_process in critical_process_list:\n # Skip 'dsserve' process since it was not managed by supervisord\n # TODO: Should remove the following two lines once the issue was solved in the image.\n if container_name == \"syncd\" and critical_process == \"dsserve\":\n continue\n logger.info(\"Generating the expected alerting message for process '{}'\".format(critical_process))\n expected_alerting_messages.append(\".*Process '{}' is not running in namespace '{}'.*\".format(critical_process, namespace_name))\n\n for critical_group in critical_group_list:\n group_program_info = get_group_program_info(duthost, container_name, critical_group)\n for program_name in group_program_info:\n logger.info(\"Generating the expected alerting message for process '{}'\".format(program_name))\n expected_alerting_messages.append(\".*Process '{}' is not running in namespace '{}'.*\".format(program_name, namespace_name))\n\n logger.info(\"Generating the expected alerting messages for container '{}' was done!\".format(container_name))\n\n return expected_alerting_messages", "def check(self):\n # Determine which services to test\n # TODO: use a smarter algorithm to detect which services to check\n max_lag = max(service.lag for service in self.services)\n now = datetime.utcnow()\n services = [ service\n for service in self.services\n if service.next_update_in(now) <= max_lag\n ]\n if not services:\n return 0, []\n\n period = max(service.period for service in 
services)\n\n # Test them\n service_states = self._check_services(services)\n\n # Report\n return int(period), service_states", "def test_100_services(self):\n u.log.debug('Checking system services on units...')\n\n services = {\n self.compute_sentry: ['nova-compute',\n 'neutron-plugin-openvswitch-agent'],\n self.rabbitmq_sentry: ['rabbitmq-server'],\n self.neutron_api_sentry: ['neutron-server'],\n }\n\n if self._get_openstack_release() >= self.trusty_mitaka:\n services[self.compute_sentry] = [\n 'nova-compute',\n 'neutron-openvswitch-agent'\n ]\n\n ret = u.validate_services_by_name(services)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)\n\n u.log.debug('OK')", "def get_backend_coverage():\n\n onnx_coverage = {}\n experimental_op = set()\n for handler in BackendHandler.__subclasses__():\n handler.check_cls()\n\n versions = handler.get_versions()\n domain = handler.DOMAIN\n if getattr(handler, \"EXPERIMENTAL\", False):\n experimental_op.add(handler.ONNX_OP)\n _update_coverage(onnx_coverage, domain, handler.ONNX_OP, versions)\n return onnx_coverage, experimental_op", "def verify_services(self):\n services = [\"metric_collector\", \"log_collector\"]\n service_version_9 = [\"lma_collector\"]\n pids = {}\n processes_count = {\n \"collectd \": 1,\n \"collectdmon \": 1\n }\n\n if self.settings.version.startswith(\"0.9\"):\n processes_count[\n \"hekad -config[= ]/etc/{}\".format(service_version_9)] = 1\n else:\n # Starting with 0.10, there are one collector for logs and one for\n # metrics\n for service in services:\n processes_count[\"hekad -config[= ]/etc/{}\".format(service)] = 1\n online_nodes = [node for node in self.helpers.get_all_ready_nodes()\n if node[\"online\"]]\n for node in online_nodes:\n pids[node[\"name\"]] = {}\n with self.env.d_env.get_ssh_to_remote(node[\"ip\"]) as remote:\n for process, count in processes_count.items():\n logger.info(\"Checking process {0} on node {1}\".format(\n process, node[\"name\"]\n ))\n pids[node[\"name\"]][process] = (\n self.checkers.check_process_count(\n remote, process, count))\n return pids", "def generate_sla_metrics(self):\n for module in self.account_definitions:\n\n metric_spec = Definition.return_spec(\n type_set='metric_set',\n module=module\n )\n \n metric_module = importlib.util.module_from_spec(metric_spec)\n metric_spec.loader.exec_module(metric_module)\n try:\n self.metric_sets.append(metric_module.metric_set)\n except AttributeError as _ex:\n print(\"Module has no attribute metric_set\")\n sla_spec = Definition.return_spec(\n type_set='sla_set',\n module=module\n )\n \n sla_module = importlib.util.module_from_spec(sla_spec)\n sla_spec.loader.exec_module(sla_module)\n try:\n self.sla_sets.append(sla_module.sla_set)\n except AttributeError as _ex:\n print(\"Module has no attribute sla_set\")", "def main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--dev', required=True)\n parser.add_argument('-w', '--warn', action='append', type=float,\n required=True)\n parser.add_argument('-c', '--crit', action='append', type=float,\n required=True)\n args = parser.parse_args()\n\n # Derive the device type from sysfs\n ssd = dev_is_ssd(args.dev)\n\n # Get the historical and current statistics\n last = get_last(args.dev)\n curr = get_curr(args.dev)\n\n # Save the historical statistics\n set_last(args.dev, curr)\n\n # Handle the first run after startup\n if not last:\n print 'UNKNOWN: history data not available'\n sys.exit(NAGIOS_UNKNOWN)\n\n # Calculate the current latencies for the check period\n read_latency, 
write_latency = get_latencies(last, curr)\n\n # Select the correct thresholds based on disk type\n try:\n read_crit = args.crit[2] if ssd else args.crit[0]\n write_crit = args.crit[3] if ssd else args.crit[1]\n except IndexError:\n print 'UNKNOWN: SSD detected but no critcal latencies provided'\n sys.exit(NAGIOS_UNKNOWN)\n\n try:\n read_warn = args.warn[2] if ssd else args.warn[0]\n write_warn = args.warn[3] if ssd else args.warn[1]\n except IndexError:\n print 'UNKNOWN: SSD detected but no warning latencies provided'\n sys.exit(NAGIOS_UNKNOWN)\n\n # Calculate the status based on thresholds\n code = NAGIOS_OK\n if read_latency > read_warn or write_latency > write_warn:\n code = NAGIOS_WARNING\n if read_latency > read_crit or write_latency > write_crit:\n code = NAGIOS_CRITICAL\n\n status = ['OK', 'WARNING', 'CRITICAL'][code]\n print ('{0}: read latency {1:.3f}ms, write latency {2:.3f}ms | '\n 'read={1:.3f}ms;{3:.3f};{4:.3f};; '\n 'write={2:.3f}ms;{5:.3f};{6:.3f};;').\\\n format(status, read_latency, write_latency, read_warn, read_crit,\n write_warn, write_crit)\n sys.exit(code)", "def get_backend_resistance(dirs, outDir):\n return get_response_stats(dirs, outDir, \"segfault\")", "def check_standard_groupings(ctx, stmt):\n\n # Don't perform this check for modules that are not OpenConfig\n # or are OpenConfig infrastructure (e.g., extensions)\n if (OCLintFunctions.is_openconfig_validatable_module(stmt.arg) in\n [ModuleType.NONOC, ModuleType.OCINFRA]):\n return\n\n found = False\n for grouping in stmt.search(\"grouping\"):\n if re.match(r\".*\\-top$\", grouping.arg):\n found = True\n\n if not found:\n err_add(ctx.errors, stmt.pos, \"OC_MISSING_STANDARD_GROUPING\",\n (stmt.arg, \"-top\"))", "def __check_events_per_core_params(\n self, variable, sampling_interval, indexes):\n if sampling_interval is not None:\n raise ValueError(\n f\"Variable {variable} does not support a sampling interval\")\n if indexes is not None:\n raise ValueError(\n f\"Variable {variable} can only be recorded \"\n \"on the whole population\")", "def check_filterconfig(filterconfig, config):\n for f in filterconfig[\"filters\"]:\n if f[\"name\"] != \"frequency\":\n continue\n\n missing_freq_groups = set(iter_freq_groups(f[\"config\"][\"groups\"])) - set(\n iter_freq_groups(config[\"frequencies\"][\"groups\"])\n )\n assert not missing_freq_groups, \"Missing frequency group(s) in global config: {}\".format(\n missing_freq_groups\n )", "def check_count():\n\n config = configparser.ConfigParser()\n config.read('config.ini')\n\n while True:\n try:\n for user in get_count_request():\n ip, count, protocol = str(user[0][0]), user[1][0], str(user[2][0])\n if count >= int(config[protocol]['Count Request']) and ip not in BLACK_LIST:\n BLACK_LIST.append(ip)\n logging.warning(ip)\n\n except Exception as e:\n logging.debug(e)", "def getConfiguredBackends():\n\ttry:\n\t\tfrom OPSI.Backend.BackendManager import BackendDispatcher\n\texcept ImportError as impError:\n\t\tlogger.debug(\"Import failed: {}\", impError)\n\t\treturn None\n\n\ttry:\n\t\tdispatcher = BackendDispatcher(\n\t\t\tdispatchConfigFile='/etc/opsi/backendManager/dispatch.conf',\n\t\t\tbackendconfigdir='/etc/opsi/backends/',\n\t\t)\n\texcept BackendConfigurationError as bcerror:\n\t\tlogger.debug(\"Unable to read backends: {}\", bcerror)\n\t\treturn None\n\n\tnames = [name.lower() for name in dispatcher.dispatcher_getBackendNames()]\n\tdispatcher.backend_exit()\n\n\treturn set(names)", "def are_within_limits(rates):\n for srv in rates:\n for state in rates[srv]:\n rate = 
rates[srv][state]\n if rate < 0 or rate > 5:\n print(f\"Rate {rate} out of bounds: Server {srv}, State {state}\")\n return False\n return True", "def verify_options(parser, config):\n if (\n config[\"infrastructure\"][\"cloud_nodes\"] < 2\n or config[\"infrastructure\"][\"edge_nodes\"] != 0\n or config[\"infrastructure\"][\"endpoint_nodes\"] < 0\n ):\n parser.error(\"ERROR: kubecontrol requires #clouds>=2, #edges=0, #endpoints>=0\")\n elif (\n config[\"infrastructure\"][\"endpoint_nodes\"] % (config[\"infrastructure\"][\"cloud_nodes\"] - 1)\n != 0\n ):\n parser.error(r\"ERROR: Kubernetes requires (#clouds-1) % #endpoints == 0 (-1 for control)\")", "def test_list_policy_for_all_namespaces(self):\n pass", "def _Check(self, cn0, num_obs, type_bits):\n\n if num_obs == 0:\n return\n\n if self._for_log:\n avg_cn0, max_cn0 = self.GetAvgAndMaxCn0FromTimeSeries(\n cn0, num_obs, type_bits)\n else:\n avg_cn0, max_cn0, _ = self.GetAvgAndMaxCn0(cn0, num_obs, type_bits)\n\n avg_ranges = check_range.Interval([40.0, None])\n max_ranges = check_range.Interval([45.0, None])\n all_inclusive = check_range.AllInclusiveRange()\n self._CheckByRange('%s (Avg)' % self._name, avg_cn0, avg_ranges,\n all_inclusive)\n self._CheckByRange('%s (Max)' % self._name, max_cn0, max_ranges,\n all_inclusive)", "def check_bundle_intervals(\n request: Request, policy: RequestPolicy, logger: Logger\n) -> None:\n if not policy.check_bundle_intervals:\n logger.warning(\n \"KSR-POLICY-BUNDLE-INTERVALS: Disabled by policy (check_bundle_intervals)\"\n )\n return\n\n _min_str = fmt_timedelta(policy.min_bundle_interval)\n _max_str = fmt_timedelta(policy.max_bundle_interval)\n\n logger.debug(\n f\"Verifying that bundles intervals is no less than {_min_str}, and no more than {_max_str} \"\n \"(from KSK operator policy)\"\n )\n for num in range(len(request.bundles)):\n interval: Optional[timedelta] = None\n if num:\n interval = (\n request.bundles[num].inception - request.bundles[num - 1].inception\n )\n logger.debug(\n \"{num:<2} {inception:29} {interval}\".format(\n num=num + 1,\n inception=fmt_timestamp(request.bundles[num].inception),\n interval=interval or \"-\",\n )\n )\n\n for num in range(1, len(request.bundles)):\n interval = request.bundles[num].inception - request.bundles[num - 1].inception\n _interval_str = fmt_timedelta(interval)\n if interval < policy.min_bundle_interval:\n bundle = request.bundles[num]\n raise KSR_POLICY_BUNDLE_INTERVAL_Violation(\n f\"Bundle #{num + 1} ({bundle.id}) interval ({_interval_str}) \"\n f\"less than minimum acceptable interval {_min_str}\"\n )\n if interval > policy.max_bundle_interval:\n # TODO: Is it perhaps only the _last_ interval in a cycle that should be permitted to be 9 or 11 days?\n bundle = request.bundles[num]\n raise KSR_POLICY_BUNDLE_INTERVAL_Violation(\n f\"Bundle #{num + 1} ({bundle.id}) interval ({_interval_str}) \"\n f\"greater than maximum acceptable interval {_max_str}\"\n )\n\n logger.info(\n f\"KSR-POLICY-BUNDLE-INTERVALS: All bundles intervals in accordance with the KSK operator policy\"\n )", "def test_create_policy_for_all_namespaces(self):\n pass", "def ensure_all_critical_processes_running(duthost, containers_in_namespaces):\n for container_name in containers_in_namespaces.keys():\n critical_group_list, critical_process_list, succeeded = duthost.get_critical_group_and_process_lists(container_name)\n pytest_assert(succeeded, \"Failed to get critical group and process lists of container '{}'\".format(container_name))\n\n namespace_ids = 
containers_in_namespaces[container_name]\n for namespace_id in namespace_ids:\n container_name_in_namespace = container_name\n if namespace_id != DEFAULT_ASIC_ID:\n container_name_in_namespace += namespace_id\n\n for critical_process in critical_process_list:\n # Skip 'dsserve' process since it was not managed by supervisord\n # TODO: Should remove the following two lines once the issue was solved in the image.\n if container_name_in_namespace == \"syncd\" and critical_process == \"dsserve\":\n continue\n\n ensure_process_is_running(duthost, container_name_in_namespace, critical_process)\n\n for critical_group in critical_group_list:\n group_program_info = get_group_program_info(duthost, container_name_in_namespace, critical_group)\n for program_name in group_program_info:\n ensure_process_is_running(duthost, container_name_in_namespace, program_name)", "def test_max_events_range(self):\n\n self.log.info(\"Testing max_event counts\")\n enable_failover = True\n timeout_val = 10\n max_plus_1 = CbServer.Failover.MAX_EVENTS + 1\n\n # Set max_events between (min, max)\n for num_events in range(CbServer.Failover.MIN_EVENTS, max_plus_1):\n status = self.rest.update_autofailover_settings(\n enable_failover, timeout_val, maxCount=num_events)\n self.assertTrue(status, \"Failed to set max events=%s\" % num_events)\n self.validate_failover_settings(enable_failover, timeout_val,\n 0, num_events)\n\n for num_events in [0, max_plus_1]:\n self.log.info(\"Testing max_event_count=%s\" % num_events)\n status = self.rest.update_autofailover_settings(\n enable_failover, timeout_val, maxCount=max_plus_1)\n self.assertFalse(status, \"Able to set max events=%s\" % num_events)\n self.validate_failover_settings(enable_failover, timeout_val,\n 0, CbServer.Failover.MAX_EVENTS)", "def check(self):\n illegalNamespaces = list()\n\n prog = re.compile(\"^[A-Z]{4}[0-9]{2}_[0-9]{3}:$\")\n\n for assetNode in pm.ls(type=\"gAsset\"):\n if assetNode.isReferenced() and not prog.match(assetNode.namespace()):\n illegalNamespaces.append(assetNode)\n\n if not illegalNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = illegalNamespaces\n for illegalNamespace in illegalNamespaces:\n self.addError(\"%s has a illegal namespace\" % illegalNamespace)\n self.errorMessage = \"%s asset(s) have a illegal namespace\" % (\n len(illegalNamespaces))", "def test_check_bot_confidence(setup_config, get_mock_event):\n\n # !ARRANGE!\n bad_bots = BadBots(setup_config, get_mock_event)\n\n bot_1 = Bot()\n bot_1.source_ip = '1.1.1.1'\n bot_1.http_query_string_parameters = '<script></script>'\n bot_1.http_body = 'EXEC'\n bot_1.geolocation = 'United States'\n bot_1.source_ip_type = BadBots.SourceIPType.IPV4\n bot_1.http_method = \"CONNECT\"\n bot_1.http_user_agent = \"Mozilla/5.0 (compatible; Sosospider/2.0; +http://help.soso.com/webspider.htm)\"\n\n bot_2 = Bot()\n bot_2.source_ip = '77.168.51.231'\n bot_2.http_query_string_parameters = 'hello'\n bot_2.http_body = 'hello!'\n bot_2.geolocation = 'Netherlands'\n bot_2.source_ip_type = BadBots.SourceIPType.IPV4\n bot_2.http_method = \"GET\"\n bot_2.http_user_agent = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36\"\n\n bot_3 = Bot()\n bot_3.source_ip = '2a02:a445:6d36:1:1e3:a188:313c:1d33'\n bot_3.http_query_string_parameters = 'param=true'\n bot_3.http_body = 'username=xxx'\n bot_3.geolocation = 'United States'\n bot_3.source_ip_type = BadBots.SourceIPType.IPV6\n bot_3.http_method = \"GET\"\n 
bot_3.http_user_agent = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36\"\n\n # !ACT!\n\n # Do confidence check on potential bots\n confidence_score_bot_1 = bad_bots.check_bot_confidence(bot_1)\n confidence_score_bot_2 = bad_bots.check_bot_confidence(bot_2)\n confidence_score_bot_3 = bad_bots.check_bot_confidence(bot_3)\n\n # !ASSERT!\n\n # Assert IP addresses are of type IPv4\n assert(confidence_score_bot_1 == 25)\n assert(confidence_score_bot_2 == 0)\n assert(confidence_score_bot_3 == 5)", "def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')", "def test_backend_name_reporting(self):\n for volume_id in self.volume_id_list_without_prefix:\n self._test_backend_name_reporting_by_volume_id(volume_id)", "def test_create_policy_binding_for_all_namespaces(self):\n pass", "def available_backends(self, filters=None):\n # pylint: disable=arguments-differ\n backends = []\n for provider in self.providers:\n backends.extend(provider.available_backends())\n\n if filters is not None:\n if isinstance(filters, dict):\n # exact match filter:\n # e.g. {'n_qubits': 5, 'operational': True}\n for key, value in filters.items():\n backends = [instance for instance in backends\n if instance.configuration().get(key) == value\n or instance.status().get(key) == value]\n elif callable(filters):\n # acceptor filter: accept or reject a specific backend\n # e.g. 
lambda x: x.configuration()['n_qubits'] > 5\n accepted_backends = []\n for backend in backends:\n try:\n if filters(backend) is True:\n accepted_backends.append(backend)\n except Exception: # pylint: disable=broad-except\n pass\n backends = accepted_backends\n else:\n raise QISKitError('backend filters must be either dict or callable.')\n\n return backends", "def check_no_namespace(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n if len(pm.listNamespaces()):\n progress_controller.complete()\n raise PublishError(\n \"There should be no <b>Namespaces</b> in a <b>Model</b> scene.\"\n )\n progress_controller.complete()", "def _CommonChecks(input_api, output_api):\n result = []\n result.extend(_CheckChromeUpdateTriggerRule(input_api, output_api))\n result.extend(_CheckCurrentVersionIncreaseRule(input_api, output_api))\n result.extend(_CheckNoOverlappingFileNamesInResourceDirsRule(input_api,\n output_api))\n\n return result", "def test_env_rules_dont_cause_non_matching_span_to_be_sampled():\n with override_global_config(dict(_sampling_rules='[{\"service\":\"test_ser\",\"name\":\"test_na\"}]')):\n sampling_rules = get_span_sampling_rules()\n assert sampling_rules[0]._service_matcher.pattern == \"test_ser\"\n assert sampling_rules[0]._name_matcher.pattern == \"test_na\"\n tracer = Tracer()\n tracer.configure(writer=DummyWriter())\n span = traced_function(sampling_rules[0], tracer=tracer)\n assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None)", "def check_min_and_max_alert_widgets(attribute_range_entry: db.Model):\n\n if attribute_range_entry.maximum:\n max_alerts = AlertWidgetModel.get_max_alerts(attribute_range_entry)\n for alert in max_alerts:\n user_details = Users.find_by_id(alert[\"user_id\"])\n if user_details:\n attr = Attributes.get_by_id(attribute_range_entry.attribute_id)\n if attr:\n if not send_alert_email(\n user_details.email, user_details.fullname,\n attr.name, attribute_range_entry.maximum,\n attribute_range_entry.maximum_recorded_date,\n attribute_range_entry.maximum_sensor_id,\n alert[\"max_threshold\"], \"exceeded\"):\n logger.error(\"Server error prevented the \"\n \"sending of a max alert email \"\n \"to {} regarding attribute with \"\n \"id {}\".format(\n user_details.email,\n attribute_range_entry.attribute_id))\n else:\n logger.error(\n \"Could not send max alert email to \"\n \"user with id {} as the attribute with \"\n \"id {} does not exist \".format(\n alert[\"user_id\"],\n attribute_range_entry.attribute_id))\n else:\n logger.error(\"Could not send max alert email to \"\n \"user with id {} as the user does \"\n \"not exist \".format(alert[\"user_id\"]))\n\n if attribute_range_entry.minimum:\n min_alerts = AlertWidgetModel.get_min_alerts(attribute_range_entry)\n for alert in min_alerts:\n user_details = Users.find_by_id(alert[\"user_id\"])\n if user_details:\n attr = Attributes.get_by_id(attribute_range_entry.attribute_id)\n if attr:\n if not send_alert_email(\n user_details.email, user_details.fullname,\n attr.name, attribute_range_entry.minimum,\n attribute_range_entry.minimum_recorded_date,\n attribute_range_entry.minimum_sensor_id,\n alert[\"min_threshold\"], \"fell short of\"):\n logger.error(\"Server error prevented the sending of \"\n \"a min alert email to {} regarding \"\n \"attribute with id {}\".format(\n user_details.email,\n attribute_range_entry.attribute_id))\n else:\n logger.error(\n \"Could not send min alert email to \"\n \"user with id {} as the attribute with 
\"\n \"id {} does not exist \".format(\n alert[\"user_id\"],\n attribute_range_entry.attribute_id))\n else:\n logger.error(\"Could not send min alert email to \"\n \"user with id {} as the user does \"\n \"not exist \".format(alert[\"user_id\"]))", "def check_packages(ctx, config):\n log.info(\"Checking packages...\")\n os_type = ctx.config.get(\"os_type\", None)\n sha1 = ctx.config.get(\"sha1\", None)\n # We can only do this check if there are a defined sha1 and os_type\n # in the job config.\n if os_type and sha1:\n log.info(\n \"Checking packages for os_type '{os}' and ceph hash '{ver}'\".format(\n os=os_type,\n ver=sha1,\n )\n )\n if not has_packages_for_distro(sha1, os_type):\n msg = \"Packages for os_type '{os}' and ceph hash '{ver}' not found\"\n msg = msg.format(\n os=os_type,\n ver=sha1,\n )\n log.error(msg)\n # set the failure message and update paddles with the status\n ctx.summary[\"failure_reason\"] = msg\n set_status(ctx.summary, \"dead\")\n report.try_push_job_info(ctx.config, dict(status='dead'))\n raise RuntimeError(msg)\n else:\n log.info(\n \"Checking packages skipped, missing os_type '{os}' or ceph hash '{ver}'\".format(\n os=os_type,\n ver=sha1,\n )\n )", "def test_user_rate_reached_perf_issues(self):\n for i in range(0, 10):\n event = self.store_transaction(\n environment=None,\n project_id=self.project.id,\n user_id=str(i),\n fingerprint=[f\"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1\"],\n )\n perf_group = event.groups[0]\n snooze = GroupSnooze.objects.create(group=perf_group, user_count=10, user_window=60)\n assert not snooze.is_valid(test_rates=True)", "def autoscaling_load_balancer_healthcheck_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n # ISO Time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for asg in describe_auto_scaling_groups(cache, session)[\"AutoScalingGroups\"]:\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(asg,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n asgArn = asg[\"AutoScalingGroupARN\"]\n asgName = asg[\"AutoScalingGroupName\"]\n healthCheckType = asg[\"HealthCheckType\"]\n # Check specific metadata\n asgLbs = asg[\"LoadBalancerNames\"]\n asgTgs = asg[\"TargetGroupARNs\"]\n # If either list is empty it means there are no ELBs or ELBv2s associated with this ASG\n if not (asgLbs or asgTgs):\n # this is a passing check\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{asgArn}/asg-elb-asgs-elb-healthcheck-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": asgArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[Autoscaling.2] Autoscaling Groups with load balancer targets should use ELB health checks\",\n \"Description\": f\"Autoscaling group {asgName} does not have any ELB or Target Groups associated and is not in scope for this check.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For information about enabling ELB health checks refer to the Add Elastic Load Balancing health checks to an Auto Scaling group section of the Amazon EC2 Auto Scaling User Guide\",\n \"Url\": 
\"https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-add-elb-healthcheck.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Compute\",\n \"AssetService\": \"AWS Auto Scaling\",\n \"AssetComponent\": \"Autoscaling Group\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsAutoScalingAutoScalingGroup\",\n \"Id\": asgArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsAutoScalingAutoScalingGroup\": {\n \"HealthCheckType\": healthCheckType\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 DE.AE-4\",\n \"NIST CSF V1.1 DE.DP-4\",\n \"NIST SP 800-53 Rev. 4 AU-6\",\n \"NIST SP 800-53 Rev. 4 CA-2\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 RA-3\",\n \"NIST SP 800-53 Rev. 4 RA-5\",\n \"NIST SP 800-53 Rev. 4 SI-4\",\n \"AICPA TSC CC7.2\",\n \"AICPA TSC CC7.3\",\n \"ISO 27001:2013 A.16.1.2\",\n \"ISO 27001:2013 A.16.1.3\",\n \"ISO 27001:2013 A.16.1.4\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding\n else:\n if healthCheckType != \"ELB\":\n # this is a failing check\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{asgArn}/asg-elb-asgs-elb-healthcheck-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": asgArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": \"[Autoscaling.2] Autoscaling Groups with load balancer targets should use ELB health checks\",\n \"Description\": f\"Autoscaling group {asgName} has ELB or ELBv2 Targets but does not use an ELB Health Check. If you attached a load balancer or target group to your Auto Scaling group, you can configure the group to mark an instance as unhealthy when Elastic Load Balancing reports it as unhealthy. If connection draining is enabled for your load balancer, Amazon EC2 Auto Scaling waits for in-flight requests to complete or the maximum timeout to expire, whichever comes first, before terminating instances due to a scaling event or health check replacement. 
Review the remediation section for more information on this configuration.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For information about enabling ELB health checks refer to the Add Elastic Load Balancing health checks to an Auto Scaling group section of the Amazon EC2 Auto Scaling User Guide\",\n \"Url\": \"https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-add-elb-healthcheck.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Compute\",\n \"AssetService\": \"AWS Auto Scaling\",\n \"AssetComponent\": \"Autoscaling Group\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsAutoScalingAutoScalingGroup\",\n \"Id\": asgArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsAutoScalingAutoScalingGroup\": {\n \"HealthCheckType\": healthCheckType\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 DE.AE-4\",\n \"NIST CSF V1.1 DE.DP-4\",\n \"NIST SP 800-53 Rev. 4 AU-6\",\n \"NIST SP 800-53 Rev. 4 CA-2\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 RA-3\",\n \"NIST SP 800-53 Rev. 4 RA-5\",\n \"NIST SP 800-53 Rev. 4 SI-4\",\n \"AICPA TSC CC7.2\",\n \"AICPA TSC CC7.3\",\n \"ISO 27001:2013 A.16.1.2\",\n \"ISO 27001:2013 A.16.1.3\",\n \"ISO 27001:2013 A.16.1.4\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n # this is a passing check\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{asgArn}/asg-elb-asgs-elb-healthcheck-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": asgArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[Autoscaling.2] Autoscaling Groups with load balancer targets should use ELB health checks\",\n \"Description\": f\"Autoscaling group {asgName} has ELB or ELBv2 Targets and uses an ELB Health Check.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For information about enabling ELB health checks refer to the Add Elastic Load Balancing health checks to an Auto Scaling group section of the Amazon EC2 Auto Scaling User Guide\",\n \"Url\": \"https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-add-elb-healthcheck.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Compute\",\n \"AssetService\": \"AWS Auto Scaling\",\n \"AssetComponent\": \"Autoscaling Group\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsAutoScalingAutoScalingGroup\",\n \"Id\": asgArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsAutoScalingAutoScalingGroup\": {\n \"HealthCheckType\": healthCheckType\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 DE.AE-4\",\n \"NIST CSF V1.1 DE.DP-4\",\n \"NIST SP 800-53 Rev. 
4 AU-6\",\n \"NIST SP 800-53 Rev. 4 CA-2\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 RA-3\",\n \"NIST SP 800-53 Rev. 4 RA-5\",\n \"NIST SP 800-53 Rev. 4 SI-4\",\n \"AICPA TSC CC7.2\",\n \"AICPA TSC CC7.3\",\n \"ISO 27001:2013 A.16.1.2\",\n \"ISO 27001:2013 A.16.1.3\",\n \"ISO 27001:2013 A.16.1.4\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def analyze_thresholds(datapath, threshold_lt1, threshold_lt2, normalize = True, save = 1):\n print 'analyzing thresholds...' \n current_dir = os.getcwd()\n os.chdir(datapath)\n files = os.listdir(datapath)\n\n for k in arange(len(files)):\n right_file = '.npz' in files[k]\n \n if right_file:\n data = numpy.load(datapath+'\\\\'+files[k])\n \n CR_cts_after_seq_lt1 = data['cr_hist_LT1_first']\n CR_cts_after_seq_lt2 = data['cr_hist_LT2_first']\n\n nr_of_counts = arange(len(CR_cts_after_seq_lt1))\n\n CR_cts_total_lt1 = data['cr_hist_LT1_total']\n CR_cts_total_lt2 = data['cr_hist_LT2_total']\n \n if normalize:\n CR_cts_after_seq_lt2 = CR_cts_after_seq_lt2/float(sum(CR_cts_after_seq_lt2))\n CR_cts_total_lt2 = CR_cts_total_lt2/float(sum(CR_cts_total_lt2))\n times_passed_after_seq_lt2 = CR_cts_after_seq_lt2[nr_of_counts>=threshold_lt2].sum()*100\n times_passed_overall_lt2 = CR_cts_total_lt2[nr_of_counts>=threshold_lt2].sum()*100\n \n CR_cts_after_seq_lt1 = CR_cts_after_seq_lt1/float(sum(CR_cts_after_seq_lt1))\n CR_cts_total_lt1 = CR_cts_total_lt1/float(sum(CR_cts_total_lt1))\n times_passed_after_seq_lt1 = CR_cts_after_seq_lt1[nr_of_counts>=threshold_lt1].sum()*100\n times_passed_overall_lt1 = CR_cts_total_lt1[nr_of_counts>=threshold_lt1].sum()*100\n else:\n times_passed_after_seq_lt2 = CR_cts_after_seq_lt2[nr_of_counts>=threshold_lt2].sum()/float(CR_cts_after_seq_lt2.sum())*100\n times_passed_overall_lt2 = CR_cts_total_lt2[nr_of_counts>=threshold_lt2].sum()/float(CR_cts_total_lt2.sum())*100\n times_passed_after_seq_lt1 = CR_cts_after_seq_lt1[nr_of_counts>=threshold_lt1].sum()*100/float(CR_cts_after_seq_lt1.sum())\n times_passed_overall_lt1 = CR_cts_total_lt1[nr_of_counts>=threshold_lt1].sum()*100/float(CR_cts_total_lt1.sum())\n\n\n #print 'After sequence: LT2 percentage passed = ',num2str(sum(times_passed_after_seq_lt2),1),'%'\n #print 'and LT1 percentage passed = ',num2str(sum(times_passed_after_seq_lt1),1),'%'\n\n Log = False\n\n figure6 = plt.figure(figsize=(16.0, 12.0))\n plt.subplot(223)\n plt.bar(nr_of_counts,CR_cts_after_seq_lt2,log=Log, color = 'm')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT2: CR counts after sequence, passed threshold: '+num2str(times_passed_after_seq_lt2,1)+'%')\n else:\n plt.title('CR counts after sequence')\n plt.xlim(0,25)\n \n plt.subplot(224)\n plt.bar(nr_of_counts,CR_cts_total_lt2,log=Log, color = 'm')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT2: all CR checks, passed threshold: '+num2str(times_passed_overall_lt2,1)+'%')\n else:\n plt.title('CR counts for all CR checks')\n plt.xlim(0,25)\n\n plt.subplot(221)\n plt.bar(nr_of_counts,CR_cts_after_seq_lt1,log=Log, color = 'b')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT1: CR counts after sequence, passed threshold: '+num2str(times_passed_after_seq_lt1,1)+'%')\n else:\n plt.title('CR counts after sequence')\n plt.xlim(0,50)\n \n plt.subplot(222)\n 
plt.bar(nr_of_counts,CR_cts_total_lt1,log=Log, color = 'b')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT1: all CR checks, passed threshold: '+num2str(times_passed_overall_lt1,1)+'%')\n else:\n plt.title('CR counts for all CR checks')\n plt.xlim(0,50)\n \n if save:\n if normalize:\n figure6.savefig('CR_information_LT1_and_LT2_normalized.pdf')\n else:\n figure6.savefig('CR_information_LT1_and_LT2.pdf')\n\n\n return times_passed_overall_lt1, times_passed_after_seq_lt1, times_passed_overall_lt2, times_passed_after_seq_lt2", "def available_backends(filters=None, compact=True):\n backend_names = [str(backend)\n for backend in _DEFAULT_PROVIDER.available_backends(filters)]\n\n alias_dict = {v: k for k, v in _DEFAULT_PROVIDER.aliased_backend_names().items()}\n backend_names = [alias_dict[name] if name in alias_dict else name for name in backend_names]\n\n if compact:\n group_dict = _DEFAULT_PROVIDER.grouped_backend_names()\n groups = set()\n for name in backend_names:\n backend_group = set(k for k, v in group_dict.items() if name in v)\n if not backend_group:\n groups.add(name)\n elif len(backend_group) == 1:\n (group,) = backend_group\n groups.add(group)\n backend_names = list(groups)\n\n return sorted(backend_names)", "def test_create_deployment_config_for_all_namespaces(self):\n pass", "def define_metrics(config):\n metrics = []\n if config.get(\"data.output.label.choice\") == \"segmentation\":\n metrics = [\n ext_sm.metrics.IOUScore(),\n ext_sm.metrics.FScore(beta=0.5),\n ext_sm.metrics.FScore(beta=2),\n ]\n metrics = []\n elif config.get(\"data.output.label.choice\") == \"inversion\":\n metrics = [\n rmae\n ]\n return metrics", "def test_list_namespaced_egress_network_policy(self):\n pass", "def test_rate_reached_perf_issue(self):\n for i in range(0, 10):\n event = self.store_transaction(\n environment=None,\n project_id=self.project.id,\n user_id=str(i),\n fingerprint=[f\"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1\"],\n )\n perf_group = event.groups[0]\n snooze = GroupSnooze.objects.create(group=perf_group, count=10, window=24 * 60)\n assert not snooze.is_valid(test_rates=True)", "def check_threshold(count, warn, crit, logger):\n warn = int(warn)\n crit = int(crit)\n if count < warn:\n msg = (\"Normal: Resource Count={} is less than the warning={} level\".format(count, warn))\n logger.info(msg)\n print(msg)\n sys.exit(0)\n elif count >= warn and count < crit:\n msg = (\"Warning: Resource count={} has reached the warning={} level\".format(\n count, warn))\n logger.warning(msg)\n print(msg)\n sys.exit(1)\n elif count >= crit:\n msg = (\"Critical: Resource count={} has reached the critical={} level\".format(\n count, crit))\n logger.error(msg)\n print(msg)\n sys.exit(2)\n else:\n print(\"Unknown: Resource count is unknown\")\n sys.exit(3)", "def _validate_metrics(self, metrics):\n if metrics is None:\n raise ValueError(\"Expected metrics to be a list. 
Was None.\")\n if any([self.namespace != m.metric.namespace for m in metrics]):\n raise ValueError(\n f\"Metrics ({metrics}) and metrics provider namespace \"\n f\"{self.namespace} do not match.\"\n )", "def _CommonChecks(input_api, output_api):\n results = []\n results.extend(_CheckNoInterfacesInBase(input_api, output_api))\n results.extend(_CheckNoTraceEventInclude(input_api, output_api))\n results.extend(_WarnPbzeroIncludes(input_api, output_api))\n results.extend(CheckChangeLintsClean(input_api, output_api))\n return results", "def CHECK_ALL():\n vms = [py26_vm, py27_vm, pgsql84_vm, pgsql90_vm, pgsql91_vm,\n mysql51_vm, oracle10g_vm, mssql2005_vm, mssql2008_vm]\n for vm in vms:\n if vm.missing():\n warn(\"VM is not built: {}\", vm.name)\n for vm in vms:\n if vm.running():\n vm.stop()\n errors = 0\n try:\n for client_vm in [py26_vm, py27_vm]:\n if client_vm.missing():\n continue\n client_vm.start()\n client_vm.run(\"~/bin/pip -q install\"\n \" hg+http://bitbucket.org/prometheus/pbbt\")\n sh(\"hg clone --ssh='ssh -F %s' . ssh://linux-vm/src/htsql\"\n % (CTL_DIR+\"/ssh_config\"))\n errors += trial(\"hg update && python setup.py install\",\n \"installing HTSQL under %s\" % client_vm.name)\n errors += trial(\"pbbt test/regress.yaml -E test/regress.py\"\n \" -q -S /all/sqlite\",\n \"testing sqlite backend\")\n for server_vm, suite in [(pgsql84_vm, 'pgsql'),\n (pgsql90_vm, 'pgsql'),\n (pgsql91_vm, 'pgsql'),\n (mysql51_vm, 'mysql'),\n (oracle10g_vm, 'oracle'),\n (mssql2005_vm, 'mssql'),\n (mssql2008_vm, 'mssql')]:\n if server_vm.missing():\n continue\n server_vm.start()\n username_key = \"%s_USERNAME\" % suite.upper()\n password_key = \"%s_PASSWORD\" % suite.upper()\n host_key = \"%s_HOST\" % suite.upper()\n port_key = \"%s_PORT\" % suite.upper()\n username_value = { 'pgsql': \"postgres\",\n 'mysql': \"root\",\n 'oracle': \"system\",\n 'mssql': \"sa\" }[suite]\n password_value = \"admin\"\n host_value = \"10.0.2.2\"\n port_value = 10000+server_vm.port\n command = \"pbbt test/regress.yaml -E test/regress.py\" \\\n \" -q -S /all/%s\" \\\n \" -D %s=%s -D %s=%s -D %s=%s -D %s=%s\" \\\n % (suite, username_key, username_value,\n password_key, password_value,\n host_key, host_value, port_key, port_value)\n message = \"testing %s backend against %s\" \\\n % (suite, server_vm.name)\n errors += trial(command, message)\n server_vm.stop()\n errors += trial(\"pbbt test/regress.yaml -E test/regress.py\"\n \" -q -S /all/routine\",\n \"testing htsql-ctl routines\")\n client_vm.stop()\n except:\n for vm in vms:\n if vm.running():\n vm.stop()\n raise\n log()\n if errors:\n if errors == 1:\n warn(\"1 failed test\")\n else:\n warn(\"{} failed tests\", errors)\n else:\n log(\"`All tests passed`\")", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()", "def test_vs_bounds(pudl_out_eia, live_dbs, cases):\n if not live_dbs:\n pytest.skip(\"Data validation only works with a live PUDL DB.\")\n # This test should only run on the un-aggregated data:\n if pudl_out_eia.freq is not None:\n pytest.skip(\"Test should only run on un-aggregated data.\")\n\n for args in cases:\n pudl.validate.vs_bounds(pudl_out_eia.gf_eia923(), **args)", "def test_ap_hs20_max_bss_load(dev, apdev):\n params = hs20_ap_params()\n params['bss_load_test'] = \"12:200:20000\"\n hostapd.add_ap(apdev[0]['ifname'], params)\n\n params = hs20_ap_params()\n params['ssid'] = \"test-hs20-other\"\n params['bss_load_test'] = \"5:20:10000\"\n hostapd.add_ap(apdev[1]['ifname'], params)\n\n logger.info(\"Verify maximum BSS 
load constraint\")\n values = default_cred()\n values['domain'] = \"example.com\"\n values['max_bss_load'] = \"100\"\n events = policy_test(dev[0], apdev[1], values, only_one=False)\n\n ev = [e for e in events if \"INTERWORKING-AP \" + apdev[0]['bssid'] in e]\n if len(ev) != 1 or \"over_max_bss_load=1\" not in ev[0]:\n raise Exception(\"Maximum BSS Load case not noticed\")\n ev = [e for e in events if \"INTERWORKING-AP \" + apdev[1]['bssid'] in e]\n if len(ev) != 1 or \"over_max_bss_load=1\" in ev[0]:\n raise Exception(\"Maximum BSS Load case reported incorrectly\")\n\n logger.info(\"Verify maximum BSS load does not prevent connection\")\n values['max_bss_load'] = \"1\"\n events = policy_test(dev[0], None, values)\n\n ev = [e for e in events if \"INTERWORKING-AP \" + apdev[0]['bssid'] in e]\n if len(ev) != 1 or \"over_max_bss_load=1\" not in ev[0]:\n raise Exception(\"Maximum BSS Load case not noticed\")\n ev = [e for e in events if \"INTERWORKING-AP \" + apdev[1]['bssid'] in e]\n if len(ev) != 1 or \"over_max_bss_load=1\" not in ev[0]:\n raise Exception(\"Maximum BSS Load case not noticed\")", "def _validate_schedulers(config, schedulers):\n for scheduler in schedulers:\n if scheduler not in PCLUSTER_SCHEDULERS:\n error = f\"Invalid scheduler ({scheduler}) found in config.\"\n logging.error(error)\n raise AssertionError(error)", "async def check_thresholds_periodic(alignment, chains, callback, check_freq, min_cycles, **thresholds):\n while True:\n result = check_thresholds(alignment, chains, min_cycles, **thresholds)\n # None indicates that the minimum number of cycles has not yet been reached\n if result is None or not result.stop:\n if result is not None:\n # print some data for the user\n result.print_data()\n print('') # new line\n\n await asyncio.sleep(check_freq)\n continue\n else:\n # print some data for the user\n result.print_data()\n print('') # new line\n callback(result)\n break", "def check_budgets_of_all_shops() -> None:\n shops = get_online_shops_with_budgets()\n for shop in shops:\n print(shop)\n shop_id = shop['a_id']\n shop_name = shop['a_name']\n month_of_budget = shop['a_month']\n monthly_budget = float(shop['a_budget_amount'])\n monthly_expenditure = shop['a_amount_spent']\n percentage = shop['percentage']\n\n # We want to notify shops when they reach 50% of the current month's budget.\n if percentage > 50:\n notify_shop(threshold=50, shop_id=shop_id, shop_name=shop_name, month_of_budget=month_of_budget, monthly_budget=monthly_budget, monthly_expenditure=monthly_expenditure,\n percentage=percentage)\n print(\"--------------------------------------------------------------------------------------------------------------------------------\")\n # Once they reach 100% of the current month's budget, the shops should be notified again and set to _offline_.\n if percentage > 100:\n notify_shop(threshold=100, shop_id=shop_id, shop_name=shop_name, month_of_budget=month_of_budget, monthly_budget=monthly_budget, monthly_expenditure=monthly_expenditure,\n percentage=percentage)\n # Shops that need to go _offline_ according to the rules in the previous section should be marked as such in the database.\n set_offline(shop_id=shop_id)\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n if not shops:\n print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n print(\"+All notifications are sent. 
Exit?+\")\n print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n run()", "def get_kong_node_usage_metrics(opts):\n\n url = \"{0}/status\".format(opts['base_url'])\n\n r = requests.get(url)\n try:\n r.raise_for_status()\n except requests.exceptions.RequestException as e:\n logging.debug(\"http response body - %s\", r.text)\n logging.error(\"An exception occurred: (%s)\", e)\n sys.exit(2)\n\n print r.text\n\n return True", "def test_list_policy_binding_for_all_namespaces(self):\n pass", "def test_metric_namespace(self):\n self.statsd.namespace = \"foo\"\n self.statsd.gauge('gauge', 123.4)\n self.assert_equal_telemetry('foo.gauge:123.4|g\\n', self.recv(2))", "def check_limits(data, tables, max_percent=1., replace_with='interpolation'):\n from . import trend as pmtrend\n import numpy as np\n from . import algs\n import pandas as pd\n\n df = data.copy()\n max_count = int(len(df)*max_percent/100.)\n low_count = pd.Series(0, index=tables.columns)\n upp_count = pd.Series(0, index=tables.columns)\n fault_count = pd.Series(0, index=tables.columns)\n\n #-----------\n # First we check the lower values\n if 'lower_limits' in tables.index.values:\n faulty = df < tables.loc['lower_limits']\n low_count = df[ faulty ].count() \n df[ faulty ] = np.nan\n #-------------------------------\n \n #-------------------------------\n # Now we check the upper values\n if 'upper_limits' in tables.index.values:\n faulty = df > tables.loc['upper_limits']\n upp_count = df[ faulty ].count() \n df[ faulty ] = np.nan\n #-------------------------------\n\n fault_count = low_count + upp_count\n valid = fault_count < max_count\n\n #------------\n # Replace data with either its trend or by interpolating\n if replace_with=='trend':\n trend = pmdata.trend(df, how='linear')\n df = df.fillna(trend)\n elif replace_with=='interpolation':\n df = df.interpolate(method='index', limit_direction='both')\n #------------\n\n #-------------------------------\n # Substitute faulty points by the linear trend\n #trend = data.polyfit()\n #df = df.fillna(trend)\n #-------------------------------\n\n return df, valid, fault_count", "def check_configs(self):\n\n pass", "def isolated_backends(backend_echo, backend_quotes, proxy):\n\n echo_metric = backend_echo.metrics.list()[0]\n quotes_metric = backend_quotes.metrics.list()[0]\n\n backend_echo.mapping_rules.create(rawobj.Mapping(echo_metric, \"/anything/loud\"))\n backend_echo.mapping_rules.create(rawobj.Mapping(echo_metric, \"/anything/low\"))\n\n backend_quotes.mapping_rules.create(rawobj.Mapping(quotes_metric, \"/anything/qod\"))\n\n proxy.deploy()", "def run_checks(self):\n\n try:\n check_obj = self.metadata.get_described_element()\n except ObjectDoesNotExist:\n pass\n\n if self.metadata.is_service_metadata:\n if self.metadata.is_service_type(OGCServiceEnum.WMS):\n self.check_wms(check_obj)\n elif self.metadata.is_service_type(OGCServiceEnum.WFS):\n self.check_wfs(check_obj)\n\n elif self.metadata.is_layer_metadata:\n self.check_layer(check_obj)\n elif self.metadata.is_featuretype_metadata:\n self.check_featuretype(check_obj)\n elif self.metadata.is_dataset_metadata:\n self.check_dataset()\n\n # all checks are done. 
Calculate the health state for all monitoring results\n health_state = HealthState.objects.create(monitoring_run=self.monitoring_run, metadata=self.metadata)\n health_state.run_health_state()", "def check_extensions(self):\n extensions = self.cloud.get_network_extensions()\n for network_extension in self.neutron_extensions:\n if network_extension not in extensions:\n LOGGER.warning(\n \"Cannot find Neutron extension: %s\", network_extension)\n self.is_skipped = True\n break", "def test_check_ess_settings(self):\n ess_settings1 = {'gaussian': [self.servers[0]], 'molpro': [self.servers[1], self.servers[0]],\n 'qchem': [self.servers[0]]}\n ess_settings2 = {'gaussian': self.servers[0], 'molpro': self.servers[1], 'qchem': self.servers[0]}\n ess_settings3 = {'gaussian': self.servers[0], 'molpro': [self.servers[1], self.servers[0]],\n 'qchem': self.servers[0]}\n ess_settings4 = {'gaussian': self.servers[0], 'molpro': self.servers[1], 'qchem': self.servers[0]}\n ess_settings5 = {'gaussian': 'local', 'molpro': self.servers[1], 'qchem': self.servers[0]}\n\n ess_settings1 = check_ess_settings(ess_settings1)\n ess_settings2 = check_ess_settings(ess_settings2)\n ess_settings3 = check_ess_settings(ess_settings3)\n ess_settings4 = check_ess_settings(ess_settings4)\n ess_settings5 = check_ess_settings(ess_settings5)\n\n ess_list = [ess_settings1, ess_settings2, ess_settings3, ess_settings4, ess_settings5]\n\n for ess in ess_list:\n for soft, server_list in ess.items():\n self.assertTrue(soft in ['gaussian', 'molpro', 'qchem'])\n self.assertIsInstance(server_list, list)\n\n with self.assertRaises(SettingsError):\n ess_settings6 = {'nosoft': ['server1']}\n check_ess_settings(ess_settings6)\n with self.assertRaises(SettingsError):\n ess_settings7 = {'gaussian': ['noserver']}\n check_ess_settings(ess_settings7)", "def test_b_negative_add_qos(self):\n qoss = {\"qos_120\": 120, \"qos_-5\": -5}\n for qos_name, qos_value in qoss.iteritems():\n testflow.step(\n \"Create CPU QoS %s on datacenter %s with parameters: %s\",\n qos_name, conf.DC_NAME[0], qos_value\n )\n assert not ll_datacenters.add_qos_to_datacenter(\n datacenter=conf.DC_NAME[0],\n qos_name=qos_name,\n qos_type=conf.QOS_TYPE_CPU,\n cpu_limit=qos_value\n )", "def rule_40_igw_available(session):\n def has_igw(session, side):\n conn_vpc = session[\"conn\"][side](\"vpc\")\n subnet = conn_vpc.get_all_subnets(\n [session[\"config\"][side][\"res\"][\"subnet_id\"]])[0]\n\n for igw in conn_vpc.get_all_internet_gateways():\n for att in igw.attachments:\n if att.vpc_id == subnet.vpc_id:\n return True\n return False\n\n return has_igw(session, \"server\") and has_igw(session, \"client\")", "def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. 
Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")", "def test_sinus_alerts_when_no_breath(app, events, data, sim_sampler):\n app.run_iterations(SIMULATION_SAMPLES)\n assert len(events.alerts_queue) == 0\n\n # mocking time continue for no breath time.\n intervals = 1 / DriverFactory.MOCK_SAMPLE_RATE_HZ\n num_of_samples = int(NO_BREATH_TIME / intervals)\n for _ in range(num_of_samples):\n sim_sampler._timer.get_time()\n\n app.run_iterations(1)\n assert len(events.alerts_queue) == 1\n alert = events.alerts_queue.queue.get()\n assert alert == alerts.AlertCodes.NO_BREATH", "def multi_event(st,et,instrument_chosen,subevent):\r\n print('checking for multiple events within given time window')\r\n \r\n #creating file for time window with first events for all thresholds\r\n out_name = Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent)\r\n\r\n #creating files for all second events for all thresholds\r\n new_files = two_in_one(out_name,et,subevent)\r\n \r\n #creating files for any third events for all thresholds that had a second event\r\n for file in new_files:\r\n two_in_one(file,et,subevent) \r\n \r\n return", "def load_balancing(env, service, paths):\n selected_path = env.k_paths # initialize the path to an out of bounds, i.e., non-existent\n least_load = np.finfo(0.0).max # initializes load to the maximum value of a float\n for idp, path in enumerate(paths):\n if is_path_free(env.topology, path, service.number_units) and get_max_usage(env.topology, path) < least_load:\n least_load = get_max_usage(env.topology, path)\n selected_path = idp\n return selected_path < env.k_paths, selected_path", "def collect_handlers(log, base_url, validation):\n base_bookstore_pattern = url_path_join(base_url, '/bookstore')\n base_bookstore_api_pattern = url_path_join(base_url, '/api/bookstore')\n\n handlers = []\n # Always enable the version handler for the API\n handlers.append((base_bookstore_api_pattern, BookstoreVersionHandler))\n\n if validation['publish_valid']:\n log.info(f\"[bookstore] Enabling bookstore publishing, version: {version}\")\n handlers.append(\n (\n url_path_join(base_bookstore_api_pattern, r\"/publish%s\" % path_regex),\n BookstorePublishAPIHandler,\n )\n )\n else:\n log.info(\"[bookstore] Publishing disabled. 
s3_bucket or endpoint are not configured.\")\n\n if validation['s3_clone_valid']:\n log.info(f\"[bookstore] Enabling bookstore cloning, version: {version}\")\n handlers.append(\n (url_path_join(base_bookstore_api_pattern, r\"/clone(?:/?)*\"), BookstoreCloneAPIHandler)\n ),\n handlers.append(\n (url_path_join(base_bookstore_pattern, r\"/clone(?:/?)*\"), BookstoreCloneHandler)\n )\n else:\n log.info(f\"[bookstore] bookstore cloning disabled, version: {version}\")\n\n if validation['fs_clone_valid']:\n log.info(f\"[bookstore] Enabling filesystem cloning, version: {version}\")\n handlers.append(\n (url_path_join(base_bookstore_pattern, r\"/fs-clone(?:/?)*\"), BookstoreFSCloneHandler)\n )\n handlers.append(\n (\n url_path_join(base_bookstore_api_pattern, r\"/fs-clone(?:/?)*\"),\n BookstoreFSCloneAPIHandler,\n )\n ),\n else:\n log.info(f\"[bookstore] bookstore cloning disabled, version: {version}\")\n return handlers", "def test_metrics_server(self):\n validate_metrics_server()", "def test_ap_hs20_max_bss_load2(dev, apdev):\n params = hs20_ap_params()\n params['bss_load_test'] = \"12:200:20000\"\n hostapd.add_ap(apdev[0]['ifname'], params)\n\n params = hs20_ap_params()\n params['ssid'] = \"test-hs20-other\"\n hostapd.add_ap(apdev[1]['ifname'], params)\n\n logger.info(\"Verify maximum BSS load constraint with AP advertisement\")\n values = default_cred()\n values['domain'] = \"example.com\"\n values['max_bss_load'] = \"100\"\n events = policy_test(dev[0], apdev[1], values, only_one=False)\n\n ev = [e for e in events if \"INTERWORKING-AP \" + apdev[0]['bssid'] in e]\n if len(ev) != 1 or \"over_max_bss_load=1\" not in ev[0]:\n raise Exception(\"Maximum BSS Load case not noticed\")\n ev = [e for e in events if \"INTERWORKING-AP \" + apdev[1]['bssid'] in e]\n if len(ev) != 1 or \"over_max_bss_load=1\" in ev[0]:\n raise Exception(\"Maximum BSS Load case reported incorrectly\")", "def backend_usages(service):\n\n return service.backend_usages.list()", "def get_healthchecks(\n self, service_namespace_config: ServiceNamespaceConfig\n ) -> List[HealthcheckDict]:\n\n mode = self.get_healthcheck_mode(service_namespace_config)\n\n graceperiodseconds = self.get_healthcheck_grace_period_seconds()\n intervalseconds = self.get_healthcheck_interval_seconds()\n timeoutseconds = self.get_healthcheck_timeout_seconds()\n maxconsecutivefailures = self.get_healthcheck_max_consecutive_failures()\n\n if mode == \"http\" or mode == \"https\":\n http_path = self.get_healthcheck_uri(service_namespace_config)\n protocol = f\"MESOS_{mode.upper()}\"\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": protocol,\n \"path\": http_path,\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"portIndex\": 0,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode == \"tcp\":\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": \"TCP\",\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"portIndex\": 0,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode == \"cmd\":\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": \"COMMAND\",\n \"command\": self.get_healthcheck_cmd(),\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode is None:\n healthchecks = []\n else:\n raise 
InvalidHealthcheckMode(\n \"Unknown mode: %s. Only acceptable healthcheck modes are http/https/tcp/cmd\"\n % mode\n )\n return healthchecks", "def checks(self, all=False):\n if all:\n warn_states = [\"unknown\", \"passing\", \"warning\", \"critical\"]\n else:\n warn_states = [\"unknown\", \"warning\", \"critical\"]\n checks = {}\n for warn_state in warn_states:\n for state in self.consul.health.state(warn_state):\n if not state['Node'] in checks:\n checks[state['Node']] = dict()\n if not state['ServiceID'] in checks[state['Node']]:\n checks[state['Node']][state['ServiceID']] = {\n 'checks': [],\n 'name': state['ServiceName']\n }\n checks[state['Node']][state['ServiceID']]['checks'].append(\n (state['Name'], state['Status'], state['Output'])\n )\n return checks", "def countSimulationEventQueues(self):\r\n raise NotImplementedError()", "def _events_available_during_other_events(\n events, slots, X, summation_type=None\n):\n summation = lpu.summation_functions[summation_type]\n event_availability_array = lpu.event_availability_array(events)\n\n label = 'Event clashes with another event'\n for slot1, slot2 in lpu.concurrent_slots(slots):\n for row, event in enumerate(event_availability_array):\n if events[row].unavailability:\n for col, availability in enumerate(event):\n if availability == 0:\n yield Constraint(\n f'{label} - event: {row} and event: {col}',\n summation(\n (X[row, slot1], X[col, slot2])\n ) <= 1 + availability\n )", "def _check_all_systems_ready(self):\n raise NotImplementedError()", "def test_alerts_when_no_breath(app, events, data):\n time_intervals = 1 / DriverFactory.MOCK_SAMPLE_RATE_HZ\n num_of_samples = int(NO_BREATH_TIME / time_intervals)\n app.run_iterations(num_of_samples)\n assert alerts.AlertCodes.NO_BREATH in events.alerts_queue.active_alerts, \\\n f\"NO_BREATH missing from: {events.alerts_queue.active_alerts}\"", "def _apply_gating_logic(self):\n if self._mode != QcQuantizeOpMode.LEARN_ENCODINGS:\n return\n\n applied_quantizers = set()\n def apply_logic(name, quantizer):\n if quantizer in self._grouped_quantizers.values():\n if quantizer in applied_quantizers:\n return\n\n name, *_ = [n for n, q in self._grouped_quantizers.items() if q == quantizer]\n\n if quantizer.enabled:\n if quantizer.bitwidth == 32 or quantizer.data_type == QuantizationDataType.float:\n return\n set_encoding_min_max_gating_threshold(\n getattr(self, name + '_encoding_min'),\n getattr(self, name + '_encoding_max'))\n applied_quantizers.add(quantizer)\n\n for name, quantizer in self.input_quantizers.items():\n apply_logic(name, quantizer)\n for name, quantizer in self.output_quantizers.items():\n apply_logic(name, quantizer)\n for name, quantizer in self._param_quantizers.items():\n apply_logic(name, quantizer)", "def test_list_applied_cluster_resource_quota_for_all_namespaces(self):\n pass", "def main(argv):\n\tendpoint = \"C\"\n\tif len(argv) > 0:\n\t\tendpoint = argv[0]\t\t\n\t\n\tsite_map = {}\n\tsite_map[\"5 Major Pooled\"] = [\"Agogo\", \"Kintampo\", \"Kombewa\", \"Nanoro\", \"Siaya\"]\n\tsite_map[\"11 Pooled\"] = [\"Agogo\", \"Kintampo\", \"Kombewa\", \"Nanoro\", \"Siaya\", \"Bagamoyo\", \"Kilifi\", \"Korogwe\", \"Lambarene\", \"Lilongwe\", \"Manhica\"]\n\tstart_map = {}\n\tstart_map[\"TEP\"] = 294\n\tstart_map[\"SERA2\"] = 36\n\tend_map = {}\n\tend_map[\"TEP\"] = 387\n\tend_map[\"SERA2\"] = 119\n\t\n\tthreshold_map = {}\n\tthreshold_map[\"TEP_5 Major Pooled_6-12 Weeks_C\"] = 4\n\tthreshold_map[\"TEP_11 Pooled_6-12 Weeks_C\"] = 4\n\tthreshold_map[\"TEP_5 Major Pooled_5-17 Months_C\"] = 
4\n\tthreshold_map[\"TEP_11 Pooled_5-17 Months_C\"] = 4\t\n\tthreshold_map[\"TEP_5 Major Pooled_6-12 Weeks_X\"] = 3\n\tthreshold_map[\"TEP_11 Pooled_6-12 Weeks_X\"] = 3\n\tthreshold_map[\"TEP_5 Major Pooled_5-17 Months_X\"] = 4\n\tthreshold_map[\"TEP_11 Pooled_5-17 Months_X\"] = 4\t\n\t\n\tthreshold_map[\"SERA2_5 Major Pooled_6-12 Weeks_C\"] = 4\n\tthreshold_map[\"SERA2_11 Pooled_6-12 Weeks_C\"] = 4\n\tthreshold_map[\"SERA2_5 Major Pooled_5-17 Months_C\"] = 4\n\tthreshold_map[\"SERA2_11 Pooled_5-17 Months_C\"] = 4\t\n\tthreshold_map[\"SERA2_5 Major Pooled_6-12 Weeks_X\"] = 4\n\tthreshold_map[\"SERA2_11 Pooled_6-12 Weeks_X\"] = 3\n\tthreshold_map[\"SERA2_5 Major Pooled_5-17 Months_X\"] = 4\n\tthreshold_map[\"SERA2_11 Pooled_5-17 Months_X\"] = 4\t\t\t\n\t\t\n\tdata = get_data(endpoint)\n\t\n\tprint \"locus\\tsite\\tcohort\\taa\"\n\n\tfor locus in [\"SERA2\"]:\n\t\tfor site_group in [\"5 Major Pooled\", \"11 Pooled\"]:\n\t\t\tfor cohort in [\"6-12 Weeks\", \"5-17 Months\"]:\t\t\n\t\t\t\tsubset = subset_data(data, locus, site_map[site_group], cohort)\n\t\t\t\tthreshold = threshold_map[\"_\".join([locus, site_group, cohort, endpoint])]\n\t\t\t\taa_sites = get_filtered_sites(subset, start_map[locus], end_map[locus], threshold)\n\t\t\t\tprint_filtered_sites(locus, site_group, cohort, aa_sites)", "def check_bollinger(self):\n upper, lower = self.bollinger_bands()\n if self.daily['Adj Close'][-1] > upper[-1]:\n self.debug += '\\nAbove upper bollinger: sells + 1'\n self.sells += 1\n elif self.daily['Adj Close'][-1] < lower[-1]:\n self.debug += '\\nBelow lower bollinger: buys + 1'\n self.buys += 1", "def test_list_deployment_config_for_all_namespaces(self):\n pass", "def test_create_pod_security_policy_review_for_all_namespaces(self):\n pass", "def test_all_meet(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n vols = len(ctx.cluster.data_stores.values())\n self.generic_function(above_objective=vols)", "def get_expected_instance_count_for_namespace(\n service: str,\n namespace: str,\n cluster: str = None,\n instance_type_class: Type[LongRunningServiceConfig] = MarathonServiceConfig,\n soa_dir: str = DEFAULT_SOA_DIR,\n) -> int:\n total_expected = 0\n if not cluster:\n cluster = load_system_paasta_config().get_cluster()\n\n pscl = PaastaServiceConfigLoader(\n service=service, soa_dir=soa_dir, load_deployments=False\n )\n for job_config in pscl.instance_configs(\n cluster=cluster, instance_type_class=instance_type_class\n ):\n if f\"{service}.{namespace}\" in job_config.get_registrations():\n total_expected += job_config.get_instances()\n return total_expected", "def _check_sensor_schema(conf):\n try:\n valid = [s.name for s in pysma.Sensors()]\n except (ImportError, AttributeError):\n return conf\n\n customs = list(conf[CONF_CUSTOM].keys())\n\n for sensor in conf[CONF_SENSORS]:\n if sensor in customs:\n _LOGGER.warning(\n \"All custom sensors will be added automatically, no need to include them in sensors: %s\",\n sensor,\n )\n elif sensor not in valid:\n raise vol.Invalid(f\"{sensor} does not exist\")\n return conf", "def getMetricsClass(pred_bboxes, gt_bboxes, nclasses):\r\n aps = []\r\n iou = []\r\n for cls in range(nclasses):\r\n if bool(pred_bboxes):\r\n if len(pred_bboxes[0]) == 4: \r\n avg_precision_class, iou_class = getMetrics(pred_bboxes, gt_bboxes)\r\n if len(pred_bboxes[0]) == 5:\r\n avg_precision_class, iou_class = getMetrics(pred_bboxes, gt_bboxes, confidence = True)\r\n else:\r\n 
avg_precision_class = 0\r\n iou_class = 0\r\n\r\n aps.append(avg_precision_class)\r\n iou.append(iou_class)\r\n \r\n return np.mean(aps), np.mean(iou)", "def check_servers (self):\n results = []\n\n for server_name in self.servers_sumtimes.keys():\n server = self._get_server(server_name)\n\n time = float(self.servers_sumtimes[server_name]) / self.servers_counts[server_name]\n server_time, created = models.HBase_ServerTime.objects.get_or_create(server=server)\n server_avg_state = server_time.averager_state\n avg_time = server_avg_state.value()\n\n if avg_time is not None and self.is_anomaly(avg_time, time):\n msg=\"\"\"\nAverage server's probe time exceeded average + threshold (%.2f%%). Values:\n- server: %s\n- history response time %.2f ms\n- probe time %.2f ms\n\"\"\" % (anomaly_threshold * 100.0, server_name, avg_time, time)\n result = ProcessAnomaly(is_region=False, object_name=server_name,\n text=\"Request time %.2f ms (avg is %.2f)\" % (time, avg_time),\n description=msg)\n results.append(result)\n else:\n # Normal value, update state\n self.averager.update(time, server_avg_state)\n server_time.averager_state = server_avg_state\n server_time.save()\n return results", "def autoscaling_high_availability_az_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n ec2 = session.client(\"ec2\")\n # ISO Time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n # Collect the open AZs in the Region\n regionalAzs = []\n for az in ec2.describe_availability_zones(AllAvailabilityZones=False)[\"AvailabilityZones\"]:\n if (az[\"State\"] == \"available\" and az[\"OptInStatus\"] != \"not-opted-in\"):\n if az[\"ZoneName\"] not in regionalAzs:\n regionalAzs.append(az[\"ZoneName\"])\n availableAzCount = len(regionalAzs)\n for asg in describe_auto_scaling_groups(cache, session)[\"AutoScalingGroups\"]:\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(asg,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n asgArn = asg[\"AutoScalingGroupARN\"]\n asgName = asg[\"AutoScalingGroupName\"]\n healthCheckType = asg[\"HealthCheckType\"]\n # Check specific metadata\n asgAzs = asg[\"AvailabilityZones\"]\n if len(asgAzs) < (availableAzCount / 2):\n # this is a failing check\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{asgArn}/asg-multiaz-ha-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": asgArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": \"[Autoscaling.3] Autoscaling Groups should use at least half of a Region's Availability Zones\",\n \"Description\": f\"Autoscaling group {asgName} does not use at least half of {awsRegion}'s {availableAzCount} available Availability Zones and only uses {len(asgAzs)}. Allowing instances to scale across more Availability Zones increases the availability and resilience of your applications in the case of unavailable resources, Availability Zone degradation, or to rapidly recover from unplanned application failures. 
To take advantage of the safety and reliability of geographic redundancy, span your Auto Scaling group across multiple Availability Zones within a Region and attach a load balancer to distribute incoming traffic across those Availability Zones. Review the remediation section for more information on this configuration.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"To learn more about adding AZs to your ASGs refer to the Add and remove Availability Zones section of the Amazon EC2 Auto Scaling User Guide\",\n \"Url\": \"https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-add-availability-zone.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Compute\",\n \"AssetService\": \"AWS Auto Scaling\",\n \"AssetComponent\": \"Autoscaling Group\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsAutoScalingAutoScalingGroup\",\n \"Id\": asgArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsAutoScalingAutoScalingGroup\": {\n \"HealthCheckType\": healthCheckType\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.DS-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 AU-4\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 4 SC-6\",\n \"AICPA TSC CC3.1\",\n \"AICPA TSC A1.1\",\n \"AICPA TSC A1.2\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n # this is a passing check\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{asgArn}/asg-multiaz-ha-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": asgArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[Autoscaling.3] Autoscaling Groups should use at least half of a Region's Availability Zones\",\n \"Description\": f\"Autoscaling group {asgName} uses at least half of {awsRegion}'s {availableAzCount} available Availability Zones by using {len(asgAzs)}.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"To learn more about adding AZs to your ASGs refer to the Add and remove Availability Zones section of the Amazon EC2 Auto Scaling User Guide\",\n \"Url\": \"https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-add-availability-zone.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Compute\",\n \"AssetService\": \"AWS Auto Scaling\",\n \"AssetComponent\": \"Autoscaling Group\"\n },\n \"Resources\": [\n {\n 
\"Type\": \"AwsAutoScalingAutoScalingGroup\",\n \"Id\": asgArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsAutoScalingAutoScalingGroup\": {\n \"HealthCheckType\": healthCheckType\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.DS-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 AU-4\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 4 SC-6\",\n \"AICPA TSC CC3.1\",\n \"AICPA TSC A1.1\",\n \"AICPA TSC A1.2\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def check_api(self):\n catalog = self.service_catalog\n for service in catalog:\n if service['name'] not in self.RESOURCE_MAP:\n self.logger.notice(\"Don't know how to check service '%s'\" %\n service['name'])\n status = self.UNKNOWN\n else:\n r = self.get(service['name'],\n self.RESOURCE_MAP[service['name']])\n if not r or r.status_code < 200 or r.status_code > 299:\n status = self.FAIL\n else:\n status = self.OK\n\n yield {\n 'service': service['name'],\n 'status': status,\n 'region': service['region']\n }", "def test_metrics(self):\n # Check the route\n self.check_metrics(self.test_metrics_submission_id, False, \"award\")\n self.check_metrics(self.test_metrics_submission_id, True, \"award_financial\")\n self.check_metrics(self.test_metrics_submission_id, True, \"appropriations\")" ]
[ "0.51222503", "0.51177275", "0.5094526", "0.50608486", "0.49589232", "0.49122682", "0.4861849", "0.48579317", "0.48054186", "0.47776112", "0.47486487", "0.47325936", "0.46939585", "0.4686834", "0.4660218", "0.45691466", "0.4541956", "0.45377165", "0.4520044", "0.45192608", "0.44956958", "0.44946888", "0.4490688", "0.4479495", "0.4474163", "0.4464776", "0.44614807", "0.44560575", "0.4434048", "0.441718", "0.4417152", "0.44040266", "0.44015607", "0.43946967", "0.43895462", "0.43756613", "0.43551022", "0.43494856", "0.43471247", "0.43431783", "0.4338124", "0.4336148", "0.43281844", "0.43181694", "0.43140125", "0.43139923", "0.4301398", "0.4298829", "0.42939353", "0.42914414", "0.42907572", "0.4286969", "0.42842948", "0.42813167", "0.42798275", "0.42613038", "0.42562404", "0.42558745", "0.42533377", "0.42463043", "0.42390952", "0.42342532", "0.4232035", "0.42318347", "0.42297956", "0.422684", "0.42260164", "0.42252323", "0.42222232", "0.42214182", "0.42198053", "0.4215272", "0.4211773", "0.4207285", "0.42048803", "0.4199101", "0.41969702", "0.41940543", "0.419294", "0.41918823", "0.41868955", "0.4184504", "0.4183748", "0.4183526", "0.4179165", "0.41750976", "0.41726348", "0.4171647", "0.41679096", "0.41674104", "0.41658753", "0.4163852", "0.4162643", "0.4160731", "0.41477084", "0.41448507", "0.41433394", "0.41421664", "0.4141769", "0.41394636", "0.41377404" ]
0.0
-1
Checks a service's replication levels based on how the service's replication should be monitored. (smartstack or mesos)
def check_service_replication( instance_config, all_tasks, smartstack_replication_checker, ): expected_count = instance_config.get_instances() log.info("Expecting %d total tasks for %s" % (expected_count, instance_config.job_id)) proxy_port = marathon_tools.get_proxy_port_for_instance( name=instance_config.service, instance=instance_config.instance, cluster=instance_config.cluster, soa_dir=instance_config.soa_dir, ) registrations = instance_config.get_registrations() # if the primary registration does not match the service_instance name then # the best we can do is check marathon for replication (for now). if proxy_port is not None and registrations[0] == instance_config.job_id: check_smartstack_replication_for_instance( instance_config=instance_config, expected_count=expected_count, smartstack_replication_checker=smartstack_replication_checker, ) else: check_healthy_marathon_tasks_for_service_instance( instance_config=instance_config, expected_count=expected_count, all_tasks=all_tasks, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mmo_configsrv_replication_status(self, mmo_connection):\n replication_state = []\n if self.mmo_is_mongos(mmo_connection):\n configsrv = self.mmo_config_servers(mmo_connection)[0]\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n c = self.mmo_connect_mongod(hostname=configsrv[\"hostname\"],\n \t port=configsrv[\"port\"],\n \t username=auth_dic[\"username\"],\n \t password=auth_dic[\"password\"],\n \tauthentication_db=auth_dic[\"authentication_database\"]\n \t)\n if self.mmo_is_cfg_rs(c):\n command_output = c[\"admin\"].command(\"replSetGetStatus\")\n shard = command_output[\"set\"]\n replication_state.append({\"hostname\": configsrv[\"hostname\"], \"port\": configsrv[\"port\"], \"shard\": shard, \"command_output\": command_output})\n else:\n raise Exception(\"Not a mongos process\")\n return replication_state", "def mmo_replication_status(self, mmo_connection):\n replication_state = []\n if self.mmo_is_mongos(mmo_connection):\n #o = self.mmo_execute_on_primaries(mmo_connection, \"replSetGetStatus\")\n o = self.mmo_execute_on_secondary_or_primary(mmo_connection, \"replSetGetStatus\", \"all\", True)\n #print o2;\n return o\n else:\n raise Exception(\"Not a mongos process\")", "def mmo_replication_status_summary(self, mmo_connection):\n replication_summary = []\n primary_info = {}\n o = self.mmo_replication_status(mmo_connection)\n o = o + self.mmo_configsrv_replication_status(mmo_connection)\n replset_hosts_up_down = {}\n for shard in self.shards:\n replset_hosts_up_down[shard] = 0\n for replicaset in o:\n if \"Error\" not in replicaset[\"command_output\"].keys():\n for member in replicaset[\"command_output\"][\"members\"]:\n if member[\"stateStr\"] == \"PRIMARY\":\n primary_info[replicaset[\"command_output\"][\"set\"]] = member[\"optimeDate\"]\n\n replication_summary.append( { \"replicaset\": replicaset[\"command_output\"][\"set\"],\n \"hostname\": member[\"name\"],\n \"state\": member[\"stateStr\"],\n \"uptime\": member[\"uptime\"],\n \"configVersion\": member[\"configVersion\"],\n \"optimeDate\": member[\"optimeDate\"] } )\n for doc in replication_summary:\n if doc[\"state\"] == \"PRIMARY\":\n doc[\"lag\"] = \"NA\" # not relevant here\n else: # calculate the slave lag from the PRIMARY optimeDate\n if doc[\"replicaset\"] in primary_info.keys(): # is there a primary in the replset?\n try:\n if hasattr((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]), \"total_seconds\"): # Does not exist in python 2.6\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).total_seconds())\n else: # for python 2.6 that does not have total_seconds attribute\n # Will only be correct for delays of up to 24 hours\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).seconds) # Primary needs ot be first in this case\n except:\n doc[\"lag\"] = \"ERR\"\n else:\n doc[\"lag\"] = \"UNK\" # We cannot know what the delay is if there is no primary\n else:\n replset_hosts_up_down[replicaset[\"shard\"]] += 1\n\n #else: Probably redundant code now. 
Removed ot fix https://github.com/rhysmeister/mmo/issues/34\n # We cannot know the state of much of the replicaset at this point\n # replication_summary.append({\"replicaset\": replicaset[\"shard\"],\n # \"hostname\": \"UNK\",\n # \"state\": \"UNK\",\n # \"uptime\": \"UNK\",\n # \"configVersion\": \"UNK\",\n # \"optimeDate\": \"UNK\"})\n\n\n shard_server_count = {}\n # how many servers in each shard\n for shard in self.shards:\n shard_server_count[shard] = 0\n for s in self.shard_servers:\n shard_server_count[s['shard']] += 1\n # are all the hosts of any shard down?\n for shard in self.shards:\n if replset_hosts_up_down[shard] > 0:\n if replset_hosts_up_down[shard] == shard_server_count[shard]:\n replication_summary.append({\"replicaset\": shard,\n \"hostname\": \"UNK\",\n \"state\": \"UNK\",\n \"uptime\": \"UNK\",\n \"configVersion\": \"UNK\",\n \"optimeDate\": \"UNK\",\n \"lag\": \"UNK\"})\n deduped_replication_summary = []\n for d in replication_summary:\n if d not in deduped_replication_summary:\n deduped_replication_summary.append(d)\n return deduped_replication_summary", "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. 
Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def replication_status(self):\n psql = postgresql_svc.PSQL()\n try:\n query_out = psql.execute(self.replication_status_query)\n except PopenError, e:\n if 'function pg_last_xact_replay_timestamp() does not exist' in str(e):\n raise BaseException('This version of PostgreSQL server does not support replication status')\n else:\n raise e\n query_result = self._parse_query_out(query_out)\n\n is_master = int(__postgresql__[OPT_REPLICATION_MASTER])\n\n if query_result['xlog_delay'] is None:\n if is_master:\n return {'master': {'status': 'up'}}\n return {'slave': {'status': 'down',\n 'error': query_result['error']}}\n return {'slave': {'status': 'up',\n 'xlog_delay': query_result['xlog_delay']}}", "def service_level(self, level=None):\n if level is None:\n flags = self._flags()\n if flags['auto_service_level']:\n return 0\n return flags['service_level']\n else:\n if self._request('SL', _service_levels[level])[0]:\n return level\n\n raise EvseError", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days 
* 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def mmo_is_cfg_rs(self, mmo_connection):\n s = None\n if self.mmo_is_configsrv(mmo_connection):\n try:\n r = mmo_connection[\"admin\"].command(\"replSetGetStatus\")\n s = True\n except Exception as exception:\n if \"not running with --replSet\" in str(exception):\n s = False\n else:\n raise exception\n else:\n raise Exception(\"Not a config server\")\n return s", "def check_consul_services(con):\n whitelist = get_whitelist(con)\n\n if whitelist:\n LOG.warning(\"Checks from the following hosts will be ignored, \" +\n \"because service/rebootmgr/ignore_failed_checks is set: {}\".format(\", \".join(whitelist)))\n\n local_checks = get_local_checks(con, tags=[\"rebootmgr\"])\n LOG.debug(\"relevant_checks: %s\" % local_checks)\n\n for name, check in get_failed_cluster_checks(con, local_checks).items():\n if check[\"Node\"] in whitelist:\n continue\n\n LOG.error(\"There were failed consul checks. Exit\")\n sys.exit(EXIT_CONSUL_CHECKS_FAILED)\n\n LOG.info(\"All checks passed\")", "def slave_status():\n run_mysql_command(\"SHOW SLAVE STATUS\\G;\")", "def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. 
Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")", "def _check_rac_srv(cfg, warning=None, critical=None):\n regex = re.compile(\"Instance .* is running on node .*\")\n bin_name = \"srvctl\"\n _check_attrs(cfg, [\"sid\", \"oh\"])\n bin_name = os.path.join(cfg.oh, \"bin\", bin_name)\n try:\n args = bin_name + \" status database -d {sid}\".format(sid=cfg.sid)\n cp = subprocess.run(args, shell=True, check=True, stdout=subprocess.PIPE)\n if cp.stdout is None:\n print(\"None result from crsctl\")\n return UNKNOWN\n out = str(cp.stdout, \"utf-8\")\n running, not_running = 0, 0\n for l in out.split(os.linesep):\n if l.lstrip().rstrip() == \"\":\n continue\n if regex.search(l.lstrip().rstrip()):\n running += 1\n else:\n not_running += 1\n\n if not_running >= running:\n print(\"you got {0} nodes was not running\".format(not_running))\n return CRITICAL\n if not_running > 0:\n print(\"you got {0} nodes was not running\".format(not_running))\n return WARNING\n\n print(\"all {0} nodes is running\".format(running))\n return OK\n except subprocess.CalledProcessError as err:\n print(err.output)\n return UNKNOWN", "def test_contrail_services_status_after_restart_master_node(os_faults_steps):\n services_statuses = contrail_status.get_services_statuses(os_faults_steps)\n master_node_fqdn = None\n for fqdn, services in services_statuses.items():\n for service in services:\n if (service['service'] == 'contrail-schema' and\n service['status'] == contrail_status.STATUS_ACTIVE):\n master_node_fqdn = fqdn\n break\n assert master_node_fqdn is not None, \"Can't find master node\"\n master_node = os_faults_steps.get_node(fqdns=[master_node_fqdn])\n os_faults_steps.reset_nodes(master_node)\n\n waiter.wait(\n contrail_status.check_services_statuses,\n args=(os_faults_steps),\n expected_exceptions=AssertionError,\n timeout=settings.CONTRAIL_NODE_RESET_TIMEOUT)", "def mcstatus(self, irc, msg, args):\n prefix = self.registryValue('prefix')\n suffix = self.registryValue('suffix')\n\n separator = self.registryValue('separator')\n\n svprefix = self.registryValue('service.prefix')\n svsuffix = self.registryValue('service.suffix')\n\n stonline = self.registryValue('status.online')\n stoffline = self.registryValue('status.offline')\n\n\n json_data = urllib2.urlopen(self.registryValue('statusURL')).read()\n data = json.loads(json_data)\n services = []\n\n for pair in data:\n service, status = pair.keys()[0], pair.values()[0]\n services.append('%s%s%s%s' % (svprefix, service, svsuffix,\n stonline if status == 'green' else \\\n stoffline))\n\n irc.reply('%s%s%s' % (prefix, separator.join(services), suffix))", "def _check_ops(self):\n required_ops = ['san_ip', 'san_login', 'san_password']\n for attr in required_ops:\n if not getattr(self.configuration, attr, None):\n raise exception.InvalidInput(reason=_('%s is not set.') % attr)\n\n replica = self.configuration.safe_get('replication_device')\n if replica and isinstance(replica, list):\n replica_ops = ['backend_id', 'login', 'password', 'rpo']\n for attr in replica_ops:\n if attr not in replica[0]:\n msg = _('replication_device %s is not set.') % attr\n raise exception.InvalidInput(reason=msg)\n self.replica = Replication(replica[0])", "def test_redis_increase_replica_count_usual_case():", "async def healthcheck(self):\n for service in self.services:\n await 
service.healthcheck()", "def test_check_replication_crit_lag_notworking(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (None, 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag not working \"\n \"(perms issue? check syslog)\".format(repl))\n for repl in ('account', 'object', 'container')])", "def crash_safe_replication_enabled(self) -> bool:\n return pulumi.get(self, \"crash_safe_replication_enabled\")", "def can_change_srv_sets(self):\n\t\treturn bool(call_sdk_function('PrlUsrCfg_CanChangeSrvSets', self.handle))", "def handleCondorStatusService(self):\n if isCMSSWSupported(self.getCmsswVersion(), \"CMSSW_7_6_0\"):\n self.logger.info(\"Tag chirp updates from CMSSW with step %s\", self.step.data._internal_name)\n self.process.add_(cms.Service(\"CondorStatusService\",\n tag=cms.untracked.string(\"_%s_\" % self.step.data._internal_name)))\n\n return", "def ensure_cloudwatch_rule_for_replication(context, installed_region='us-east-1'):\n client = boto3.client('events', region_name=installed_region)\n cw_rule_name = utils.find_replication_cw_event_rule(context)\n current_state = client.describe_rule(Name=cw_rule_name)\n configurations = dynamo.list_configurations(context, installed_region)\n replication = False\n for cfg in configurations:\n if 'replication' in cfg and cfg['replication'] == 'yes':\n replication = True\n\n if replication and current_state['State'] == 'DISABLED':\n LOG.warn('Enabling snapshot replication due to configuration.')\n client.enable_rule(Name=cw_rule_name)\n elif not replication and current_state['State'] == 'ENABLED':\n LOG.warn('Disabling snapshot replication due to configuration.')\n client.disable_rule(Name=cw_rule_name)", "def check(self, instance):\n\n def total_seconds(td):\n \"\"\"\n Returns total seconds of a timedelta in a way that's safe for\n Python < 2.7\n \"\"\"\n if hasattr(td, 'total_seconds'):\n return td.total_seconds()\n else:\n return (\n lag.microseconds +\n (lag.seconds + lag.days * 24 * 3600) * 10**6\n ) / 10.0**6\n\n if 'server' not in instance:\n raise Exception(\"Missing 'server' in mongo config\")\n\n # x.509 authentication\n ssl_params = {\n 'ssl': instance.get('ssl', None),\n 'ssl_keyfile': instance.get('ssl_keyfile', None),\n 'ssl_certfile': instance.get('ssl_certfile', None),\n 'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),\n 'ssl_ca_certs': instance.get('ssl_ca_certs', None)\n }\n\n for key, param in ssl_params.items():\n if param is None:\n del ssl_params[key]\n\n server = instance['server']\n username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(server, sanitize_username=bool(ssl_params))\n additional_metrics = instance.get('additional_metrics', [])\n\n # Get the list of metrics to collect\n collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics\n metrics_to_collect = self._get_metrics_to_collect(\n server,\n 
additional_metrics\n )\n\n # Tagging\n tags = instance.get('tags', [])\n # ...de-dupe tags to avoid a memory leak\n tags = list(set(tags))\n\n if not db_name:\n self.log.info('No MongoDB database found in URI. Defaulting to admin.')\n db_name = 'admin'\n\n service_check_tags = [\n \"db:%s\" % db_name\n ]\n service_check_tags.extend(tags)\n\n # ...add the `server` tag to the metrics' tags only\n # (it's added in the backend for service checks)\n tags.append('server:%s' % clean_server_name)\n\n if nodelist:\n host = nodelist[0][0]\n port = nodelist[0][1]\n service_check_tags = service_check_tags + [\n \"host:%s\" % host,\n \"port:%s\" % port\n ]\n\n timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000\n try:\n cli = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,\n **ssl_params)\n # some commands can only go against the admin DB\n admindb = cli['admin']\n db = cli[db_name]\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n\n # Authenticate\n do_auth = True\n use_x509 = ssl_params and not password\n\n if not username:\n self.log.debug(\n u\"A username is required to authenticate to `%s`\", server\n )\n do_auth = False\n\n if do_auth:\n if auth_source:\n self.log.info(\"authSource was specified in the the server URL: using '%s' as the authentication database\", auth_source)\n self._authenticate(cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags)\n else:\n self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)\n\n try:\n status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n else:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.OK,\n tags=service_check_tags)\n\n if status['ok'] == 0:\n raise Exception(status['errmsg'].__str__())\n\n ops = db.current_op()\n status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0\n\n status['stats'] = db.command('dbstats')\n dbstats = {}\n dbstats[db_name] = {'stats': status['stats']}\n\n # Handle replica data, if any\n # See\n # http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa\n try:\n data = {}\n dbnames = []\n\n replSet = admindb.command('replSetGetStatus')\n if replSet:\n primary = None\n current = None\n\n # need a new connection to deal with replica sets\n setname = replSet.get('set')\n cli_rs = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n replicaset=setname,\n read_preference=pymongo.ReadPreference.NEAREST,\n **ssl_params)\n\n if do_auth:\n if auth_source:\n self._authenticate(cli_rs[auth_source], username, password, use_x509, server, service_check_tags)\n else:\n self._authenticate(cli_rs[db_name], username, password, use_x509, server, service_check_tags)\n\n # Replication set information\n replset_name = replSet['set']\n replset_state = self.get_state_name(replSet['myState']).lower()\n\n tags.extend([\n u\"replset_name:{0}\".format(replset_name),\n u\"replset_state:{0}\".format(replset_state),\n ])\n\n # Find nodes: master and current node (ourself)\n for member in replSet.get('members'):\n if member.get('self'):\n current = member\n if 
int(member.get('state')) == 1:\n primary = member\n\n # Compute a lag time\n if current is not None and primary is not None:\n if 'optimeDate' in primary and 'optimeDate' in current:\n lag = primary['optimeDate'] - current['optimeDate']\n data['replicationLag'] = total_seconds(lag)\n\n if current is not None:\n data['health'] = current['health']\n\n data['state'] = replSet['myState']\n\n if current is not None:\n total = 0.0\n cfg = cli_rs['local']['system.replset'].find_one()\n for member in cfg.get('members'):\n total += member.get('votes', 1)\n if member['_id'] == current['_id']:\n data['votes'] = member.get('votes', 1)\n data['voteFraction'] = data['votes'] / total\n\n status['replSet'] = data\n\n # Submit events\n self._report_replica_set_state(\n data['state'], clean_server_name, replset_name, self.agentConfig\n )\n\n except Exception as e:\n if \"OperationFailure\" in repr(e) and \"not running with --replSet\" in str(e):\n pass\n else:\n raise e\n\n # If these keys exist, remove them for now as they cannot be serialized\n try:\n status['backgroundFlushing'].pop('last_finished')\n except KeyError:\n pass\n try:\n status.pop('localTime')\n except KeyError:\n pass\n\n dbnames = cli.database_names()\n self.gauge('mongodb.dbs', len(dbnames), tags=tags)\n\n for db_n in dbnames:\n db_aux = cli[db_n]\n dbstats[db_n] = {'stats': db_aux.command('dbstats')}\n\n # Go through the metrics and save the values\n for metric_name in metrics_to_collect:\n # each metric is of the form: x.y.z with z optional\n # and can be found at status[x][y][z]\n value = status\n\n if metric_name.startswith('stats'):\n continue\n else:\n try:\n for c in metric_name.split(\".\"):\n value = value[c]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(value)))\n\n # Submit the metric\n submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n for st, value in dbstats.iteritems():\n for metric_name in metrics_to_collect:\n if not metric_name.startswith('stats.'):\n continue\n\n try:\n val = value['stats'][metric_name.split('.')[1]]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(val, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(val))\n )\n\n # Submit the metric\n metrics_tags = (\n tags +\n [\n u\"cluster:db:{0}\".format(st), # FIXME 6.0 - keep for backward compatibility\n u\"db:{0}\".format(st),\n ]\n )\n\n submit_method, metric_name_alias = \\\n self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, val, tags=metrics_tags)\n\n if _is_affirmative(instance.get('collections_indexes_stats')):\n mongo_version = cli.server_info().get('version', '0.0')\n if LooseVersion(mongo_version) >= LooseVersion(\"3.2\"):\n self._collect_indexes_stats(instance, db, tags)\n else:\n self.log.error(\"'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s\", mongo_version)\n\n # Report the usage metrics for dbs/collections\n if 'top' in additional_metrics:\n try:\n dbtop = db.command('top')\n for ns, ns_metrics in dbtop['totals'].iteritems():\n if \".\" not in ns:\n continue\n\n # configure tags for db name and collection name\n dbname, collname = 
ns.split(\".\", 1)\n ns_tags = tags + [\"db:%s\" % dbname, \"collection:%s\" % collname]\n\n # iterate over DBTOP metrics\n for m in self.TOP_METRICS:\n # each metric is of the form: x.y.z with z optional\n # and can be found at ns_metrics[x][y][z]\n value = ns_metrics\n try:\n for c in m.split(\".\"):\n value = value[c]\n except Exception:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(m, type(value))\n )\n\n # Submit the metric\n submit_method, metric_name_alias = \\\n self._resolve_metric(m, metrics_to_collect, prefix=\"usage\")\n submit_method(self, metric_name_alias, value, tags=ns_tags)\n except Exception as e:\n self.log.warning('Failed to record `top` metrics %s' % str(e))\n\n\n if 'local' in dbnames: # it might not be if we are connectiing through mongos\n # Fetch information analogous to Mongo's db.getReplicationInfo()\n localdb = cli['local']\n\n oplog_data = {}\n\n for ol_collection_name in (\"oplog.rs\", \"oplog.$main\"):\n ol_options = localdb[ol_collection_name].options()\n if ol_options:\n break\n\n if ol_options:\n try:\n oplog_data['logSizeMB'] = round(\n ol_options['size'] / 2.0 ** 20, 2\n )\n\n oplog = localdb[ol_collection_name]\n\n oplog_data['usedSizeMB'] = round(\n localdb.command(\"collstats\", ol_collection_name)['size'] / 2.0 ** 20, 2\n )\n\n op_asc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.ASCENDING).limit(1)\n op_dsc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.DESCENDING).limit(1)\n\n try:\n first_timestamp = op_asc_cursor[0]['ts'].as_datetime()\n last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()\n oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)\n except (IndexError, KeyError):\n # if the oplog collection doesn't have any entries\n # if an object in the collection doesn't have a ts value, we ignore it\n pass\n except KeyError:\n # encountered an error trying to access options.size for the oplog collection\n self.log.warning(u\"Failed to record `ReplicationInfo` metrics.\")\n\n for (m, value) in oplog_data.iteritems():\n submit_method, metric_name_alias = \\\n self._resolve_metric('oplog.%s' % m, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n else:\n self.log.debug('\"local\" database not in dbnames. 
Not collecting ReplicationInfo metrics')\n\n # get collection level stats\n try:\n # Ensure that you're on the right db\n db = cli[db_name]\n # grab the collections from the configutation\n coll_names = instance.get('collections', [])\n # loop through the collections\n for coll_name in coll_names:\n # grab the stats from the collection\n stats = db.command(\"collstats\", coll_name)\n # loop through the metrics\n for m in self.collection_metrics_names:\n coll_tags = tags + [\"db:%s\" % db_name, \"collection:%s\" % coll_name]\n value = stats.get(m, None)\n if not value:\n continue\n\n # if it's the index sizes, then it's a dict.\n if m == 'indexSizes':\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n # loop through the indexes\n for (idx, val) in value.iteritems():\n # we tag the index\n idx_tags = coll_tags + [\"index:%s\" % idx]\n submit_method(self, metric_name_alias, val, tags=idx_tags)\n else:\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n submit_method(self, metric_name_alias, value, tags=coll_tags)\n except Exception as e:\n self.log.warning(u\"Failed to record `collection` metrics.\")\n self.log.exception(e)", "def mmo_replica_state(self, mmo_connection):\n\n # https://docs.mongodb.org/manual/reference/replica-states/\n replica_states = [\n { \"id\": 0, \"name\": \"STARTUP\", \"description\": \"Not yet an active member of any set. All members start up in this state. The mongod parses the replica set configuration document while in STARTUP.\" },\n { \"id\": 1, \"name\": \"PRIMARY\", \"description\": \"The member in state primary is the only member that can accept write operations.\" },\n { \"id\": 2, \"name\": \"SECONDARY\", \"description\": \"A member in state secondary is replicating the data store. Data is available for reads, although they may be stale.\" },\n { \"id\": 3, \"name\": \"RECOVERING\", \"description\": \"Can vote. Members either perform startup self-checks, or transition from completing a rollback or resync.\" },\n { \"id\": 5, \"name\": \"STARTUP2\", \"description\": \"The member has joined the set and is running an initial sync.\" },\n { \"id\": 6, \"name\": \"UNKNOWN\", \"description\": \"The member's state, as seen from another member of the set, is not yet known.\" },\n { \"id\": 7, \"name\": \"ARBITER\", \"description\": \"Arbiters do not replicate data and exist solely to participate in elections.\" },\n { \"id\": 8, \"name\": \"DOWN\", \"description\": \"The member, as seen from another member of the set, is unreachable.\" },\n { \"id\": 9, \"name\": \"ROLLBACK\", \"description\": \"This member is actively performing a rollback. 
Data is not available for reads.\" },\n { \"id\": 10, \"name\": \"REMOVED\", \"description\": \"This member was once in a replica set but was subsequently removed.\" }\n ]\n\n if self.mmo_is_mongod(mmo_connection):\n return replica_states[mmo_connection[\"admin\"].command(\"replSetGetStatus\")[\"myState\"]]\n else:\n raise Exception(\"Not a mongod process\")", "def replication_check():\n\n try:\n entries = [os.path.join(current_app.config['REPLICATION_PACKETS_DIR'], e)\n for e in os.listdir(current_app.config['REPLICATION_PACKETS_DIR'])]\n except OSError as e:\n logging.warning(e)\n return Response(\"UNKNOWN \" + str(e), mimetype='text/plain')\n\n pattern = re.compile(\"replication-[0-9]+.tar.bz2$\")\n entries = filter(lambda x: pattern.search(x), entries)\n entries = filter(os.path.isfile, entries)\n entries = _sort_natural(entries)\n\n if len(entries) == 0:\n return Response(\"UNKNOWN no replication packets available\", mimetype='text/plain')\n\n resp = \"OK\"\n last = -1\n pattern = re.compile(\"replication-([0-9]+).tar.bz2$\")\n for entry in entries:\n m = pattern.search(entry)\n if not m:\n resp = \"UNKNOWN Unkown files in the replication directory\"\n break\n num = int(m.groups()[0])\n if last < 0:\n last = num - 1\n if last != num - 1:\n resp = \"CRITICAL Replication packet %d is missing\" % (num - 1)\n last = num\n\n if resp != \"OK\":\n return Response(resp, mimetype='text/plain')\n \n last_packet_age = time.time() - os.path.getmtime(entries[-1]) \n if last_packet_age > MAX_PACKET_AGE_CRITICAL:\n resp = \"CRITICAL Latest replication packet is %.1f hours old\" % (last_packet_age / 3600)\n elif last_packet_age > MAX_PACKET_AGE_WARNING:\n resp = \"WARNING Latest replication packet is %.1f hours old\" % (last_packet_age / 3600)\n\n return Response(resp, mimetype='text/plain')", "def check_all_server_power_state(self, state):\n api_data = request(\"get\", \"/serviceProfile\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n total_elements = 0\n for server in api_data[\"json\"][\"ServiceProfile\"][\"members\"]:\n if server[\"assoc_state\"] == \"associated\":\n api_data_c = request(\"get\", \"/power\",\n query={\"identifier\": str(server[\"path\"])})\n self.assertEqual(api_data_c['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' +\n str(api_data_c['status']))\n self.assertEqual(api_data_c[\"json\"][\"serverState\"], state,\n 'Server ' + str(server[\"path\"]) + ' reported power state ' +\n str(api_data_c[\"json\"][\"serverState\"]) + ' expected: ' + state)\n total_elements += 1\n self.assertGreater(total_elements, 0, \"Found zero elements\")", "def check_journaled(con, warning, critical,perf_data):\n\n warning = warning or 20\n critical = critical or 40\n try:\n data=get_server_status(con)\n journaled = data['dur']['journaledMB'] \n message=\"Journaled : %.2f MB\" % journaled\n message+=performance_data(perf_data,[(\"%.2f\"%journaled,\"journaled\",warning, critical)])\n return check_levels(journaled,warning, critical, message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def check_wms(self, service: Service, capabilities_only: bool = False):\n wms_helper = WmsHelper(service)\n\n if wms_helper.get_capabilities_url is not None:\n self.check_get_capabilities(wms_helper.get_capabilities_url)\n if not capabilities_only:\n wms_helper.set_operation_urls()\n if wms_helper.get_map_url is not None:\n self.check_service(wms_helper.get_map_url)\n\n if wms_helper.get_feature_info_url is 
not None:\n self.check_service(wms_helper.get_feature_info_url)\n\n if wms_helper.describe_layer_url is not None:\n self.check_service(wms_helper.describe_layer_url)\n\n if wms_helper.get_legend_graphic_url is not None:\n self.check_service(wms_helper.get_legend_graphic_url)\n\n if wms_helper.get_styles_url is not None:\n self.check_service(wms_helper.get_styles_url)", "def Check_SYSLOG(name):\n\n if name == None: return 0 # Syslog server is optional\n\n if nslookup(name)[0] != 0:\n add_info (name, SYSLOG_SERVER, \"cannot resolve SYSLOG server\")\n return 1\n\n if ping_machine(name) != 0:\n add_info(name, SYSLOG_SERVER, \"cannot ping SYSLOG server\")\n return 2\n add_info(name, SYSLOG_SERVER, \"OK\")\n return 0", "def verify_services(self):\n services = [\"metric_collector\", \"log_collector\"]\n service_version_9 = [\"lma_collector\"]\n pids = {}\n processes_count = {\n \"collectd \": 1,\n \"collectdmon \": 1\n }\n\n if self.settings.version.startswith(\"0.9\"):\n processes_count[\n \"hekad -config[= ]/etc/{}\".format(service_version_9)] = 1\n else:\n # Starting with 0.10, there are one collector for logs and one for\n # metrics\n for service in services:\n processes_count[\"hekad -config[= ]/etc/{}\".format(service)] = 1\n online_nodes = [node for node in self.helpers.get_all_ready_nodes()\n if node[\"online\"]]\n for node in online_nodes:\n pids[node[\"name\"]] = {}\n with self.env.d_env.get_ssh_to_remote(node[\"ip\"]) as remote:\n for process, count in processes_count.items():\n logger.info(\"Checking process {0} on node {1}\".format(\n process, node[\"name\"]\n ))\n pids[node[\"name\"]][process] = (\n self.checkers.check_process_count(\n remote, process, count))\n return pids", "def check_services_status(system, **kwargs):\n logger = kwargs[\"logger\"]\n hosts = list(set([host.host_name for host in system.api.hosts.list()]))\n hosts_agents = dict()\n hosts_status = dict()\n services = kwargs['services']\n for host in hosts:\n # if a hostname contains localhost, we want to avoid trying to connect\n if 'localhost' in host:\n continue\n try:\n service_for_host = services[host]\n with ssh_client(host, username=\"root\", password=system.password) as ssh:\n service_status_dict = get_services_status_list(ssh)\n except KeyError:\n logger.info(\"Skipping host {} as it is not in yml.\".format(host))\n continue\n for service_name, expected_status in service_for_host.items():\n # if service_status_dict has service `service_name` get its status\n # compare it with expected_status\n try:\n logger.debug(\"service:{} status: {} expected_status: {}\"\n .format(service_name, service_status_dict[service_name], expected_status))\n service_status = (expected_status in service_status_dict[service_name])\n except KeyError:\n # This is because not all hosts may have all services installed\n logger.debug(\"Service {} not found on host {}\".format(service_name, host))\n continue\n try:\n hosts_agents[host].update({service_name: service_status})\n except KeyError:\n hosts_agents[host] = {service_name: service_status}\n hosts_status[host] = all(hosts_agents[host].values())\n overall_status = all(hosts_status.values())\n\n if overall_status: # all true, everything is running\n msg = (\"Ok: all services {} are in the desired state on all hosts\".format(services.keys()))\n logger.info(msg)\n print(msg)\n sys.exit(0)\n else:\n trouble_hosts = [host for host, status in hosts_status.iteritems() if not status]\n msg = (\"Critical: These hosts don't have all agents in the desired state: {}.\"\n \"Overall status is {} 
(where False means not in desired state)\"\n .format(trouble_hosts, hosts_agents))\n logger.error(msg)\n print(msg)\n sys.exit(2)", "def test_automation_agent_service_state(Service):\n\n if os.environ['MONGODB_MMS_AUTO_AGENT_INSTALL'] == 'false':\n pytest.skip('Not apply to this test environment')\n\n service = Service('mongodb-mms-automation-agent')\n\n assert service.is_enabled\n assert service.is_running", "def _check_all_replicas_connected(num_replicas, gateway_port, protocol):\n exec_ids = set()\n exec_id_list = []\n for i in range(num_replicas + 1):\n id_ = _send_request(gateway_port, protocol, request_size=2)[0].text\n exec_ids.add(id_)\n exec_id_list.append(id_)\n print(exec_id_list)\n assert len(exec_ids) == num_replicas", "def check_replica_primary(con,host, warning, critical,perf_data):\n if warning is None and critical is None:\n warning=1\n warning=warning or 2\n critical=critical or 2\n\n primary_status=0\n message=\"Primary server has not changed\"\n db=con[\"nagios\"]\n data=get_server_status(con)\n current_primary=data['repl'].get('primary')\n saved_primary=get_stored_primary_server_name(db)\n if current_primary is None:\n current_primary = \"None\"\n if saved_primary is None:\n saved_primary = \"None\"\n if current_primary != saved_primary:\n last_primary_server_record = {\"server\": current_primary}\n db.last_primary_server.update({\"_id\": \"last_primary\"}, {\"$set\" : last_primary_server_record} , upsert=True, safe=True)\n message = \"Primary server has changed from %s to %s\" % (saved_primary, current_primary)\n primary_status=1\n return check_levels(primary_status,warning,critical,message)", "def getreplicationsettings(self):\n d = {}\n try:\n con = hcpsdk.Connection(self.target, debuglevel=self.debuglevel)\n except Exception as e:\n raise hcpsdk.HcpsdkError(str(e))\n else:\n self.connect_time = con.connect_time\n try:\n r = con.GET('/mapi/services/replication')\n except Exception as e:\n raise hcpsdk.HcpsdkError(str(e))\n else:\n if r.status == 200:\n # Good status, get and parse the Response\n x = r.read()\n self.service_time = con.service_time2\n for child in Et.fromstring(x):\n d[child.tag] = child.text\n else:\n raise (hcpsdk.HcpsdkError('{} - {}'.format(r.status, r.reason)))\n finally:\n # noinspection PyUnboundLocalVariable\n con.close()\n\n return d", "def testOperations(options):\n program = options[\"bindir\"] + \"/pg_ctl\"\n cmd = [program, \"status\", \"-D\", options[\"dbdir\"]]\n ret = runProgram(cmd)\n # pg_ctl: no server running\n # pg_ctl: server is running (PID: 13988)\n # does /var/run/postgresql/inmaintenance exist? 
-> YELLOW\n cmdRepmgr = [\"pgrep\", \"repmgrd\"]\n retRepmgr = runProgram(cmdRepmgr)\n \n msg = \"????\"\n if os.path.isfile(\"/var/run/postgresql/inmaintenance\"):\n msg = \"YELLOW: in maintenance mode\"\n elif re.search(\"no server running\", ret):\n msg = \"RED: no PG server running\"\n elif re.search(\"server is running\", ret) and re.search(\"[0-9]+\", retRepmgr):\n msg = \"GREEN\"\n elif re.search(\"server is running\", ret):\n msg = \"YELLOW: no repmgrd running\"\n elif re.search(\"[0-9]+\", retRepmgr):\n msg = \"YELLOW: no PG server running\"\n else:\n msg = \"YELLOW: neither PG server nor repmgrd are running\"\n audit(\"test: \" + msg)\n print(msg, end=\"\")", "def all_services_running():\n return all(['RUNNING' in line or 'EXITED' in line\n for line in supervisor_status()])", "def global_status(self):\n def process_service(service):\n print service.__repr__(path_only=True)\n\n for name, action in service.get_actions():\n if name == 'status':\n try:\n action()\n except Exception, e:\n print 'Failed: ', e.message\n\n for name, subservice in service.get_subservices():\n process_service(subservice)\n\n process_service(self.root)", "def check_hdfs_service(master, ec2_opts, num_nodes):\n output = spark_ec2.ssh_read(master, ec2_opts, \"/root/ephemeral-hdfs/bin/hdfs dfsadmin -report|grep Name |wc -l\")\n # Ok if one slave is down\n return int(output) >= int(num_nodes) - 1", "def get_logging_level():\n try:\n level = rcp.get(\"logging\",\"level\").upper()\n return convert_logging_level(level)\n except:\n logging.warning(\"[logging] section of the config malformed.\")\n return False", "def can_change_srv_sets(self):\n\t\treturn bool(call_sdk_function('PrlUsrInfo_CanChangeSrvSets', self.handle))", "def check_canaries(canaries, service, threshold, logstash, delay, cores=2):\n logger = utils.get_logger()\n\n canary_checks = []\n\n # Build Check command list\n for canary in canaries:\n check_name = 'Logstash Error rate for {}'.format(canary)\n\n # Split canary name at first \".\" since domain isn't in logs\n canary = canary.split('.')[0]\n\n cmd = ['/usr/local/bin/logstash_checker.py',\n '--service-name', service,\n '--host', canary,\n '--fail-threshold', threshold,\n '--delay', delay,\n '--logstash-host', logstash]\n\n cmd = ' '.join(map(str, cmd))\n canary_checks.append(\n checks.Check(check_name, 'canary', command=cmd, timeout=120.0))\n\n success, done = checks.execute(canary_checks, logger, concurrency=cores)\n failed = [job.check.name for job in done if job.isfailure()]\n\n return (len(done) - len(failed), len(failed))", "def health_ok(self):\n for client in self.clients():\n if client.run_cmd('ls'):\n log.info('Vmware cluster is up.')\n return True\n else:\n return False", "def test_check_replication_ok(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result, [(STATUS_OK, 'OK')])", 
"def test_check_replication_crit_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=12), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag is \"\n \"12 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def GetControlServiceStatus(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_set_subsystem_logger_level(self):\n pass", "def replication(self):\n return self._replication", "def start_services(self):\n ircd_result = True\n synapse_result = self.start_synapse()\n if self.charm_config.get(\"enable-ircd\"):\n ircd_result = self.start_ircd()\n return synapse_result and ircd_result", "def health_check(task_service_id):\n logger.info(f\"Checking task service status for {task_service_id}\")\n task_service = TaskService.objects.get(kf_id=task_service_id)\n task_service.refresh_from_db()\n task_service.health_check()", "def checkonly(self):\n OTHER_WSREP.append(socket.gethostbyname(socket.gethostname()))\n for hostitem in ALL_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n for wsrephost in OTHER_WSREP:\n checkwsrep(wsrephost)\n print ''", "def test_otoroshi_controllers_adminapi_analytics_controller_services_status(self):\n pass", "def get_service_status(self, service_params={}):\n return {}", "def analyise(self):\n print termcolor.colored('Showing service::role for every match of %s' % self.host.slug, 'cyan')\n\n def process_service(service):\n # Grather roles which contain this host in the current service.\n roles = []\n for role in service.hosts.roles:\n if self.host in service.hosts.filter(role):\n roles.append(role)\n\n # If roles were found, print result\n if roles:\n print service.__repr__(path_only=True), termcolor.colored(' :: ', 'cyan'), termcolor.colored(', '.join(roles), 'yellow')\n\n for name, subservice in service.get_subservices():\n if subservice.parent == service: # Avoid cycles\n process_service(subservice)\n\n process_service(self.root)", "def check_yarn_service(master, ec2_opts, num_nodes):\n output = spark_ec2.ssh_read(master, ec2_opts, \"/root/ephemeral-hdfs/bin/yarn node -list -all |grep RUNNING |wc -l\")\n # Ok if one slave is down\n return int(output) >= int(num_nodes) - 1", "def get_num_servers():\n return 1", "def test_check_replication_crit_failures(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, 
\"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), 12)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_CRIT, \"12 replication failures\")])", "def health_check(system=''):\n if not system:\n system = os.uname()[1]\n\n print 'Checking system: %s' % (system)\n c = hcvcs.VCS(server=system)\n if not c.status:\n print ' Error: Problem communicating with cluster. Moving on.'\n return\n\n # 0. Status information\n t1 = time.localtime(int(c.info['ClusterTime']))\n print ' Cluster \"%s\" was last updated %s (%s)' % (c.info['ClusterName'], time.strftime('%F %T', t1), c.info['ClusterTime'])\n # VCSFeatures == 'NONE' means non-global. WACPort is the port which a global cluster connect to.\n print ' VCSFeatures: %s, WACPort: %s' % (c.info['VCSFeatures'], c.info['WACPort'])\n\n # 1. General cluster status health\n c_info = c.status[system]\n if c.info['ReadOnly'] != '1':\n print ' Warn: Cluster is Writable. (haconf -dump -makero)'\n if c_info['frozen']:\n print ' Warn: system %s is frozen.' % system\n if c_info['state'] != 'RUNNING':\n print ' Warn: system %s state is \"%s\".' % (system, c_info['state'])\n\n attr_list = std_cluser_attr\n for k, v in attr_list.iteritems():\n if c.info[k] != v:\n print ' Warn: Expecting cluster \"%s\" value \"%s\" to be \"%s\": Currently \"%s\".' % (system, k, v, c.info[k])\n \n # 2. Service group health\n for group in c.groups:\n g_state = c_info[group]\n #print ' Checking group: %s - \"%s\" on \"%s\"' % (group, g_state['state'], system)\n if not g_state['probed']:\n print ' Warn: group \"%s\" is not probed on system \"%s\".' % (group, system)\n if g_state['autodisabled']:\n print ' Warn: group \"%s\" is currently autodisabled.' % (group)\n \n g_list = c.group_display(group) #, c.group_display(group, system)\n\n g_info = hcvcs.quad2dict(g_list)\n # Check values that should be set. Some attributes are different for parallel vs. failover groups.\n if g_info.get('Parallel', '0') == '1':\n attr_list = parallel_group_attr\n else:\n attr_list = failover_group_attr\n for k, v in attr_list.iteritems():\n try:\n if g_info[k] != v:\n print ' Warn: Expecting group %s \"%s\" to be \"%s\": Currently \"%s\".' % (group, k, v, g_info[k])\n except (KeyError), e:\n pass\n\n # Is the group configured to run on all systems?\n syslist = g_info.get('SystemList', '').split('\\t')\n group_nodes = set([ syslist[i] for i in range(len(syslist)) if not i % 2 ])\n cluster_nodes = set(c.status.keys())\n group_nodes_off = cluster_nodes.difference(group_nodes)\n if group_nodes_off:\n print ' Warn: group %s is not configured to run on cluster nodes: %s' % (group, ', '.join(group_nodes_off))\n \n # 3. Attributes on a group\n for resource in [ x[0] for x in c.resource_list(group) if x[1] == system ]:\n r_list = c.resource_display(resource, system)\n r_info = hcvcs.quad2dict(r_list)\n attr_list = std_resource_attr\n for k, v in attr_list.iteritems():\n try:\n if r_info[k] != v:\n print ' Warn: Resource \"%s\", in group \"%s\", attr \"%s\" should be \"%s\": Currently \"%s\".' 
% (resource, group, k, v, r_info[k])\n except (KeyError), e:\n pass", "def mmo_replicaset_conf(self, mmo_connection):\n command = {\"replSetGetConfig\" : 1}\n return self.mmo_execute_on_primaries(mmo_connection, command)", "def check_layer(self, service: Service):\n wms_helper = WmsHelper(service)\n urls_to_check = [\n (wms_helper.get_get_map_url(), True),\n (wms_helper.get_get_styles_url(), False),\n (wms_helper.get_get_feature_info_url(), False),\n (wms_helper.get_describe_layer_url(), False),\n ]\n for url in urls_to_check:\n if url[0] is None:\n continue\n self.check_service(url[0], check_image=url[1])", "def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')", "def test_daemon_redundancy(self):\n self.assertEqual((\"NONE\", 0), StratisCli.daemon_redundancy())", "def mmo_execute_on_primaries(self, mmo_connection, command, replicaset=\"all\"): # TODO add execution database?\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n cluster_command_output = []\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\" and (replicaset == \"all\" or replicaset == shard):\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": { \"Error\": \"mongod process is not up\" } })\n else:\n raise excep\n return cluster_command_output", "def _check_services(self, services):\n now = datetime.utcnow()\n\n # Worker\n service_states = []\n def task(service):\n # Get state, measure lag\n start = datetime.utcnow()\n state = service.get_state()\n finish = datetime.utcnow()\n\n # Update lag\n 
service.lag = (finish - start).total_seconds()\n\n # Add state\n service_states.append(state)\n logger.debug(u'Checked service {} (lag={}, real_period={}): last checked {} ago, state={}: {}'.format(\n service.name,\n service.lag,\n service.real_period,\n now - service.last_tested if service.last_tested else '(never)',\n state['state'], state['info']\n ))\n\n # Update timestamp\n service.last_tested = now\n\n # Run\n threads = [threading.Thread(target=task, args=(service,)) for service in services]\n for t in threads: t.start()\n for t in threads: t.join()\n # TODO: declare max waiting time. If any process doesnt manage to finish in time -- report it as a separate request\n\n return service_states", "def test_100_services(self):\n u.log.debug('Checking system services...')\n swift_storage_services = ['swift-account',\n 'swift-account-auditor',\n 'swift-account-reaper',\n 'swift-account-replicator',\n 'swift-container',\n 'swift-container-auditor',\n 'swift-container-replicator',\n 'swift-container-updater',\n 'swift-object',\n 'swift-object-auditor',\n 'swift-object-replicator',\n 'swift-object-updater',\n 'swift-container-sync']\n service_names = {\n self.keystone_sentry: ['keystone'],\n self.glance_sentry: ['glance-registry',\n 'glance-api'],\n self.swift_proxy_sentry: ['swift-proxy'],\n self.swift_storage_sentry: swift_storage_services\n }\n\n if self._get_openstack_release() >= self.trusty_liberty:\n service_names[self.keystone_sentry] = ['apache2']\n\n ret = u.validate_services_by_name(service_names)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)", "def health_checks(self):\n return [self.check_device_connected, self.check_clear_flags]", "def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_service(host):\n assert host.service('php7.0-fpm').is_enabled\n assert host.service('php7.0-fpm').is_running\n assert host.service('php7.2-fpm').is_enabled\n assert host.service('php7.2-fpm').is_running", "def test_api_ucs_power(self):\n # first power off all servers\n self.set_all_server_power_state(\"off\")\n # verify power state is down\n self.check_all_server_power_state(\"down\")\n # now power on the servers\n self.set_all_server_power_state(\"on\")\n # verify power state is up\n self.check_all_server_power_state(\"up\")", "def determine_colocated_services(self):\n if self.install_type() == \"rpm\":\n return # Not needed for RPM install\n services = \"\"\n # -- if not on same node, we don't have any co-located\n if self.hostname() == self.usercollector.hostname(): \n if self.condor_location() == self.usercollector.condor_location():\n self.daemon_list += \" %s\" % self.usercollector.daemon_list\n self.colocated_services.append(\"usercollector\")\n else:\n services += \"User Collector \"\n\n if self.hostname() == self.submit.hostname():\n if self.condor_location() == self.submit.condor_location():\n self.daemon_list += \", %s\" % self.submit.daemon_list\n self.colocated_services.append(\"submit\")\n else:\n services += \"Submit \"\n\n # -- determine services which are 
collocated ---\n if len(self.colocated_services) == 0:\n self.client_only_install = True\n if len(services) > 0:\n common.ask_continue(\"\"\"These services are on the same node yet have different condor_locations:\n %s\nDo you really want to continue.\"\"\" % services)\n if len(self.colocated_services) > 0:\n common.ask_continue(\"\"\"These services are on the same node and share condor_locations:\n %(services)s\nYou will need the options from that service included in the %(section)s\nof your ini file.\nDo you want to continue.\"\"\" % { \"services\" : self.colocated_services,\n \"section\" : self.ini_section} )", "def test_check_replication_crit_day_plus_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=2, seconds=5), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag is \"\n \"172805 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def check_status(con):\n try:\n status = con.sudo('su - splunk -c \"/opt/splunk/bin/splunk status\"', hide=True)\n if 'is running' in status.stdout:\n return True\n else:\n return False\n except (ConnectionError, AuthenticationException, NoValidConnectionsError, UnexpectedExit):\n return False", "def checkwsrep(sqlhost):\n fnull = open(os.devnull, 'wb')\n ping = subprocess.Popen([\"/bin/ping\", \"-w2\", \"-c2\", sqlhost],\n stdout=fnull, stderr=subprocess.STDOUT)\n _, __ = ping.communicate()\n retcode = ping.poll()\n fnull.close()\n if retcode == 0:\n print \"\\nChecking if {} belongs to cluster ...\".format(sqlhost)\n cnx_sqlhost = None\n wsrep_status = 0\n try:\n cnx_sqlhost = MySQLdb.connect(\n user='sstuser',\n passwd=CREDENTIALS[\"sstuser\"],\n unix_socket='/var/lib/mysql/mysql.sock',\n host=sqlhost\n )\n cursor = cnx_sqlhost.cursor()\n wsrep_status = cursor.execute(\"\"\"show variables LIKE 'wsrep_on'\"\"\")\n except Exception:\n pass\n finally:\n if cnx_sqlhost:\n cnx_sqlhost.close()\n if wsrep_status == 1:\n LASTCHECK_NODES.append(sqlhost)\n print \"{}{} belongs to cluster{}\".format(GREEN, sqlhost, WHITE)\n else:\n print \"{}Skipping {}: it is not in the cluster{}\".format(\n YELLOW, sqlhost, WHITE)", "def _interesting_service(self, service: UpnpService) -> bool:\n service_type = service.service_type\n for service_types in self._SERVICE_TYPES.values():\n if service_type in service_types:\n return True\n\n return False", "def test_starts_cluster_state_service(self):\n options = ControlOptions()\n options.parseOptions(\n [b\"--port\", b\"tcp:8001\", b\"--data-path\", self.mktemp()])\n reactor = MemoryCoreReactor()\n ControlScript().main(reactor, options)\n server = reactor.tcpServers[0]\n service = server[1].resource._v1_user.cluster_state_service\n self.assertEqual((service.__class__, service.running),\n (ClusterStateService, True))", "def status(self):\n if self.qemu.is_running():\n status = 0\n self.log.info(\"vm-status\", 
result=\"online\")\n for device in list(self.qemu.block_info().values()):\n self.log.info(\n \"disk-throttle\",\n device=device[\"device\"],\n iops=device[\"inserted\"][\"iops\"],\n )\n else:\n status = 1\n self.log.info(\"vm-status\", result=\"offline\")\n for volume in self.ceph.volumes:\n locker = volume.lock_status()\n self.log.info(\"rbd-status\", volume=volume.fullname, locker=locker)\n consul = locate_live_service(self.consul, \"qemu-\" + self.name)\n if consul:\n self.log.info(\n \"consul\", service=consul[\"Service\"], address=consul[\"Address\"]\n )\n else:\n self.log.info(\"consul\", service=\"<not registered>\")\n return status", "def is_secured_cluster(self, services):\n return services and \"cluster-env\" in services[\"configurations\"] and\\\n \"security_enabled\" in services[\"configurations\"][\"cluster-env\"][\"properties\"] and\\\n services[\"configurations\"][\"cluster-env\"][\"properties\"][\"security_enabled\"].lower() == \"true\"", "def service_load_balancer_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"service_load_balancer_prefixes\")", "def check_microservice(params) -> None:\n cmd = \"docker container inspect -f '{{.State.Running}}' bg_changer >/dev/null 2>&1\"\n if os.system(cmd) == 0:\n print(\"Microservice is running\")\n else:\n print(\"Microservice is NOT running\")", "def services(status):\n\n run(\"sudo systemctl %s xprof.service\" % status)", "def check_for_service(self, remote_node, status):\n with remote_node.client() as c:\n r = c.get(\"/node/network\")\n current_status = r.body.json()[\"service_status\"]\n current_cert = r.body.json()[\"service_certificate\"]\n\n expected_cert = open(\n os.path.join(self.common_dir, \"networkcert.pem\"), \"rb\"\n ).read()\n\n assert (\n current_cert == expected_cert[:-1].decode()\n ), \"Current service certificate did not match with networkcert.pem\"\n assert (\n current_status == status.value\n ), f\"Service status {current_status} (expected {status.value})\"", "def onSubSys(self) -> List[int]:\n return self._onSubSys", "def test_get_node_state_servicelight(self):\n pass", "def get_num_replicas():\n\n tf_replicator = get_tf_replicator()\n\n if tf_replicator:\n return tf_replicator.num_replicas_in_sync\n elif tf.distribute.has_strategy():\n return tf.distribute.get_strategy().num_replicas_in_sync\n else:\n # I'm assuming replicas and shards are always equal until someone tells me\n # different.\n num_replicas = tpu_function.get_tpu_context().number_of_shards\n if num_replicas:\n return num_replicas\n else:\n return 1", "def average_replication(q, N, m, replication=(lambda k, N, m: 1), s=1):\n if USE_APPROX:\n max_replication = max(1.0, m * TOP_REPLICATION_FACTOR)\n main_domain_replication = (max_replication *\n harmonic_approx(N, math.log(max_replication, N)) / N)\n linked_domains_replication = (max_replication *\n harmonic_approx(N, s + math.log(max_replication, N)) /\n harmonic_approx(N, s)) ** (q-1)\n else:\n res_main = _sum_repl_func(replication, N, m)\n res_linked = _sum_repl_func_zipf(replication, N, m, s)\n main_domain_replication = res_main / N\n linked_domains_replication = (res_linked / harmonic(N, s)) ** (q-1)\n assert main_domain_replication >= 1\n assert linked_domains_replication >= 1\n return main_domain_replication * linked_domains_replication", "def check_config(config: ConfigParser) -> bool:\n has_sc = False\n for section in config.sections():\n if is_subcluster_like(section):\n has_sc = True\n check_section(config, section)\n return has_sc", "def 
http_checker(service):\n verify_ssl = getattr(settings, 'VERIFY_SSL', True)\n try:\n resp = requests.get(service.connection_string, verify=verify_ssl)\n if resp.status_code >= 500:\n service.update_status('Down', resp.status_code)\n else:\n service.update_status('Up', resp.status_code)\n except requests.exceptions.RequestException:\n # for an unknown reason, curl may work here, and requests fail\n # so let's try it out\n skip_ssl_flag = '-k ' if not verify_ssl else ''\n p = subprocess.Popen(\n ('curl %s %s-m 3 -I' %\n (service.connection_string, skip_ssl_flag)).split(),\n stdout=subprocess.PIPE)\n\n res = p.communicate()[0]\n if any([status in res for status in\n ('500', '501', '502', '503', '504')]):\n service.update_status('Down', res)\n else:\n service.update_status('Up', res)", "def check_cinder_status(remote):\n cmd = '. openrc; cinder service-list'\n result = remote.execute(cmd)\n cinder_services = ''.join(result['stdout'])\n logger.debug('>$ cinder service-list\\n{}'.format(cinder_services))\n if result['exit_code'] == 0:\n return all(' up ' in x.split('enabled')[1]\n for x in cinder_services.split('\\n')\n if 'cinder' in x and 'enabled' in x\n and len(x.split('enabled')))\n return False", "def test_service(self):\n port = self.port(store=self.store)\n installOn(port, self.store)\n\n self.assertEqual(\n list(self.store.powerupsFor(IService)),\n [port])", "def test_check_replication_crit_null_failures(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), -1)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_CRIT,\n \"replication failures counter is NULL \"\n \"(check syslog)\")])", "def status(server: str):\n\n if isUp(server):\n log.info(f'{server} is running.')\n else:\n log.info(f'{server} is not running.')", "def get_service_status(self):\n return self.service.status()", "def has_power_management_enabled(labels):\n if not labels:\n return False\n\n for label in labels:\n if label.label_key == constants.KUBE_POWER_MANAGER_LABEL and label.label_value:\n return constants.KUBE_POWER_MANAGER_VALUE == label.label_value.lower()\n\n # We haven't found the power-management node key. 
Return False\n return False", "def test_activeIsEnvironmentSpecific(self):\n node = create_node(\"somewhere\", \"myservice\", \"env1\")\n disco = create_disco()\n disco.onMessage(None, NodeActive(node))\n self.assertEqual((knownNodes(disco, \"myservice\", \"env1\"),\n knownNodes(disco, \"myservice\", \"env2\")),\n ([node], []))", "def check_service_permission(self, request, service_path=None):\n for permission in self.get_service_permissions(request, service_path):\n if not permission.has_permission(request, self):\n self.permission_denied(\n request, message=getattr(permission, 'message', None)\n )", "def derive_newrelic_slaves(self):\n if self.has_slave_data is True:\n self.update_metric(\"newrelic/replication_lag\", self.sum_of([\"slave/seconds_behind_master\"]))\n\n # both need to be YES, which is 1\n running = self.sum_of([\"slave/slave_io_running\", \"slave/slave_sql_running\"])\n if running is not None:\n replication_status = 1.0\n if running == 2:\n replication_status = 0.0\n self.update_metric(\"newrelic/replication_status\", replication_status)\n self.update_metric(\"newrelic/slave_relay_log_bytes\", self.sum_of([\"slave/relay_log_pos\"]))\n self.update_metric(\"newrelic/master_log_lag_bytes\", self.diff_of([\"slave/read_master_log_pos\",\n \"slave/exec_master_log_pos\"]))\n else: # This is a hack because the NR UI can't handle it missing for graphs\n self.update_metric(\"newrelic/replication_lag\", 0.0)\n self.update_metric(\"newrelic/replication_status\", 0.0)\n self.update_metric(\"newrelic/slave_relay_log_bytes\", 0.0)\n self.update_metric(\"newrelic/master_log_lag_bytes\", 0.0)", "def status(service_name: str, print_action: bool = True):\n\n if print_action:\n print_log_status(3, f\"Requesting status from `{service_name}`\")\n \n (err_code, _) = run_command(f\"sudo systemctl status {service_name}\", exit_on_error=False)\n\n return err_code", "def healthy_service(self):\n return not self.service_currently_down and not self.service_recently_down", "def check_subprocess_status(self, sub_process):\n server_count = len(self.server_list)\n patterns = {\n \"format\": \"(SCM format required)(?!;)\",\n \"normal\": \"DAOS I/O server.*started\",\n }\n expected = {\n \"format\": server_count,\n \"normal\": server_count * len(self.yaml_params.server_params),\n }\n detected = 0\n complete = False\n timed_out = False\n start_time = time.time()\n\n # Search for patterns in the 'daos_server start' output until:\n # - the expected number of pattern matches are detected (success)\n # - the time out is reached (failure)\n # - the subprocess is no longer running (failure)\n while not complete and not timed_out and sub_process.poll() is None:\n output = sub_process.get_stdout()\n detected = len(re.findall(patterns[self.mode], output))\n complete = detected == expected[self.mode]\n timed_out = time.time() - start_time > self.timeout\n\n # Summarize results\n msg = \"{}/{} {} messages detected in {}/{} seconds\".format(\n detected, expected[self.mode], self.mode, time.time() - start_time,\n self.timeout)\n if not complete:\n self.log.info(\n \"%s detected - %s:\\n%s\",\n \"Time out\" if timed_out else \"Error\",\n msg,\n sub_process.get_stdout() if not self.verbose else \"<See above>\")\n else:\n self.log.info(\"Server startup detected - %s\", msg)\n\n return complete", "def check_pod_pvc_status(self, skip_replication_resources=False):\n config.switch_to_cluster_by_name(self.preferred_primary_cluster)\n dr_helpers.wait_for_all_resources_creation(\n self.workload_pvc_count,\n 
self.workload_pod_count,\n self.workload_namespace,\n skip_replication_resources=skip_replication_resources,\n )" ]
[ "0.59834796", "0.5918712", "0.537204", "0.5249215", "0.52178967", "0.5167108", "0.5149234", "0.5141773", "0.5053718", "0.50021493", "0.49990278", "0.49700785", "0.48497504", "0.4836432", "0.48359025", "0.47858948", "0.4753289", "0.47522944", "0.4747612", "0.47456878", "0.4742173", "0.47218564", "0.4710541", "0.46857047", "0.46578512", "0.46370143", "0.46314391", "0.4629667", "0.46250433", "0.4613766", "0.46101293", "0.46016786", "0.4591415", "0.4587492", "0.45828867", "0.4578874", "0.45678467", "0.45666236", "0.45663172", "0.4559718", "0.455645", "0.45377612", "0.4530753", "0.45137063", "0.44904408", "0.4486552", "0.447958", "0.44754323", "0.44753364", "0.44730687", "0.44549885", "0.44528702", "0.44488513", "0.44430068", "0.44225487", "0.44081622", "0.44077304", "0.44014788", "0.4395439", "0.43940312", "0.43868747", "0.43618113", "0.43561256", "0.4355783", "0.43527645", "0.4338317", "0.43365985", "0.43329006", "0.43275082", "0.43261102", "0.4320726", "0.43168506", "0.43053117", "0.4301651", "0.42999518", "0.42968145", "0.4293859", "0.42919144", "0.42881665", "0.42794976", "0.42740542", "0.42732415", "0.42708156", "0.42616093", "0.426053", "0.42599577", "0.425876", "0.42582232", "0.42570975", "0.4254066", "0.4251342", "0.42379296", "0.42322937", "0.42241946", "0.42210698", "0.42205945", "0.421722", "0.42163798", "0.42131555", "0.42110118" ]
0.59363264
1
Finds the shortest path from a start node to an end node on a weighted graph using Dijkstra's algorithm. Returns the list of nodes from start to end making up the shortest route
def shortest_path(graph, start, end): nodes_to_visit = {start} visited_nodes = set() # Distance from start to start is 0 distance_from_start = {start: 0} predecessors = {} # Store previous node for shortest route for each node while nodes_to_visit: # Get node with smallest weight current = min( [(distance_from_start[node], node) for node in nodes_to_visit] )[1] # If the end is reached, quit if current == end: break nodes_to_visit.discard(current) visited_nodes.add(current) edges = graph[current] unvisited_neighbours = set(edges).difference(visited_nodes) for neighbour in unvisited_neighbours: neighbour_distance = distance_from_start[current] + \ edges[neighbour] if neighbour_distance < distance_from_start.get(neighbour, float('inf')): distance_from_start[neighbour] = neighbour_distance predecessors[neighbour] = current nodes_to_visit.add(neighbour) return _deconstruct_path(predecessors, end)
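A minimal usage sketch for the `shortest_path` function above, assuming it is defined in scope. The `_deconstruct_path` helper and the example graph below are illustrative assumptions, not taken from the record; only the dict-of-dicts edge-weight shape is implied by the function body itself.

```python
# Minimal sketch: exercising shortest_path() on a small weighted graph.
# The graph layout and the _deconstruct_path helper are assumptions for
# illustration; shortest_path() itself is the function defined above.

def _deconstruct_path(predecessors, end):
    # Walk back from the end node through the predecessors map,
    # then reverse so the route reads start -> end.
    path = [end]
    while path[-1] in predecessors:
        path.append(predecessors[path[-1]])
    return list(reversed(path))

example_graph = {
    'a': {'b': 1, 'c': 4},
    'b': {'a': 1, 'c': 2, 'd': 5},
    'c': {'a': 4, 'b': 2, 'd': 1},
    'd': {'b': 5, 'c': 1},
}

# Cheapest route a -> d goes through b and c (total weight 1 + 2 + 1 = 4).
print(shortest_path(example_graph, 'a', 'd'))  # ['a', 'b', 'c', 'd']
```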
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shortestpaths(self, start, end, edgeweight=\"t_0\"):\n graph = self.graph\n shortest_nodepaths = list(\n nx.all_shortest_paths(\n graph, start, end, weight=edgeweight, method=\"dijkstra\"\n )\n )\n shortest_paths = []\n for path in shortest_nodepaths:\n edgepath = []\n for i in range(len(path) - 1):\n edgepath.append((path[i], path[i + 1]))\n shortest_paths.append(edgepath)\n\n return shortest_paths", "def dijkstra(self, start, end):\n distance = {}\n path_weights = {start: (None, 0)}\n for key in self:\n distance[key] = float('inf')\n distance[start] = 0\n while distance:\n current = min(distance, key=distance.get)\n for neighbor in self[current]:\n temp_dist = distance[current] + self[current][neighbor]\n if neighbor in distance and temp_dist < distance[neighbor]:\n distance[neighbor] = temp_dist\n path_weights[neighbor] = (current, temp_dist)\n del distance[current]\n path = []\n prev = end\n while prev is not None:\n path.append(prev)\n prev = path_weights[prev][0]\n return list(reversed(path))", "def dijkstra(self, start, end):\n unvisited = self.nodes()\n distance = {}\n previous = {}\n for node in unvisited:\n distance[node] = sys.maxsize\n distance[start] = 0\n while len(unvisited) > 0:\n node = unvisited[0]\n smallest_curr = sys.maxsize\n for d in distance:\n if d in unvisited and distance[d] < smallest_curr:\n node = d\n smallest_curr = distance[d]\n unvisited.remove(node)\n for neighbor in self.neighbors(node).keys():\n alt_path = distance[node] + self.weight(node, neighbor)\n if alt_path < distance[neighbor]:\n distance[neighbor] = alt_path\n previous[neighbor] = node\n result = []\n result.append(end)\n curr = end\n while curr in previous:\n result.append(previous[curr])\n curr = previous[curr]\n return result", "def shortest_path(edges, start, end):\n visitedNodes = []\n queue = [[start]]\n if start == end:\n return [start]\n \n while queue:\n path = queue.pop(0)\n node = path[-1]\n if node not in visitedNodes:\n neighbors = get_neighbors(edges, node)\n for neighbor in neighbors:\n newPath = list(path)\n newPath.append(neighbor)\n queue.append(newPath)\n if neighbor == end:\n return fix_format(edges, newPath)\n visitedNodes.append(node)\n return None", "def dijkstra(graph,start,goal):\n shortest_distance = {}\n predecessor = {}\n unseenNodes = graph\n path = []\n for node in unseenNodes:\n shortest_distance[node] = infinity\n shortest_distance[start] = 0\n \n while unseenNodes:\n minNode = None\n for node in unseenNodes:\n if minNode is None:\n minNode = node\n elif shortest_distance[node] < shortest_distance[minNode]:\n minNode = node\n \n for childNode, weight in graph[minNode].items():\n \n if weight + shortest_distance[minNode] < shortest_distance[childNode]:\n shortest_distance[childNode] = weight + shortest_distance[minNode]\n predecessor[childNode] = minNode\n unseenNodes.pop(minNode)\n \n currentNode = goal\n while currentNode != start:\n try:\n path.insert(0,currentNode)\n currentNode = predecessor[currentNode]\n except KeyError:\n print('Path not reachable')\n break\n path.insert(0,start)\n if shortest_distance[goal] != infinity:\n return path", "def shortestPath(G, start, end):\n\n D, P = Dijkstra(G, start)\n print(D)\n print(P)\n Path = []\n while 1:\n Path.append(end)\n if end == start: break\n end = P[end]\n Path.reverse()\n return Path", "def shortestPath(G,start,end):\n\n D,P = Dijkstra(G,start)\n Path = []\n while 1:\n Path.append(end)\n if end == start: break`\u001b`\n end = P[end]\n Path.reverse()\n return Path", "def dijkstra(self,start):\n 
path_weight = {i : float('inf') for i in range(self.n)}\n path_weight[start] = 0\n previous = {i : float('nan') for i in range(self.n)}\n remaining = PriorityQueue()\n for node,priority in path_weight.items():\n remaining.put((priority,node))\n\n while not remaining.empty():\n priority,node = remaining.get()\n for tgt,weight in self.edges[node].items():\n possibleNewWeight = path_weight[node] + weight\n if (possibleNewWeight < path_weight[tgt]):\n path_weight[tgt] = possibleNewWeight\n previous[tgt] = node\n \n return path_weight, previous", "def shortest_route(self, start, end):\n if start not in self.nodes or end not in self.nodes:\n return None\n\n previous = {}\n unvisited = set(self.nodes.keys())\n distance = {n: maxsize for n in unvisited}\n\n distance[start] = 0\n\n while unvisited:\n u = min(unvisited, key=distance.get)\n if distance[u] == maxsize:\n return None\n if u == end:\n break\n unvisited.remove(u)\n\n for neighbor in self.nodes[u]:\n if neighbor not in unvisited:\n continue\n\n d = distance[u] + self.edges[u, neighbor]\n if neighbor not in distance or d < distance[neighbor]:\n distance[neighbor] = d\n previous[neighbor] = u\n\n sequence = [end]\n u = end\n while u in previous:\n u = previous[u]\n sequence.append(u)\n sequence.reverse()\n return sequence, distance[end]", "def get_shortest_path(self, node_id_start: int, node_id_end: int) -> List[int]:\n\n _, tree_idx_start = self.node_id_to_idx(node_id_start)\n _, tree_idx_end = self.node_id_to_idx(node_id_end)\n\n assert tree_idx_start == tree_idx_end, 'Provided node ids need to be part of the same tree'\n\n graph = self.get_graph(tree_idx_start)\n shortest_path = nx.shortest_path(graph, node_id_start, node_id_end)\n\n return shortest_path", "def __dikjstra(self, start_node):\n visited = []\n unvisited = [x for x in self.__node]\n shortest_dist_from_start_node = 0\n current_node = start_node\n\n current_node.setShortestDist(shortest_dist_from_start_node)\n\n while current_node:\n #check unvisited neighbor\n for neighbor_node, distance in current_node.getNeighbors().items():\n #print(neighbor_node.getId(), distance) troubleshoot je ni\n if neighbor_node in visited:\n continue\n\n #add up shortest_dist_from_start_node with distance from neighbor distance\n calc_dist = shortest_dist_from_start_node + distance\n\n if calc_dist < neighbor_node.getShortestDist():\n neighbor_node.setShortestDist(calc_dist)\n neighbor_node.setPrevNode(current_node)\n\n # add current node to visited array\n visited.append(current_node)\n unvisited.remove(current_node)\n \n #update next node and next shortest distance\n next_shortest_dist_from_start_node = inf\n next_node = None\n\n for unvisited_node in unvisited:\n if unvisited_node.getShortestDist() < next_shortest_dist_from_start_node:\n next_shortest_dist_from_start_node = unvisited_node.getShortestDist()\n next_node = unvisited_node\n\n # update current node and shortest distance from start vertex\n if next_node:\n current_node = next_node\n shortest_dist_from_start_node = next_shortest_dist_from_start_node\n #if there are left over unvisited node\n else: \n if unvisited:\n current_node = unvisited[0]\n else:\n current_node = None", "def dijkstra(graph, start):\n unvisited = []\n weight = {}\n prev = {}\n time = {}\n imp = {}\n cost = {}\n dist = {}\n for node in graph.keys():\n # add all nodes to 'unvisited' with no previous node and a weight of infinity\n unvisited.append(node)\n weight[node] = float('inf')\n time[node] = float('inf')\n imp[node] = float('inf')\n cost[node] = float('inf')\n 
dist[node] = float('inf')\n prev[node] = None\n\n # set the starting distance to be 0\n weight[start] = 0\n time[start] = 0\n imp[start] = 0\n cost[start] = 0\n dist[start] = 0\n\n # iterate until no node is left unvisited\n while len(unvisited) > 0:\n # get the lowest distance that has not yet been visited\n curr_node = min(weight.viewkeys() & unvisited, key=weight.get)\n # mark the node as visited\n unvisited.remove(curr_node)\n # iterate through each neighbor of the current node\n for neighbor in graph[curr_node]:\n # calculate distance to that node from this node\n tmp_weight = weight[curr_node] + neighbor[WEIGHT]\n tmp_time = time[curr_node] + neighbor[TIME]\n tmp_imp= imp[curr_node] + neighbor[IMP]\n tmp_cost = cost[curr_node] + neighbor[COST]\n tmp_dist = dist[curr_node] + neighbor[DISTANCE]\n # if this distance is less than the one already stored at that node\n if tmp_weight < weight[neighbor[NEXT_NODE]]:\n # we store this distance as its distance,\n weight[neighbor[NEXT_NODE]] = tmp_weight\n time[neighbor[NEXT_NODE]] = tmp_time\n imp[neighbor[NEXT_NODE]] = tmp_imp\n cost[neighbor[NEXT_NODE]] = tmp_cost\n dist[neighbor[NEXT_NODE]] = tmp_dist\n # and this node as its previous node\n prev[neighbor[NEXT_NODE]] = curr_node\n\n return weight, prev, time, imp, cost, dist", "def find_shortest_path(self, start, end):\n\n if start==None:\n return\n\n visited = {}\n\n distance = {start:0}\n parent = {start:None}\n\n queue = deque()\n queue.append(start)\n\n while queue:\n\n cn = queue.popleft()\n\n for n in self.adjacencylist[cn]:\n if n not in visited:\n queue.append(n)\n parent[n] = cn\n if n not in distance:\n distance[n] = 1\n else:\n distance[n] += 1\n\n visited[cn] = True\n\n if all(visited.values()) == True:\n print('BFS done')\n\n print(\"Finding shortest path\")\n\n path = []\n cn = end\n path.append(cn)\n\n while cn != start:\n cn = parent[cn]\n path.append(cn)\n\n print (path[::-1])", "def shortestPath(self, G, start, end):\n def flatten(L): # Flatten linked list of form [0,[1,[2,[]]]]\n while len(L) > 0:\n yield L[0]\n L = L[1]\n\n q = [(0, start, ())] # Heap of (cost, path_head, path_rest).\n visited = set() # Visited vertices.\n while True:\n (cost, v1, path) = heapq.heappop(q)\n if v1 not in visited:\n visited.add(v1)\n if v1 == end:\n return list(flatten(path))[::-1] + [v1]\n path = (v1, path)\n for (v2, cost2) in G[v1].items():\n if v2 not in visited:\n heapq.heappush(q, (cost + cost2, v2, path))", "def all_shortest_paths(self, start_node, end_node):\n s=self.min_dist(start_node,end_node)\n return self.all_paths(start_node,end_node,s,[])", "def dijkstra_shortest_path(graph_object: Graph, start_node: int) -> List[float]:\n\n def init_distance(g: Graph, s: int) -> List[float]:\n d = [inf] * len(g) # type: List[float]\n d[s] = 0.0\n return d\n\n # Assign a key value to all vertices in the input graph.\n # Initial value is inf for all but the first one\n distance = init_distance(graph_object, start_node)\n priority_queue = MinHeap.build([(i, priority) for i, priority in enumerate(distance)])\n\n while priority_queue: # Priority queue has O(V) elements\n min_index = priority_queue.pop() # O(log(V))\n for edge in graph_object.graph[min_index]: # A graph has at most 2E Edges in adjacency list, so O(E)\n source, destination, edge_weight = edge\n if priority_queue.contains_item(destination) and distance[source] + edge_weight < distance[destination]:\n distance[destination] = distance[source] + edge_weight\n priority_queue.push(item=destination, priority=edge_weight) # O(log(V))\n\n 
return distance", "def single_dijkstra(graph, start, edge_weight_name):\r\n distances = []\r\n for x in start:\r\n try:\r\n value_set = nx.single_source_dijkstra_path_length(graph, source=x, weight=edge_weight_name)\r\n except nx.NetworkXNoPath:\r\n pass\r\n for key in value_set:\r\n\r\n distances.append([x,key,value_set[key]])\r\n return distances", "def dijkstra(start: Vector2D, goal: Vector2D, grid: Scene, *args) -> (list, list):\n\n frontier = PriorityQueue() # nodes to be explored\n\n prev_node = dict() # maps n to node that precedes it in cheapest currently-known path from start to n\n cost_so_far = dict() # maps n to cost of cheapest currently-known path from start to n\n explored = [] # keeps track of previously explored nodes, to be drawn later\n\n current_cost = 0\n\n frontier.put(start, current_cost)\n prev_node[start] = None\n cost_so_far[start] = current_cost\n\n while not frontier.empty():\n current = frontier.get()\n\n if current == goal: # solution found!\n return (reconstruct_path(goal, prev_node), explored)\n\n explored.append(current)\n\n for neighbor in grid.get_unexplored_neighbors(current):\n path_cost = cost_so_far[current] + grid.cost(neighbor) # cost of path to 'neighbor' going through 'current'\n\n if (neighbor not in cost_so_far) or (path_cost < cost_so_far[neighbor]): # if this path reaches node 'neighbor' faster than some previous path\n cost_so_far[neighbor] = path_cost\n \n frontier.put(neighbor, path_cost) # 'expected_cost' is used as a tie breaker to reduce paths to explore\n prev_node[neighbor] = current\n\n # If frontier empty but goal was never reached, no solution was found\n return ([], explored)", "def return_path(self, start, target):\n\n # return early if we don't need to go anywhere\n if start == target: return []\n\n # otherwise look for a path\n visited = set()\n path = {}\n costs = {}\n pq = [(0, start)]\n\n # Look until we find the target, dijkstra's guarantees\n # minimum cost path according to cost function.\n #\n # Cost function is: Cost to get to current worker + workload at worker\n while target not in visited:\n cost, curr = heapq.heappop(pq)\n if curr in visited: continue\n visited.add(curr)\n for neighbor in self.neighbors[curr]:\n if neighbor in visited: continue\n cost_new = cost + self.workload[neighbor] \n if neighbor not in costs or cost_new < costs[neighbor]:\n costs[neighbor] = cost_new\n path[neighbor] = curr\n heapq.heappush(pq, (cost_new, neighbor))\n\n ret_path = [0]\n while ret_path[-1] != start:\n ret_path.append(path[ret_path[-1]])\n\n return ret_path[::-1]", "def bfs_shortest_path(graph, start, end):\n assert not graph.weighted, 'This method will not work for weighted graphs.'\n\n parents = {}\n distances = {start: 0}\n\n queue = deque([start])\n while queue:\n node = queue.popleft()\n for next_node in (graph.adj[node] - distances.keys()):\n parents[next_node] = node\n distances[next_node] = distances[node] + 1\n if next_node == end:\n return backtrace_path(start, end, parents)\n queue.append(next_node)\n\n return None", "def shortest_path(self, start_node_id, end_node_id):\n # convert to lowercase \n start_node_id = start_node_id.lower()\n end_node_id = end_node_id.lower()\n\n # validate whether both node id exist \n start_node = None\n end_node = None\n \n # for loop to convert id to obj --> can be used for passing param in dijkstra method\n for x in self.__node:\n if x.getId() == start_node_id:\n start_node = x\n if x.getId() == end_node_id:\n end_node = x\n \n #validate if the id exists\n if start_node == None or 
end_node == None:\n print(\"The ID does not exist\")\n return\n\n # execute dikstra method\n self.__dikjstra(start_node)\n\n output_node = [end_node.getId()]\n temp_end_node = end_node # to be used to display shortest distance from start node\n\n while end_node.getId() != start_node.getId():\n end_node = end_node.getPrevNode()\n if end_node == None:\n break\n output_node.insert(0,end_node.getId())\n\n #if first element of otput_node is not the start node id, display error, other wise, display the \n if output_node[0] != start_node_id:\n print(\"Error. No path between vertex \"+start_node_id+\" and vertex \"+end_node_id)\n return \n \n # shortest path \n print(\"Shortest Path: \")\n \n for node in output_node:\n if node == end_node_id:\n print(node, end=\"\")\n break \n\n print(node+' -> ', end=\"\")\n \n # shortest distance\n print ('\\nShortest distance to '+ temp_end_node.getId() + ' = ' + str(temp_end_node.getShortestDist()) )\n\n #display table\n self.__display_table(start_node)", "def shortest_path(self, distance_graph: dict) -> list:\n # Start from start pos\n start = self.start\n path = []\n\n node = start\n while True:\n # Default min point is point itself\n min_node = node\n min_val = distance_graph[node]\n # Find a neighbor that has lowest distance from center\n for neighbor in self.graph[node]:\n # If neighbor is not mapped in distanceGraph then it is a member that is far away\n if neighbor not in distance_graph:\n continue\n val = distance_graph[neighbor]\n if min_val > val:\n min_val = val\n min_node = neighbor\n node = min_node\n # Add node to path\n path.append(node)\n\n if min_val == 0:\n # Center found\n break\n return path", "def shortest_route(self, start, finish):\n distances = dict()\n previous = dict()\n nodes = list()\n result = dict()\n best_price = 0\n\n for vertex in self.vertices:\n if vertex == start:\n distances[vertex] = 0\n heapq.heappush(nodes, [0, vertex])\n else:\n distances[vertex] = sys.maxsize\n heapq.heappush(nodes, [sys.maxsize, vertex])\n previous[vertex] = None\n\n while nodes:\n smallest = heapq.heappop(nodes)[1]\n if smallest == finish:\n path = []\n while previous[smallest]:\n path.append(smallest)\n smallest = previous[smallest]\n if len(path) == 1:\n result[\"Cost\"] = distances[finish]\n else:\n result[\"Cost\"] = best_price\n\n if len(path) > 0:\n path.append(start)\n result[\"Path\"] = path[::-1]\n else:\n result = dict()\n\n self.spa_result = result\n return result\n\n if distances[smallest] == sys.maxsize:\n break\n\n for neighbor in self.vertices[smallest]:\n cost = distances[smallest] + self.vertices[smallest][neighbor]\n if cost < distances[neighbor]:\n distances[neighbor] = cost\n previous[neighbor] = smallest\n for n in nodes:\n if n[1] == neighbor:\n n[0] = cost\n best_price = cost\n break\n heapq.heapify(nodes)\n\n return result", "def find_shortest_path(self, start, end, path=[]):\n path = path+[start]\n if start == end:\n return path\n shortest_path = []\n for node in self.graph[start]:\n if node not in path:\n newpath = self.find_path(node, end, path)\n if not shortest_path or len(shortest_path) > len(newpath):\n shortest_path = newpath\n return shortest_path if shortest_path else None", "def djikstra(self, source, target):\r\n dist = {}\r\n prev = {}\r\n set_q = {}\r\n for vertex in self.vertices.keys():\r\n dist[vertex] = sys.maxsize\r\n prev[vertex] = None\r\n set_q[vertex] = dist[vertex]\r\n dist[source] = 0\r\n set_q[source] = 0\r\n while set_q:\r\n vertex_u = min(set_q, key=set_q.get)\r\n if vertex_u == target:\r\n 
break\r\n set_q.pop(vertex_u)\r\n for edge in self.edges[vertex_u]:\r\n alt = dist[vertex_u] + edge.distance\r\n if alt < dist[edge.destination]:\r\n dist[edge.destination] = alt\r\n set_q[edge.destination] = dist[edge.destination]\r\n prev[edge.destination] = vertex_u\r\n path = []\r\n vertex_u = target\r\n while prev[vertex_u]:\r\n path.insert(0, vertex_u)\r\n vertex_u = prev[vertex_u]\r\n path.insert(0, vertex_u)\r\n return path", "def shortestpath(graph,start,end,visited=[],distances={},predecessors={}):\r\n # detect if first time through, set current distance to zero\r\n try:\r\n if not visited: \r\n distances[start]=0\r\n # if we've found our end node, find the path to it, and return\r\n if start==end:\r\n path=[]\r\n while end != None:\r\n path.append(end)\r\n end=predecessors.get(end,None)\r\n return distances[start], path[::-1]\r\n # process neighbors as per algorithm, keep track of predecessors\r\n for neighbor in graph[start]:\r\n if neighbor not in visited:\r\n neighbordist = distances.get(neighbor,float(math.inf))\r\n tentativedist = distances[start] + graph[start][neighbor]\r\n if tentativedist < neighbordist:\r\n distances[neighbor] = tentativedist\r\n predecessors[neighbor]=start\r\n # neighbors processed, now mark the current node as visited \r\n visited.append(start)\r\n # finds the closest unvisited node to the start \r\n unvisiteds = dict((k, distances.get(k,float(math.inf))) for k in graph if k not in visited)\r\n closestnode = min(unvisiteds, key=unvisiteds.get)\r\n # now take the closest node and recurse, making it current\r\n except:\r\n pygame.time.delay(700)\r\n SCREEN.fill((0,0,0))\r\n displayText(\"Path Not Found\",400,200)\r\n displayText(\"Try Again....!!\",400,240)\r\n pygame.display.update()\r\n pygame.time.delay(1500)\r\n pygame.quit()\r\n sys.exit() \r\n return shortestpath(graph,closestnode,end,visited,distances,predecessors)", "def find_path(g: Graph, start: str) -> None:\n g.dijkstra(start)", "def bfs_shortest_path(graph: dict=g2, start: str = \"1\", goal: str = \"4\") -> list:\n visited = []\n queue = [[start]]\n\n while queue:\n path = queue.pop(0)\n node = path[-1]\n if node not in visited:\n neighbours = graph[node]\n for neighbour in neighbours:\n new_path = path[:]\n new_path.append(neighbour)\n queue.append(new_path)\n if neighbour == goal:\n return new_path\n visited.append(node)\n # No path\n return [\"No Path\"]", "def min_path(self, start, end, maxD=1e309):\n tdist, preceding_node = self.dijkstra(start, maxD)\n dist = tdist[end]\n backpath = [end]\n try:\n while end != start:\n end = preceding_node[end]\n backpath.append(end)\n path = list(reversed(backpath))\n except KeyError:\n path = None\n\n return dist, path", "def shortest_path(self, start: str, goal: str) -> Path:\n return next(self.bfs_paths(start, goal), [])", "def shortest_distance(self, begin, end):\n\n begin_index = self._cell_indexes[begin]\n end_index = self._cell_indexes[end]\n\n distance = self._distance_mat[begin_index, end_index]\n # distance *= pq.meter\n\n path = [begin]\n inv_index = {v: k for k, v in self._cell_indexes.items()}\n while True:\n next_index = self._preds[end_index, begin_index]\n if next_index == -9999:\n break\n\n begin_index = next_index\n\n seg = inv_index[next_index]\n path.append(seg)\n\n return distance, path", "def shortest_path_no_lefts(edges, start, end):\n visitedNodes = []\n queue = [[start]]\n if start == end:\n return [start]\n \n while queue:\n path = queue.pop(0) #pops off last element in queue\n node = path[-1] #takes last element in path\n 
if node not in visitedNodes:\n neighbors = get_neighbors_noleft(edges, node)\n for neighbor in neighbors:\n newPath = list(path)\n newPath.append(neighbor)\n queue.append(newPath)\n if neighbor == end:\n return fix_format(edges, newPath)\n visitedNodes.append(node)\n return None", "def shortestPath(graph, start, end, maxOutdistance, toPrint = False):\r\n return DFS(graph, start, end, [], None, sys.maxsize, sys.maxsize, 0, maxOutdistance, toPrint)", "def shortest_path(self, source, target):\r\n key = self.d.keys()\r\n #check that endpoints are in graph\r\n if source not in key or target not in key:\r\n raise KeyError(str(source) + \" and \" + str(target) + \" must be in graph\")\r\n #initialize V,Q and M\r\n V = []\r\n vis = dict()\r\n Q = deque()\r\n Q.append(source)\r\n M = set(source)\r\n #while target has not been visited\r\n while target not in M:\r\n #take first element of Q\r\n current = Q.popleft()\r\n #add element to visited\r\n V.append(current)\r\n neighbors = self.d[current]\r\n #for each neighbor of element\r\n for n in neighbors:\r\n #if element has not been checked, add it to queue\r\n #also save traveled edge in visited\r\n if n not in M:\r\n Q.append(n)\r\n vis.update({n:current})\r\n M.add(n)\r\n L = [target]\r\n #reverse the order of the traveled edges\r\n while L[-1] in vis.keys():\r\n L.append(vis[L[-1]])\r\n return L[::-1]", "def findShortestPath(start, end):\n # Using a queue as the dispenser type will result in a breadth first\n # search\n queue = []\n queue.append(start) # prime the queue with the start vertex\n\n # The predecessor dictionary maps the current Vertex object to its\n # immediate predecessor. This collection serves as both a visited\n # construct, as well as a way to find the path\n predecessors = {}\n predecessors[start] = None # add the start vertex with no predecessor\n\n # Loop until either the queue is empty, or the end vertex is encountered\n while len(queue) > 0:\n current = queue.pop(0)\n if current == end:\n break\n for neighbor in current.getConnections():\n if neighbor not in predecessors: # if neighbor unvisited\n predecessors[neighbor] = current # map neighbor to current\n queue.append(neighbor) # enqueue the neighbor\n\n # If the end vertex is in predecessors a path was found\n if end in predecessors:\n path = []\n current = end\n while current != start: # loop backwards from end to start\n path.insert(0, current) # prepend current to the path list\n current = predecessors[current] # move to the predecessor\n path.insert(0, start)\n return path\n else:\n return None", "def DFS1(graph, start, end, path=[], shortest=None):\n path = path + [start]\n print 'Current DFS path:', printPath(path)\n if start == end:\n return path\n for node in graph.childrenOf(start):\n if node not in path: #avoid cycles\n if shortest == None or len(path) < len(shortest):\n newPath = DFS1(graph, node, end, path, shortest)\n if newPath != None:\n shortest = newPath\n return shortest", "def dijsktra(graph, initial, end):\n shortest_paths = {initial: (None, 0)}\n current_node = initial\n visited = set()\n\n while current_node != end:\n visited.add(current_node)\n destinations = graph.edges[current_node]\n weight_to_current_node = shortest_paths[current_node][1]\n logger.debug(\"shortest_paths = {}\".format(shortest_paths))\n\n for next_node in destinations:\n weight = graph.weights[(current_node, next_node)] + weight_to_current_node\n logger.debug(\"next_node = {}\".format(next_node))\n logger.debug(\"graph.weights[(current_node, next_node)] = 
{}\".format(graph.weights[(current_node, next_node)]))\n logger.debug(\"weight_to_current_node = {}\".format(weight_to_current_node))\n logger.debug(\"weight = {}\".format(weight))\n if next_node not in shortest_paths:\n shortest_paths[next_node] = (current_node, weight)\n else:\n current_shortest_weight = shortest_paths[next_node][1]\n if current_shortest_weight > weight:\n shortest_paths[next_node] = (current_node, weight)\n\n next_destinations = {node: shortest_paths[node] for node in shortest_paths if node not in visited}\n logger.debug(\"next_destinations = {}\".format(next_destinations))\n\n if not next_destinations:\n return \"Route not possible\"\n\n current_node = min(next_destinations, key=lambda k: next_destinations[k][1])\n logger.debug(\"current_node = {}\".format(current_node))\n\n path = []\n while current_node is not None:\n path.append(current_node)\n next_node = shortest_paths[current_node][0]\n current_node = next_node\n\n # Reverse the path\n path = path[::-1]\n return path", "def shortest_path(start, end):\n\n\tmoves = rubik.quarter_twists\n\n\t# Parent nodes: (Parent_State, move)\n\tstartParents = {}\n\tstartParents[start] = None # Start state has no parent\n\n\t# Parent nodes: (Parent_State, move)\n\tendParents = {}\n\tendParents[end] = None # End state has no parent\n\n\tstartFrontier = [] # Current frontier in start BFS\n\tendFrontier = [] # Current frontier in end BFS\n\n\tstartFrontier.append(start) # Add start state as first and only node to generate next frontier\n\tendFrontier.append(end) # Add end state as first and only node to generate next frontier\n\n\tif end in startParents:\n\t\treturn [] # Start == End : No moves required\n\n\t# We only have to search at most 14 levels in BFS\n\t# Two-way BFS therefore requires 7 concurrent levels from both states\n\tfor i in range(7):\n\n\t\tstartNextFrontier = [] # New empty set for new frontier to be discovered\n\t\tfor state in startFrontier: # Iterate through each rubiks state in this frontier\n\t\t\tfor move in moves: # Apply each move to this state\n\t\t\t\tnextState = rubik.perm_apply(move, state)\n\n\t\t\t\t# Makes sure this new state is not already in the Graph\n\t\t\t\t# This skips nodes that were already permuted in another path,\n\t\t\t\t# essentially trimming the Graph's leaves\n\t\t\t\tif nextState not in startParents:\n\t\t\t\t\tstartParents[nextState] = (state, move) # Store this state's parent + move\n\t\t\t\t\tstartNextFrontier.append(nextState) # Create a node in the next frontier\n\t\t\t\t\n\t\t\t\t# Intersect of both Graphs, Intermediate state of path found\n\t\t\t\tif nextState in endParents:\n\t\t\t\t\treturn solution(startParents, endParents, nextState)\n\n\t\tstartFrontier = startNextFrontier # Make the next frontier the current one\n\n\t\tendNextFrontier = [] # New empty set for new frontier to be discovered\n\t\tfor state in endFrontier: # Iterate through each rubiks state in this frontier\n\t\t\tfor move in moves: # Apply each move to this state\n\t\t\t\tnextState = rubik.perm_apply(move, state)\n\n\t\t\t\t# Makes sure this new state is not already in the Graph\n\t\t\t\t# This skips nodes that were already permuted in another path,\n\t\t\t\t# essentially trimming the Graph's leaves\n\t\t\t\tif nextState not in endParents:\n\t\t\t\t\tendParents[nextState] = (state, move) # Store this state's parent + move\n\t\t\t\t\tendNextFrontier.append(nextState) # Create a node in the next frontier\n\t\t\t\t\n\t\t\t\t# Intersect of both Graphs, Intermediate state of path found\n\t\t\t\tif nextState in 
startParents:\n\t\t\t\t\treturn solution(startParents, endParents, nextState)\n\n\t\tendFrontier = endNextFrontier # Make the next frontier the current one\n\n\treturn None", "def _dijkstra(G, start, end=None):\n distances = {}\n predecessors = {}\n Q = PriorityDictionary()\n Q[start] = 0\n\n for pt in Q:\n distances[pt] = Q[pt]\n if pt == end: break\n\n for neigh in G[pt]:\n pnLength = distances[pt] + G[pt][neigh]\n if neigh in distances:\n if pnLength < distances[neigh]:\n raise ValueError, \"Dijkstra: found better path to already-final vertex\"\n elif neigh not in Q or pnLength < Q[neigh]:\n Q[neigh] = pnLength\n predecessors[neigh] = pt\n return distances, predecessors", "def dijkstra(grid,start,end, heuristic_cost=null_heuristic):\n return a_star(grid,start,end, heuristic_cost)", "def path_to(start, end):\n sol = djikstra(start, end)\n if sol is None:\n return None\n (distances, cost) = sol\n return compute_path(distances, cost, start, end)", "def shortestpath(graph, current, end, visited=[], distances={}, predecessors={}):\n\t# we've found our end node, now find the path to it, and return\n\tif current == end:\n\t\tpathShortest = []\n\t\twhile end != None:\n\t\t\tpathShortest.append(end)\n\t\t\tend = predecessors.get(end, None)\n\t\tpass\n\t\treturn distances[current], pathShortest[::-1]\n\tpass\n\t\n\t# detect if it's the first time through, set current distance to zero\n\tif not visited: distances[current] = 0\n\t# process neighbors as per algorithm, keep track of predecessors\n\tfor adjacent in graph[current]:\n\t\tif adjacent not in visited:\n\t\t\tadjDist = distances.get(adjacent, sys.maxint)\n\t\t\tcurDist = distances[current] + graph[current][adjacent]\n\t\t\tif curDist < adjDist:\n\t\t\t\tdistances[adjacent] = curDist\n\t\t\t\tpredecessors[adjacent] = current\n\t\t\tpass\n\t\tpass\n\tpass\n\t# neighbors processed, now mark the current node as visited\n\tvisited.append(current)\n\t# finds the closest unvisited node to the start\n\tnotVisited = dict((k, distances.get(k, sys.maxint)) for k in graph if k not in visited)\n\tclosest = min(notVisited, key=notVisited.get)\n\t# now we can take the closest node and recurse, making it current\n\treturn shortestpath(graph, closest, end, visited, distances, predecessors)", "def findRoute(self, x1, y1, x2, y2):\r\n\r\n\t\t# Check to see if the start and end node are the same\r\n\t\tif x1 == x2 and y1 == y2:\r\n\t\t\treturn [(x1, y1)]\r\n\r\n\t\troot_node = DijkstraNode(x1, y1, None, 0)\r\n\t\troot_node.neighbours = self.getNeighbours(x1, y1)\r\n\r\n\t\t# Create a dictionary to store all of the nodes\r\n\t\tall_nodes = {(x1, y1): root_node}\r\n\t\t# If no starting place is found return nothing\r\n\t\tif len(root_node.neighbours) == 0:\r\n\t\t\treturn []\r\n\t\tcurrent_node = root_node\r\n\t\twhile (x2, y2) not in all_nodes:\r\n\r\n\t\t\t# If the algorithm hasn't found the target node and cannot explore further then return empty path\r\n\t\t\tif current_node is None:\r\n\t\t\t\treturn []\r\n\r\n\t\t\tcurrent_node.neighbours = self.getNeighbours(current_node.x, current_node.y)\r\n\r\n\t\t\t# The distance from the root node through the current node to the neighbour\r\n\t\t\tcurrent_neighbour_dist = current_node.dist + 1\r\n\r\n\t\t\tfor neighbour in current_node.neighbours:\r\n\t\t\t\tif neighbour in all_nodes:\r\n\t\t\t\t\tneighbour_node = all_nodes[neighbour]\r\n\t\t\t\t\tif current_neighbour_dist < neighbour_node.dist:\r\n\t\t\t\t\t\t# The new best path is through the current node\r\n\t\t\t\t\t\tneighbour_node.parent = 
current_node\r\n\t\t\t\t\t\tneighbour_node.dist = current_neighbour_dist\r\n\t\t\t\telse:\r\n\t\t\t\t\t# Add a new node if it doesn't exist within the currently explored nodes\r\n\t\t\t\t\tall_nodes[neighbour] = DijkstraNode(neighbour[0], neighbour[1], current_node, current_neighbour_dist)\r\n\r\n\t\t\t# Mark the current node as being explored as you have checked all the neighbours\r\n\t\t\tcurrent_node.explored = True\r\n\r\n\t\t\t# Gets a list of all of the unexplored nodes to check for the next node to explore\r\n\t\t\tunexplored_nodes = [node for _, node in all_nodes.items() if not node.explored]\r\n\r\n\t\t\tif len(unexplored_nodes) > 0:\r\n\t\t\t\t# Go to the next node with the smallest distance that hasn't been explored\r\n\t\t\t\tcurrent_node = min(unexplored_nodes, key=lambda node: node.dist)\r\n\t\t\telse:\r\n\t\t\t\tcurrent_node = None\r\n\r\n\t\t# Make your way back from the target node\r\n\t\tcurrent_node = all_nodes[(x2, y2)]\r\n\t\t# Initialise a list to hold the path going from the target to the root\r\n\t\treversed_path = []\r\n\t\t# This will end when the root node tries to travel to a None node\r\n\t\twhile current_node is not None:\r\n\t\t\t# Add the current node to the list\r\n\t\t\treversed_path.append((current_node.x, current_node.y))\r\n\t\t\t# Travel to the parent node\r\n\t\t\tcurrent_node = current_node.parent\r\n\t\t\t# current_node will be None at the root because the parent of the root node is 'None'\r\n\r\n\t\t# Return the list in the correct order\r\n\t\treturn list(reversed(reversed_path))", "def shortestPath(self, source, target):\n dist = {}\n prev = {}\n q = []\n for y,a in enumerate(self.sm):\n for x,b in enumerate(self.sm[y]):\n dist[(x,y)] = sys.maxint\n prev[(x,y)] = None\n q.append((x,y))\n dist[source] = 0\n\n while len(q) is not 0:\n # find the node with minimum value (u)\n d = deepcopy(dist)\n while True:\n b = dict(map(lambda item: (item[1],item[0]), d.items()))\n u = b[min(b.keys())]\n if u not in q:\n d.pop(u)\n else:\n break\n\n if dist[u] == sys.maxint: # remaining nodes are inaccessible\n break\n\n q.remove(u)\n\n\n if u == target: # target found\n break\n\n for v in self.getNeighbors(u):\n alt = dist[u] + 1\n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n\n s = []\n u = target\n while prev[u] is not None:\n s.append(u)\n u = prev[u]\n s.reverse()\n\n return s", "def shortest_path_tree__bfs(self, start):\r\n from queue import deque\r\n\r\n assert start in self.graph\r\n\r\n distance = {vertex: None for vertex in self.vertices()}\r\n distance[start] = 0\r\n\r\n previous = {vertex: None for vertex in self.vertices()}\r\n\r\n queue = deque()\r\n queue.append(start)\r\n\r\n while queue:\r\n current_vertex = queue.pop()\r\n for neighbour in self.neighbours(current_vertex):\r\n if distance[neighbour] is None:\r\n queue.append(neighbour)\r\n distance[neighbour] = distance[current_vertex] + 1\r\n previous[neighbour] = current_vertex\r\n\r\n return previous", "def shortestPathBFS(start):\n if start is None:\n return None\n\n # keep track of nodes to be checked\n queue = [start]\n start.curr_dist = 0\n\n while queue:\n curr = queue.pop()\n for neighbor in curr.neighbors:\n next_distance = curr.curr_dist + curr.getDistance(neighbor)\n if neighbor.curr_dist == math.inf or neighbor.curr_dist > next_distance:\n neighbor.curr_dist = next_distance\n neighbor.previous = curr\n queue.insert(0, neighbor)", "def _get_paths(G, nodes, edges, target, weight, available_dist, cache={}):\n\n G_succ = G.succ if G.is_directed() else G.adj\n\n output = []\n visited 
= set(nodes)\n path_info = []\n source = nodes[-1]\n get_weight = _create_weight_func(G, weight)\n\n for neighbor, neighbor_edge in G_succ[source].items():\n\n if neighbor in visited:\n continue\n\n new_available_dist = available_dist - get_weight(source, neighbor)\n\n if new_available_dist < 0:\n continue\n\n # shortest path key for cache\n sp_key = (source, neighbor, target)\n\n if sp_key not in cache:\n sp = dijkstra(G, neighbor, target, 'length', set([source]), \n new_available_dist)\n\n cache[sp_key] = INF if not sp else sp[1]\n\n if new_available_dist < cache[sp_key]:\n continue\n\n path_info.append((neighbor, neighbor_edge, new_available_dist))\n\n\n for (neighbor, neighbor_edge, new_available_dist) in path_info:\n new_path = nodes + [neighbor]\n new_edges = edges + [neighbor_edge]\n\n if neighbor == target:\n output.append((new_path, new_edges))\n else:\n output.extend(_get_paths(G, new_path, new_edges, target, \n weight, new_available_dist, cache))\n\n return output", "def shortest_path(graph, src, dest, modifiers):\r\n # Distances to source node\r\n distances = {vertex: float(\"inf\") for vertex in range(graph.num_vertices)}\r\n # Previous node in optimal path\r\n previous = {vertex: -1 for vertex in range(graph.num_vertices)}\r\n # Shortest path from source to source is 0\r\n distances[src] = 0\r\n # Initialize priority queue and vertex set\r\n pqueue = [(distances[src], src)]\r\n vertex_set = {src}\r\n\r\n while len(pqueue) != 0:\r\n vertex_added = False\r\n curr = heappop(pqueue)[1]\r\n vertex_set.remove(curr)\r\n for neighbor in graph.outgoing(curr):\r\n alt = distances[curr] + weight(neighbor, modifiers)\r\n other = neighbor.other(curr) # Opposite vertex\r\n if alt < distances[other]:\r\n distances[other] = alt\r\n previous[other] = curr\r\n if other not in vertex_set:\r\n vertex_added = True\r\n pqueue.append((alt, other))\r\n vertex_set.add(other)\r\n if vertex_added:\r\n heapify(pqueue)\r\n\r\n # Shortest path\r\n shortest_path = []\r\n shortest_path_distance = distances[dest]\r\n\r\n # Traverse previous[] to look for shortest path to target\r\n current_node = dest\r\n while previous[current_node] != -1:\r\n shortest_path.append(current_node)\r\n current_node = previous[current_node]\r\n if len(shortest_path) != 0:\r\n shortest_path.append(current_node)\r\n shortest_path.reverse()\r\n\r\n return shortest_path, shortest_path_distance", "def FindShortestPath(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return path\n if start not in graph:\n return None\n shortest = None\n for node in graph[start]:\n if node not in path:\n newpath = FindShortestPath(graph, node, end, path)\n if newpath:\n if not shortest or len(newpath) < len(shortest):\n shortest = newpath\n return shortest", "def Dijkstra2(node_init, node_end, graph):\n\n ### Parameter initialisation\n node_list = list(graph.vertices.keys())\n dist = np.full(len(node_list), -np.inf)\n # At the beginning we have not reached the end_node\n node_end_reached = False\n # At the beginning, we assume there is a shortest path:\n no_path = False\n \n # Initialising the distances of the nodes\n dist[node_init] = 0\n # Setting the father_node which contains the provenance of the nodes\n father_node = np.full(len(node_list), -np.inf)\n # Initialising the current node\n current_node = node_init \n # Initialising the dictionnary of fixed node which has the following shape:\n #{fixed_node: (previous_node, iteration, cost)}\n # Fixing the number of iterations\n k = 0\n dict_fixed_node = {node_init:(None,k, 
0)}\n \n # In the trivial case where the two nodes are identical\n if node_init == node_end:\n cost = 0\n shortest_path = [node_init]\n no_path = False\n return cost, shortest_path, no_path\n \n # While the end node has not been reached\n while not node_end_reached:\n current_node_adj = graph.node_get_adj(current_node).copy()\n # We get rid off the nodes that have been fixed, except at the first iteration\n if k != 0:\n current_node_adj.remove(dict_fixed_node[current_node][0])\n ## Updating the distances : either the node are neighbors and \n # something might change, either they are not, and their distance \n # does not change.\n # For the neighbors node\n for e in current_node_adj:\n dist_temp = dist[current_node] + graph.weights[(current_node, e)]\n # We change the distance only if it is lower than it used to be\n # otherwise, we keep it\n if dist_temp < dist[e] or dist[e] == -np.inf:\n dist[e] = dist_temp\n # Setting the father node\n father_node[e] = current_node\n father_node[current_node] = None\n # We set the distance of the current node to 0\n dist[current_node] = 0 \n # Index and distances which are not 0 and not minus infty\n sub_dist_index = [i for i, e in enumerate(dist) if e > 0]\n sub_dist_value = np.array([e for i, e in enumerate(dist) if e > 0])\n # If these two lists are empty, we stop the algorithm and that means\n # that we cannot reach our point\n if not sub_dist_index or sub_dist_value.size == 0:\n no_path = True\n cost = 'impossible path'\n shortest_path = 'impossible path'\n break\n # Now we need to set our choice for the next node\n if np.array_equal(sub_dist_value, np.ones(len(sub_dist_value))):\n ## If there are only ones : we pick them up randomly\n current_node = int(random.choice(list(sub_dist_index)))\n min_dist = sub_dist_value.min()\n else:\n ## If not we just pick up the one with the minimum distance.\n current_node = sub_dist_index[sub_dist_value.argmin()]\n min_dist = sub_dist_value.min()\n # Adding this node to the dictionnary\n dict_fixed_node[current_node] = (int(father_node[current_node]), k, min_dist)\n # If the end_node has been reached, we stop the search algorithm\n if node_end in dict_fixed_node.keys():\n node_end_reached = True\n # Incrementing the counter\n k += 1\n #print('current_node : {}'.format(current_node))\n #print(dict_fixed_node)\n # Now we need to get the shortest path from our iterations whose information \n # are in dict_fixed_node. 
To do this, we need to circle back from the end_node\n # to the init_node in this dictionnary.\n # This is done only if some path between node_init and end_node exists.\n if no_path == False:\n list_father_node = list(dict_fixed_node.values())\n previous_node = list_father_node[-1][0]\n shortest_path = [node_end, previous_node]\n # While the initial node has not been reached, we add the relevant\n # nodes to our shortest path\n while previous_node != node_init:\n previous_node = dict_fixed_node[previous_node][0]\n shortest_path.append(previous_node)\n \n # Computing the cost of this shortest path in terms of weights\n cost = int(dict_fixed_node[node_end][2])\n \n return cost, shortest_path, no_path", "def floyd_warshall_path(self, start, end, next_node): # pragma no cover\n if next_node[start][end] is None:\n return []\n path = [start]\n while start is not end:\n start = next_node[start][end]\n path.append(start)\n return path", "def find_shortest_path(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return path\n if not graph.has_key(start):\n return None\n shortest = None\n for node in graph[start]:\n if node not in path:\n newpath = find_shortest_path(graph, node, end, path)\n if newpath:\n if not shortest or len(newpath) < len(shortest):\n shortest = newpath\n return shortest", "def min_path(vs, es, source, target):\n dijkstra(vs, es, source, stop = target)\n test = target\n result = []\n while test != source:\n e = test._ss_edge\n result.append(e)\n test = e.v1 if e.v1 != test else e.v2\n assert test == source and test._ss_edge is None\n return result[::-1]", "def Dijkstra(node_init, node_end, graph):\n\n ### Parameter initialisation\n node_list = list(graph.vertices.keys())\n dist = np.full(len(node_list), -np.inf)\n # At the beginning we have not reached the end_node\n node_end_reached = False\n # At the beginning, we assume there is a shortest path:\n no_path = False\n\n # Initialising the distances of the nodes\n dist[node_init] = 0\n # Setting the father_node which contains the provenance of the nodes\n father_node = np.full(len(node_list), -np.inf)\n # Initialising the current node\n current_node = node_init \n # Initialising the dictionnary of fixed node which has the following shape:\n #{fixed_node: previous_node}\n # Fixing the number of iterations\n k = 0\n dict_fixed_node = {node_init:(None,k)}\n \n # In the trivial case where the two nodes are identical\n if node_init == node_end:\n cost = 0\n shortest_path = [node_init]\n no_path = False\n return cost, shortest_path, no_path\n \n # While the end node has not been reached\n while not node_end_reached:\n current_node_adj = graph.node_get_adj(current_node).copy()\n # We get rid off the node that have been fixed, except at the first iteration\n if k != 0:\n current_node_adj.remove(dict_fixed_node[current_node][0])\n ## Updating the distances : either the node are neighbors and \n # something might change, either they are not, and their distance \n # does not change.\n # For the neighbors node\n for e in current_node_adj:\n dist_temp = dist[current_node] + 1\n # We change the distance only if it is lower than it used to be\n # otherwise, we keep it\n if dist_temp < dist[e] or dist[e] == -np.inf:\n dist[e] = dist_temp\n # Setting the father node\n father_node[e] = current_node\n father_node[current_node] = None\n # We set the distance of the current node to 0\n dist[current_node] = 0 \n # Index and distances which are not 0 and not minus infty\n sub_dist_index = [i for i, e in enumerate(dist) if e > 
0]\n sub_dist_value = np.array([e for i, e in enumerate(dist) if e > 0])\n # If these two lists are empty, we stop the algorithm and that means\n # that we cannot reach our point\n if not sub_dist_index or sub_dist_value.size == 0:\n no_path = True\n cost = 'impossible path'\n shortest_path = 'impossible path'\n break\n # Now we need to set our choice for the next node\n if np.array_equal(sub_dist_value, np.ones(len(sub_dist_value))):\n ## If there are only ones : we pick them up randomly\n current_node = int(random.choice(list(sub_dist_index)))\n else:\n ## If not we just pick up the one with the minimum distance.\n current_node = sub_dist_index[sub_dist_value.argmin()]\n # Adding this node to the dictionnary\n dict_fixed_node[current_node] = (int(father_node[current_node]), k)\n # If the end_node has been reached, we stop the search algorithm\n if node_end in dict_fixed_node.keys():\n node_end_reached = True\n # Incrementing the counter\n k += 1\n\n # Now we need to get the shortest path from our iterations whose information \n # are in dict_fixed_node. To do this, we need to circle back from the end_node\n # to the init_node in this dictionnary.\n # This is done only if some path between node_init and end_node exists.\n if no_path == False:\n list_father_node = list(dict_fixed_node.values())\n previous_node = list_father_node[-1][0]\n shortest_path = [node_end, previous_node]\n # While the initial node has not been reached, we add the relevant\n # nodes to our shortest path\n while previous_node != node_init:\n previous_node = dict_fixed_node[previous_node][0]\n shortest_path.append(previous_node)\n \n # Computing the cost of this shortest path in terms of weights\n cost = len(shortest_path) - 1\n \n return cost, shortest_path, no_path", "def dijkstras(graph_list, start):\n # for returning\n shortest_dist = dict()\n shortest_parent = dict()\n\n # create an adjacency list from the graph definition, and a dict for keeping track of parents\n graph = {} # our adjacency list dict\n parent = {} # our parents dict\n\n for fr, to, w in graph_list:\n if fr not in graph:\n graph[fr] = [(to, w)]\n else:\n graph[fr].append((to, w))\n\n if fr not in parent:\n parent[fr] = None\n if to not in parent:\n parent[to] = None\n\n # in this implementation of dijkstras, we keep track of a list of visited nodes in order to use a heap as-is,\n # without having to modify the heap's weights\n visited = set()\n heap = [(0, start, None)] # distance, node, parent\n\n while heap:\n curr_dist, node, par = heapq.heappop(heap)\n\n # only visit this node if it's not visited yet. The first time we visit a node, because of the heap, it's\n # guaranteed that it's the shortest path\n if node not in visited:\n # visit this node\n visited.add(node)\n\n # update shortest dict and parent dict\n shortest_dist[node] = curr_dist\n parent[node] = par\n\n # we won't visit this node again, so compute the parent list here\n curr_parent_list = []\n pnode = node\n # work out shortest parent\n while pnode:\n curr_parent_list.append(pnode)\n pnode = parent[pnode]\n shortest_parent[node] = curr_parent_list[::-1]\n\n # add adjacent nodes if they haven't been visited\n if node in graph:\n for to, w in graph[node]:\n if to not in visited:\n heapq.heappush(heap, (curr_dist + w, to, node))\n\n print(shortest_dist)\n print(shortest_parent)", "def shortest_path(source, target):\n #although lecture checks for goal when a node is popped off the frontier, efficiency of search can be improved\n #by checking for a goal as nodes are ADDED. 
If goal detected, don't add it to frontier, just return the solution\n #immediately\n\n #create start point\n start = Node(state = source, parent = None, action = None)\n frontier = QueueFrontier()\n frontier.add(start)\n\n #create explored set\n explored = set()\n\n while True:\n #if nothing left in frontier, no path exists\n if frontier.empty():\n return None\n\n #choose a node from the frontier\n node = frontier.remove()\n #if node is goal, we have solution\n\n #add neighbors 2 frontier using function THATS ALR THERE DUMMY\n for (movie, star) in neighbors_for_person(node.state):\n newNode = Node(state = star, parent = node, action=movie)\n if not frontier.contains_state(newNode) and newNode.state not in explored:\n if newNode.state == target:\n #reverse the solution\n solution = []\n while newNode.parent is not None:\n actionTuple = (newNode.action, newNode.state)\n solution.append(actionTuple)\n newNode = newNode.parent\n solution.reverse()\n return solution\n else: frontier.add(newNode)\n\n #mark state as explored\n explored.add(node.state)", "def floyd_warshall(self, start, end):\n distance = {}\n next_node = {}\n nodes = self.keys()\n for edge in self.edges():\n distance.setdefault(edge[0], {})[edge[1]] = edge[2]\n next_node.setdefault(edge[0], {})[edge[1]] = edge[1]\n\n for node in nodes:\n for neighbor in nodes:\n if neighbor not in self[node]:\n distance.setdefault(node, {})[neighbor] = float('inf')\n for k in nodes:\n for i in nodes:\n for j in nodes:\n if distance[i][j] > distance[i][k] + distance[k][j]:\n distance[i][j] = distance[i][k] + distance[k][j]\n next_node[i][j] = next_node[i][k]\n\n return self.floyd_warshall_path(start, end, next_node)", "def getPath(\n self,\n source,\n dest,\n as_nodes=False,\n ):\n\n self.dist = {} # A map from nodes to their labels (float)\n self.predecessor = {} # A map from a node to a node\n\n # Initialize the distance labels to \"infinity\"\n\n vertices = self.g.nodes()\n for vertex in vertices:\n self.dist[vertex] = self.inf\n self.predecessor[vertex] = source\n\n # Further set up the distance from the source to itself and\n # to all one hops away.\n\n self.dist[source] = 0.0\n if self.g.is_directed():\n outEdges = self.g.out_edges([source])\n else:\n outEdges = self.g.edges([source])\n for edge in outEdges:\n self.dist[edge[1]] = self.g[edge[0]][edge[1]][self.wt]\n\n s = set(vertices)\n s.remove(source)\n currentMin = self._findMinNode(s)\n if currentMin == None:\n return None\n s.remove(currentMin)\n while currentMin != dest and len(s) != 0 and currentMin != None:\n if self.g.is_directed():\n outEdges = self.g.out_edges([currentMin])\n else:\n outEdges = self.g.edges([currentMin])\n for edge in outEdges:\n opposite = edge[1]\n if self.dist[currentMin] + self.g[edge[0]][edge[1]][self.wt] \\\n < self.dist[opposite]:\n self.dist[opposite] = self.dist[currentMin] \\\n + self.g[edge[0]][edge[1]][self.wt]\n self.predecessor[opposite] = currentMin\n s.add(opposite)\n\n currentMin = self._findMinNode(s)\n\n # print \"Current min node {}, s = {}\".format(currentMin, s)\n\n if currentMin == None:\n return None\n s.remove(currentMin)\n\n # Compute the path as a list of edges\n\n currentNode = dest\n predNode = self.predecessor.get(dest)\n node_list = [dest]\n done = False\n path = []\n while not done:\n path.append((predNode, currentNode))\n currentNode = predNode\n predNode = self.predecessor[predNode]\n node_list.append(currentNode)\n done = currentNode == source\n node_list.reverse()\n if as_nodes:\n return node_list\n else:\n return path", "def 
find_shortest_path(g, n, s, e):\n dist, prev = lazy_dijkstra(g, n, s)\n path = []\n if (dist[e] == inf):\n return path\n # loop backwards from the end vertex\n path.append(e)\n i = e\n # prev[i] == None corresponds to the start node\n while prev[i] != None:\n path.append(prev[i])\n i = prev[i]\n return list(reversed(path))", "def shortest_flight(self):\r\n distance = sys.maxsize\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n if edge.distance < distance:\r\n distance = edge.distance\r\n start = edge.start\r\n destination = edge.destination\r\n return start, destination, distance", "def __search_path(self, start_node, goal_node):\n\n path = []\n queue = PriorityQueue()\n queue.put((0, start_node))\n visited = set(start_node)\n\n branch = {}\n found = False\n \n while not queue.empty():\n item = queue.get()\n current_cost = item[0]\n current_node = item[1]\n\n if current_node == goal_node: \n found = True\n break\n else:\n for next_node in self._route_graph[current_node]:\n cost = self._route_graph.edges[current_node, next_node]['weight']\n new_cost = current_cost + cost + self.__heuristic(next_node, goal_node)\n\n if next_node not in visited: \n visited.add(next_node) \n queue.put((new_cost, next_node))\n\n branch[next_node] = (new_cost, current_node)\n\n path = []\n path_cost = 0\n if found:\n # retrace steps\n path = []\n n = goal_node\n path_cost = branch[n][0]\n while branch[n][1] != start_node:\n path.append(branch[n][1])\n n = branch[n][1]\n path.append(branch[n][1])\n else:\n print(\"Path Not Found\")\n\n return path[::-1], path_cost", "def nearest_neighbor_tsp(shortest_paths, starting_point=0):\n number_of_nodes = len(shortest_paths)\n unvisited_nodes = list(range(number_of_nodes))\n unvisited_nodes.remove(starting_point)\n visited_nodes = [starting_point]\n\n while number_of_nodes > len(visited_nodes):\n neighbor_distances = pd.Series(shortest_paths[visited_nodes[-1]])\n neighbor_distances = neighbor_distances[(neighbor_distances > 0) &\n (neighbor_distances.index\n .isin(set(unvisited_nodes)))]\n next_node = neighbor_distances.idxmin()\n visited_nodes.append(next_node)\n unvisited_nodes.remove(next_node)\n return visited_nodes", "def get_shortest_route_two_nodes(start_node, end_node, graph_instance):\n route = shortest_path(\n graph_instance.ox_graph, start_node, end_node, weight=\"length\"\n )\n return route", "def dfs(graph, start):\n\tstack,path = [start],[]\n\twhile stack:\n\t\tele = stack.pop()\n\t\tif ele in path:\n\t\t\tcontinue\n\t\telse:\n\t\t\tpath.append(ele)\n\t\t\tfor neighbours in graph[ele]:\n\t\t\t\tstack.append(neighbours)\n\n\treturn path", "def FindAllPaths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n if start not in graph:\n return None\n paths = []\n for node in graph[start]:\n if node not in path:\n newpaths = find_all_paths(graph, node, end, path)\n for newpath in newpaths:\n paths.append(newpath)\n return paths", "def dijkstra(self, start, maxD=1e309):\n # total distance from origin\n tdist = defaultdict(lambda: 1e309)\n tdist[start] = 0\n # neighbour that is nearest to the origin\n preceding_node = {}\n unvisited = self.nodes\n\n while unvisited:\n current = unvisited.intersection(tdist.keys())\n if not current: break\n min_node = min(current, key=tdist.get)\n unvisited.remove(min_node)\n\n for neighbour in self.neighbours[min_node]:\n d = tdist[min_node] + self.dist[min_node, neighbour]\n if tdist[neighbour] > d and maxD >= d:\n tdist[neighbour] = d\n preceding_node[neighbour] = 
min_node\n\n return tdist, preceding_node", "def bfs_paths(self, start: str, goal: str) -> List[Path]:\n queue = [(start, [start])]\n while queue:\n (node, path) = queue.pop(0)\n if node not in self.graph:\n yield []\n for _next in set(self.graph[node]) - set(path):\n if _next == goal:\n yield path + [_next]\n elif _next in self.graph:\n queue.append((_next, path + [_next]))", "def spDijkstra(self, node, returnPaths = False, nodeWeights = {}):\n # Initialize heap, not found nodes, and found nodes\n heap = map(lambda k:\\\n (0 if k == node else self.Inf, k,\\\n k if k == node else None), self.__nodes.keys())\n nodes = dict(map(lambda (i, n): (n[1], i), enumerate(heap)))\n found = defaultdict(lambda: None)\n \n # Set default node weight to zero\n nodeWeights = defaultdict(lambda: 0, nodeWeights)\n \n # Swap target node to beginning of heap\n Graph.__swapHeapNodes(heap, nodes, 0, nodes[node])\n \n # Iterate through nodes\n for stopPos in xrange(len(heap) - 1, -1 , -1):\n # Pop minimum node\n n = Graph.__heappop(heap, nodes, 0, stopPos)\n \n # Add to found nodes\n found[n[1]] = (n[0], n[2])\n \n # Update path length for nodes reachable from minimum node\n for e in self.__nodes[n[1]]['tails']:\n # Get edge components\n tail, head, weight = self.__edges[e]\n \n # Continue if head node already found\n if found[head] is not None: continue\n \n # Calculate new path length and update if less than current\n newLen = n[0] + weight + nodeWeights[tail] - nodeWeights[head]\n if newLen < heap[nodes[head]][0]:\n heap[nodes[head]] = (newLen, head, tail)\n Graph.__siftup(heap, nodes, nodes[head])\n \n # Remove node weights from path lenghts\n found = dict(map(lambda k:\\\n (k, (found[k][0] - nodeWeights[node] + nodeWeights[k], found[k][1])),\\\n found.keys()))\n \n # Return shortest paths\n return self.__reconstructPath(found, returnPaths)", "def djikstra(self, start_vertex):\n \n # Initialize a priority queue by distance to vertices\n queue = [(0, start_vertex)]\n heapq.heapify(queue)\n \n # Initialize the dictionary which will contain the answer\n min_dists = collections.defaultdict(lambda: float('inf'))\n min_dists[start_vertex] = 0\n \n # A set to keep track of visisted vertices\n visited = set()\n \n # While there are elements left in the priority queue\n while queue:\n \n # Pop off the vertex which is closest\n (dist, vertex) = heapq.heappop(queue)\n \n # If it's seen, continue. If not, it's seen now\n if vertex not in visited:\n visited.add(vertex)\n else:\n continue\n \n # Go through every neighbor of the vertex\n for neighbor, weight in self.neighbors(vertex, True):\n \n # The minimum distance to `neighbor` might be improved if\n # the path going through `vertex` is smaller than the\n # existing solution. This is the dynamic programming step.\n min_dist_so_far = min_dists[neighbor]\n possible_new_min = min_dists[vertex] + weight\n min_dists[neighbor] = min(min_dist_so_far, possible_new_min)\n \n # Push to the queue. 
Prioritize by distance.\n heapq.heappush(queue, (min_dists[neighbor], neighbor))\n \n return min_dists", "def find_path_all_bfs(graph,start,end):\n\tvisited = set()\n\twatched = set()\n\tpaths = []\n\n\twatched.add(start)\n\n\tnodes_queue = [(start,[start])]\n\twhile nodes_queue:\n\t\tcurrent_node, path = nodes_queue.pop(0)\n\n\t\tvisited.add(current_node)\n\n\t\tif (current_node == end):\n\t\t\tpaths.append(path)\n\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append((adjacent_node, path+[adjacent_node]))\n\n\treturn paths", "def dijkstra(start, vertex_list, line_list, vertex_labels, polygons):\n # create stack Q with all vertices including the arbitrary starting point\n Q = {**vertex_labels}\n Q[0] = start\n vertex_labels_with_start = {**Q}\n dist = {}\n prev = {}\n for key, val in Q.items():\n dist[key] = 1e10\n prev[key] = None\n # start has zero distance to itself\n dist[0] = 0\n while Q:\n min_ = 1e10\n curr_vertex = None\n # simulates priority queue (min heap) with for loop\n for v in Q.keys():\n if dist[v] < min_:\n curr_vertex = v\n min_ = dist[v]\n # curr_vertex = min(dist, key=dist.get)\n if curr_vertex is None:\n print(\"Target cannot be reached!\")\n break\n Q.pop(curr_vertex)\n invalid_point = False\n for poly in polygons:\n if inside_polygon(vertex_labels_with_start[curr_vertex], poly):\n invalid_point = True\n break\n if invalid_point:\n continue\n if curr_vertex == len(vertex_list):\n break\n _, vis_labels = visibility_graph(vertex_labels_with_start[curr_vertex], vertex_list, line_list)\n # Just implement dijkstra - need a way to mark vertices with labels\n for elem in vis_labels:\n if elem in Q:\n alt = dist[curr_vertex] + np.sqrt(len2((diff_(vertex_labels_with_start[curr_vertex],\n vertex_labels_with_start[elem]))))\n if alt < dist[elem]:\n dist[elem] = alt\n prev[elem] = curr_vertex\n return dist, prev", "def dijkstra_shortest_path(graph, search):\n distances_from_start = [None] * len(graph)\n\n visited_vertexes = []\n\n current_vertex = 0\n\n distances_from_start[current_vertex] = [0, 0] # [distance from start, via vertex]\n\n for row in range(len(graph)):\n\n current_vertex = row\n\n #print(\"Current vertex: \", current_vertex)\n\n # Iterate through each column in the current row in the adjacency matrix\n for col in range(len(graph[current_vertex])):\n\n if graph[current_vertex][col] is not None and distances_from_start[col] is None:\n distances_from_start[col] = [distances_from_start[current_vertex][0] + graph[current_vertex][col], current_vertex]\n\n elif graph[current_vertex][col] is not None and (graph[current_vertex][col] + distances_from_start[current_vertex][0]) < distances_from_start[col][0]:\n distances_from_start[col] = [(graph[current_vertex][col] + distances_from_start[current_vertex][0]), current_vertex]\n\n print(\"Distances from start: \", distances_from_start) # show updated distances_from_start array\n\n # Add current_vertex to visited list so that its distance from the start is calculated again in future\n if current_vertex not in visited_vertexes:\n visited_vertexes.append(current_vertex)\n\n # print(\"Visited vertexes: \", visited_vertexes)\n\n # Print the shortest path in a friendly format\n print(\"Shortest path:\")\n current_vertex = search #len(graph) - 1\n path_string = \"\"\n orderlist = []\n while current_vertex > 0:\n\n # Add the distance for the current vertex from the start in brackets after the letter of the vertex.\n path_string = 
\"{0}({1}) \".format(chr(current_vertex + 65), distances_from_start[current_vertex][0]) + path_string\n\n temp = [chr(current_vertex + 65), distances_from_start[current_vertex][0]]\n\n orderlist.append(temp)\n\n # Update the current vertex to be the one that the current one goes via on its way back to the start\n current_vertex = distances_from_start[current_vertex][1] # distances_from_start[vertex number, via vertex]\n\n\n # Add the start vertex to the output string as the while loop will stop before we add its details to the string\n path_string = \"{0}({1}) \".format(chr(current_vertex + 65), distances_from_start[current_vertex][0]) + path_string\n\n temp = [chr(current_vertex + 65), distances_from_start[current_vertex][0]]\n orderlist.append(temp)\n\n print(path_string)\n\n return orderlist[::-1]", "def shortest_path(graph, start, end):\n # Tree will contain both the final path and some other paths in reverse.\n # Each value is a 2-tuple; the first item is the depth of the key, and the\n # second is the parent of the key.\n tree = {start: (0, None)}\n\n # next_edges is a min_heap used as a priority queue; the next item in it\n # will always be the node adjacent to the growing tree that adds the least\n # to the total depth of the branch.\n next_edges = []\n\n # Add all of the edges adjacent to the start to next_edges.\n for edge in graph[start]:\n heapq.heappush(next_edges, edge + (start, ))\n\n # Loop until we run out of edges or we find the end (see condition below).\n while len(next_edges) > 0:\n depth, child, parent = heapq.heappop(next_edges)\n\n # Ignore edges connecting to children nodes already in the tree.\n if child in tree:\n continue\n\n tree[child] = (depth, parent)\n\n # Add the edges from the new node to the list of possible edges.\n for length, new_child in graph[child]:\n if new_child not in tree:\n heapq.heappush(next_edges, (depth + length, new_child, child))\n\n # If we found the end, flush the next_edges queue and end the search.\n if child is end:\n next_edges = []\n break\n\n # Construct the forward path from start -> end from the tree.\n path = []\n node = end\n while node is not None:\n path.append(node)\n node = tree[node][1]\n\n path.reverse()\n\n total_depth = tree[end][0]\n return (total_depth, path)", "def dfs_paths(graph, start, goal, method='dfs'):\n \n # Define the search method\n stack_pop = -1\n if method == 'bfs':\n stack_pop = 0\n \n stack = [(start, [start])]\n while stack:\n (vertex, path) = stack.pop(stack_pop)\n neighbors = node_neighbors(graph, vertex)\n for next_node in set(neighbors) - set(path):\n if next_node == goal:\n yield path + [next_node]\n else:\n stack.append((next_node, path + [next_node]))", "def shortest_path(self, id1: int, id2: int) -> (float, list):\n\n if id1 == id2:\n return 0, [id1]\n if id1 not in self.dw_graph.nodes or id2 not in self.dw_graph.nodes:\n return math.inf, []\n\n for n in self.dw_graph.get_all_v().values(): # Set all distance to be max value.\n if n.node_id != id1:\n n.distance = sys.maxsize\n n.visited = 0\n path = []\n min_heap=[]\n self.dw_graph.nodes[id1].distance = 0\n heapq.heappush(min_heap,(self.dw_graph.nodes[id1].distance,self.dw_graph.nodes[id1]))\n\n while len(min_heap):\n node = heapq.heappop(min_heap) # pop the smallest item\n current = node[1] # Get node from tuples\n current.visited = 1 # Set the node to visited\n\n for neighbour in self.dw_graph.all_out_edges_of_node(current.node_id).values(): # Get neighbours\n if self.dw_graph.nodes[neighbour.dest].visited == 0: # if we didn't visit this 
neighbour\n new_dist = current.distance + neighbour.weight # Set new distance\n\n if self.dw_graph.nodes[neighbour.dest].distance > new_dist: # If new distance is smaller , update it.\n self.dw_graph.nodes[neighbour.dest].distance = new_dist\n\n heapq.heappush(min_heap,(self.dw_graph.nodes[neighbour.dest].distance,\n self.dw_graph.nodes[neighbour.dest])) # add to priority queue\n self.dw_graph.nodes[neighbour.dest].parent = current.node_id # Update parent\n\n if self.dw_graph.nodes[id2].distance == sys.maxsize: # if the distance is still max value , can't reach\n return math.inf, []\n\n\n path.append(id2)\n current = self.dw_graph.nodes[id2].parent\n self.dw_graph.nodes[id1].parent=-1\n while current != -1: # Traverse backwards until parent is -1\n path.append(current)\n current = self.dw_graph.nodes[current].parent\n path.reverse()\n return self.dw_graph.nodes[id2].distance, path", "def bfsShortestPath(graph, start, goal):\n\n # set up a path list\n path = [start]\n\n # return a simple path if start is the goal\n if start == goal:\n return path\n\n # list to keep track of all visited nodes\n explored = []\n\n # the FIFO queue\n queue = []\n\n # add the first path to the queue\n queue.append(path)\n\n # keep looping until there are no nodes still to be checked\n while len(queue) > 0:\n\n # pop first item from queue (FIFO)\n path = queue.pop(0)\n\n # retrieve the last node from the path list\n node = path[-1]\n\n # check if the node has already been explored\n if node not in explored:\n\n # add node to list of checked nodes\n explored.append(node)\n\n # get neighbours if node is present, otherwise default to empty list\n neighbours = graph.get(node, [])\n\n # go through all neighbour nodes\n for neighbour in neighbours:\n # make a copy of the current path\n path1 = path[:]\n\n # add this neighbour to the path\n path1.append(neighbour)\n\n # return path if neighbour is goal\n if neighbour == goal:\n return path1\n\n # push it onto the queue for further exploration\n queue.append(path1)\n\n # we couldn't find the goal... 
:(\n return None", "def find_all_paths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n paths = []\n for node in graph[start]:\n newpaths = find_all_paths(graph, node, end, path)\n paths += newpaths\n return paths", "def djikstra(nodes,links,source,dest):\n route = []\n vertexes = []\n for v in nodes:\n v.set_dist(float(\"inf\"))\n v.set_prev(None)\n heappush(vertexes, v)\n source.set_dist(0)\n heapify(vertexes)\n while vertexes:\n unsorted = False\n u = heappop(vertexes)\n if u == dest:\n break #because we found the destination no need to look further\n for v in u.get_links():\n if v.get_enabled():\n alt = u.get_dist() + 1\n target = v.get_target()\n if alt < target.get_dist():\n target.set_dist(alt)\n target.set_prev(u)\n unsorted = True #just a variable that help check if changes were made to the objects inside the heap\n if unsorted: #because i updated the variables but the heap wasn't maintained, i just heapify it again\n heapify(vertexes) \n #this is the part that saves the distance and route \n if dest.get_dist() == float(\"inf\"): #if there is no route then we just return None\n return None\n u = dest\n while u.get_prev() != None:\n v = u.get_prev()\n route.insert(0, v.get_specific_link(u)) \n u = v\n return route", "def shortest_path(M, start, goal):\n\n print(\"shortest path called\")\n\n came_from = {}\n g_score = {}\n came_from[start] = None\n g_score[start] = 0\n open_heap = []\n heappush(open_heap, (0, start))\n\n while open_heap:\n current = heappop(open_heap)[1]\n\n if current == goal:\n break\n\n for neighbor in M.roads[current]:\n new_g_score = g_score[current] + heuristic(M.intersections[current], M.intersections[neighbor])\n\n if neighbor not in g_score or new_g_score < g_score[neighbor]:\n came_from[neighbor] = current\n g_score[neighbor] = new_g_score\n heappush(open_heap, (new_g_score, neighbor))\n\n optimal_path = []\n node = goal\n\n while came_from[node]:\n optimal_path.append(node)\n node = came_from[node]\n else:\n optimal_path.append(node)\n\n optimal_path.reverse()\n\n return optimal_path", "def get_path(start_state, dest_state):\n\n # 1. Compute minimal distances from end state while going backwards\n # Distance increases as we move away from dest_state\n distances = {dest_state: 0}\n next_distance = 1\n last_added = [dest_state]\n\n while start_state not in distances:\n # Crawl back one more step\n if len(last_added) == 0:\n return None\n\n new_added = []\n for state in last_added:\n for edge in state.get_incoming():\n if edge.pred not in distances:\n distances[edge.pred] = next_distance\n new_added.append(edge.pred)\n\n next_distance = next_distance + 1\n last_added = new_added\n new_added = [] # Clean up, not really necessary\n\n # Here, start_state has a known distance to dest_state\n\n # 2. 
From the start_state, step towards the dest_state\n # guided by the distances\n path = []\n while start_state != dest_state:\n next_distance = distances[start_state] - 1 # Distance we are looking for\n foundit = False\n for edge in start_state.get_outgoing():\n if edge.succ in distances and distances[edge.succ] == next_distance:\n path.append((start_state, edge.label))\n start_state = edge.succ\n foundit = True\n break\n\n assert foundit\n\n distances.clear() # Clean up, not really needed\n return path", "def dfs(graph, start, goal):\n\n final = []\n agenda = [[start]]\n\n # Process node stack\n while agenda:\n path = agenda.pop()\n\n # Exit if a path is found which reaches the goal\n if path[-1] == goal:\n final = path\n break\n\n # Push the new paths onto the stack\n connected = graph.get_connected_nodes(path[-1])\n for node in connected:\n # Ignore previously visited nodes\n if node not in path:\n agenda.append(path + [node])\n\n # Return the final path or initial empty list\n return final", "def dijkstra(G, source, target, weight, blacklist=set(), max_dist=INF):\n\n G_succ = G.succ if G.is_directed() else G.adj\n\n push = heappush\n pop = heappop\n\n # dictionary of optimal path to solved nodes\n paths = { source: [ source ] }\n # dictionary of final distances for each solved node\n dist = {}\n # track which nodes have been seen and the shortest path distance\n seen = {}\n\n c = count()\n fringe = []\n\n seen[source] = 0.0\n push(fringe, (0.0, next(c), source))\n\n weight = _create_weight_func(G, 'length')\n\n while fringe:\n # this is a priority queue, so will always pop minimum distance\n d, _, v = pop(fringe)\n \n # ignore if node has been visited already OR is blacklisted\n if v in dist or v in blacklist:\n continue\n\n dist[v] = d\n #\n # path found! break\n if v == target:\n break\n\n # loop through neighbors of v\n for u, e in G_succ[v].items():\n vu_cost = dist[v] + weight(u, v)\n\n if vu_cost <= max_dist and (u not in seen or vu_cost < seen[u]):\n seen[u] = vu_cost\n paths[u] = paths[v] + [u]\n push(fringe, (vu_cost, next(c), u))\n\n if target in paths and target in dist:\n return paths[target], dist[target]\n \n return None", "def shortest_path(self):\n\t\t#dict that will hold the cost of traveling to each station\n\t\t#add the initial cost of the starting station, which is 0\n\t\tD = {0:0}\n\n\t\t#add all of our dict keys (stations) to our queue\n\t\tstation_queue = self.station_graph.keys()\n\n\t\t#sort the keys! since the graph is directed and acyclic, the stations\n\t\t#can be explored one at a time, in order, without having to adjust\n\t\t#for the lowest distance value via priority queue.\n\t\t#\n\t\t#sort them with reverse=True so that they can be popped from the\n\t\t#end of the list instead of from the beginning. 
This should save\n\t\t#some cpu time.\n\t\tstation_queue.sort(reverse=True)\n\t\twhile len(station_queue) > 0:\n\n\t\t\tstation = station_queue.pop() #grab the next node in the queue\n\n\t\t\tfor next_st, next_cost in self.station_graph[station].iteritems():\n\t\t\t\t#loops through the current station's neighbors, and calculates\n\t\t\t\t#their costs from the starting node, making sure to store\n\t\t\t\t#the lowest cost in our D dict\n\t\t\t\talt = D[station] + next_cost #sum the costs\n\t\t\t\tif not D.has_key(next_st) or alt < D[next_st]:\n\t\t\t\t\t#if there is no cost on record, or if the newly calculated\n\t\t\t\t\t#cost is lower than the currently recorded one, then\n\t\t\t\t\t#record the newly calculated cost as the lowest\n\t\t\t\t\tD[next_st] = alt #set the cost to get to next_st\n\n\t\treturn D[self.final_stop]", "def dfs(self, starting_vertex, destination_vertex):\n\n parents = {}\n\n for index, (p, c) in enumerate(self.vertices.items()):\n for child in c:\n if child not in parents:\n parents[child] = []\n parents[child].append(p)\n\n path = []\n current = destination_vertex\n path.append(destination_vertex)\n\n while len(parents[current]):\n parent = parents[current][0]\n path.append(parent)\n if parent == starting_vertex:\n break\n current = parent\n\n path.reverse()\n return path", "def shortest_path_recursive(graph, source, target, visited):\n if source == target:\n return [source]\n neighbor_set = graph.get_node_neighbors(source).difference(visited)\n if len(neighbor_set) < 1:\n return None\n neighbor_set = neighbor_set.union(visited)\n return_list = [source]\n for element in neighbor_set:\n recur_list = shortest_path_recursive(\n graph, element, target, neighbor_set)\n if not recur_list is None:\n for return_item in recur_list:\n return_list.append(return_item)\n if len(return_list) < 2:\n return None\n return return_list", "def shortest_route(self, src, dest):\n\n # Dijkstra with unusual start condition to prevent src -> src == 0 distance\n x_in = set()\n a = defaultdict(lambda: float('inf'))\n v = self.V.copy()\n\n for node, cost in self.G[src].items():\n a[node] = cost\n x_in.add(node)\n v.remove(node)\n\n while x_in != self.V:\n mn = float('inf')\n new = None\n for x in x_in:\n for node, cost in self.G[x].items():\n if node in v:\n if (a[x] + cost) < mn: # optimize large/dense G with pri. 
q\n mn = a[x] + cost\n new = (x, node, cost)\n if new is None:\n break\n x, node, cost = new\n x_in.add(node)\n v.remove(node)\n a[node] = a[x] + cost\n return a[dest]", "def shortJourney(Alist,s,d):\n \"\"\"Find shortest distances to s in weighted graph, G\"\"\"\n \n #Initialize dictionaries\n dinit = 10**6\n Edict = {} #Explored nodes\n Udict = {} #Unexplored nodes\n path = [[] for l in Alist]\n\n Alen = len(Alist) #length of Alist\n dinits = [dinit]*Alen #list of airport indexes\n Udict = dict(zip(list(range(Alen)),dinits)) #zip into dictionary\n Udict[s] = 0\n path[s] = [s]\n \n #Main search\n while len(Udict)>0:\n #Find node with min d in Udict and move to Edict\n dmin = dinit\n for n,w in Udict.items():\n if w<dmin:\n dmin=w\n nmin=n\n Edict[nmin] = Udict.pop(nmin)\n print(\"moved node\", nmin)\n\n #Update provisional distances for unexplored neighbors of nmin \n for item in Alist[nmin]: #nminth element is a list of two element tuples (node, weight)\n n = item[0] #first elt of tuple is node/neighbour\n w = item[1] #2nd elt is density/weigh\n #for n,w in etc_______________________-\n \n if n in Edict:\n pass\n elif n in Udict:\n #key difference below\n dcomp = (w+dmin) #take sum as you go along\n if dcomp<Udict[n]:\n print(Udict)\n Udict[n]=dcomp\n path[n] = path[nmin] + [n]\n print(path) \n if nmin == d: #if current node is destination\n return [path[d],Edict[d]]\n return [] #no path", "def DFS(graph, start, end, path, shortest, toPrint=False):\n path = path + [start]\n if toPrint:\n print('Current DFS path:', printPath(path))\n if start == end:\n return path\n for node in graph.childrenOf(start):\n if node not in path: # avoid cycles\n if shortest == None or len(path) < len(shortest):\n newPath = DFS(graph, node, end, path, shortest, toPrint)\n if newPath != None:\n shortest = newPath\n elif toPrint:\n print('Already visited', node)\n return shortest", "def tdsp_dijsktra(graphs, start_time, initial, end):\n shortest_paths = {initial: (None, 0)}\n current_node = initial\n visited = set()\n\n ts = get_timestep(time(start_time.hour, start_time.minute))\n current_time = start_time\n\n while current_node != end:\n visited.add(current_node)\n destinations = graphs[ts].edges[current_node]\n weight_to_current_node = shortest_paths[current_node][1]\n logger.debug(\"weight_to_current_node = {}\".format(weight_to_current_node))\n if weight_to_current_node > 0:\n # update the time.\n ct = convert_float_to_time(weight_to_current_node)\n current_time += timedelta(hours=ct.hour, minutes=ct.minute)\n ts = get_timestep(time(current_time.hour, current_time.minute))\n\n logger.debug(\"shortest_paths = {}\".format(shortest_paths))\n logger.debug(\"destinations = {}\".format(destinations))\n\n for next_node in destinations:\n logger.debug(\"current_node = {}\".format(current_node))\n logger.debug(\"next_node = {}\".format(next_node))\n logger.debug(\"ts = {}\".format(ts))\n graph_weight = graphs[ts].weights.get((current_node, next_node), 1.0) # Fixme: if there is no entry then default to cost of 1.0\n weight = graph_weight + weight_to_current_node\n logger.debug(\"graphs[{}].weights[({}, {})] = {}\".format(ts, current_node, next_node, graph_weight))\n logger.debug(\"weight_to_current_node = {}\".format(weight_to_current_node))\n logger.debug(\"weight = {}\".format(weight))\n if next_node not in shortest_paths:\n logger.debug(\"next_node {} not in shortest paths\".format(next_node))\n shortest_paths[next_node] = (current_node, weight)\n else:\n current_shortest_weight = shortest_paths[next_node][1]\n if 
current_shortest_weight > weight:\n shortest_paths[next_node] = (current_node, weight)\n\n logger.debug(\"shortest_paths = {}\".format(shortest_paths))\n\n next_destinations = {node: shortest_paths[node] for node in shortest_paths if node not in visited}\n logger.debug(\"next_destinations = {}\".format(next_destinations))\n\n if not next_destinations:\n return None\n\n current_node = min(next_destinations, key=lambda k: next_destinations[k][1])\n logger.debug(\"selected node for shortest path = {}\".format(current_node))\n\n logger.debug(\"shortest_paths = {}\".format(shortest_paths))\n\n path = []\n while current_node is not None:\n path.append(current_node)\n next_node = shortest_paths[current_node][0]\n current_node = next_node\n\n # Reverse the path\n path = path[::-1]\n return path", "def paths(self, start):\n # This is probably a little slow\n tupadd = lambda p, v: (p[0] + v[0], p[1] + v[1])\n # First, we'll check adjacency moves.\n adj = [tupadd(start, v) for v in DIRECTIONS]\n yield from (p for p in adj if self.board(p) == 0)\n # Now we check repeated hops.\n # We do this by a breadth first search.\n\n #TODO: Consensus on legality of hopping back to start and \"skipping\"\n visited = set(adj)\n to_visit = [start]\n while len(to_visit):\n pt = to_visit.pop(0)\n if pt in visited:\n continue\n\n # We have to actually move a piece\n # But this stops us from considering \"start\" even if we can\n # make some hops and get back to start\n if pt is not start:\n yield pt\n \n visited.add(pt)\n # Compute the hop directions\n dirs = ((tupadd(pt, v), tupadd(pt, tupadd(v, v))) for v in DIRECTIONS)\n to_visit.extend(\n dest for over, dest in dirs\n if self.board(over) > 0\n and self.board(dest) == 0\n and dest not in visited\n and over != start\n )", "def findPathDFS(start, end):\n visited = set()\n visited.add(start)\n return __findPathDFS(start, end, visited)", "def _shortest_path(G, start, end, sp_cache):\n if (start, end) in SP_TABLE:\n return sp_cache[(start, end)]\n elif (end, start) in SP_TABLE:\n return sp_cache[(end, start)]\n else:\n D, P = _dijkstra(G, start, end)\n path = []\n temp = end\n while 1:\n path.append(end)\n if end == start: break\n end = P[end]\n path.reverse()\n sp_cache[(start, temp)] = path\n return path", "def dijkstra(graph, initial):\n \n visited_nodes = {initial: 0}\n path = {}\n\n nodes = set(graph.nodes)\n\n while nodes:\n closest_node = None\n for node in nodes:\n if node in visited_nodes:\n if closest_node is None:\n closest_node = node\n elif visited_nodes[node] < visited_nodes[closest_node]:\n closest_node = node\n if closest_node is None:\n break\n\n nodes.remove(closest_node)\n current_weight = visited_nodes[closest_node]\n\n for edge in graph.edges[closest_node]:\n try:\n weight = current_weight + graph.costs[(closest_node, edge)]\n except:\n continue\n if edge not in visited_nodes or weight < visited_nodes[edge]:\n visited_nodes[edge] = weight\n path[edge] = closest_node\n\n return visited_nodes, path", "def distance(self, start, end):\n if start == end:\n return 0\n if self.adjR == None:\n self.build_reverse_graph()\n n = self.nodes\n m = self.edges\n adj = self.adj\n adjR = self.adjR\n # For forward search\n processed = set()\n dist = {}\n dist[start] = 0\n heap = []\n heapq.heappush(heap,(0, start))\n # For backward search\n processedB = set()\n distB = {}\n distB[end] = 0\n heapB = []\n heapq.heappush(heapB,(0, end))\n shortest = float('inf')\n while heap and heapB:\n # For forward search\n if heap:\n d, u = heapq.heappop(heap)\n if u not in 
processed:\n processed.add(u)\n for v, w in adj[u]:\n if dist.get(v, -1) == -1:\n dist[v] = dist[u] + w\n heapq.heappush(heap,(dist[v], v))\n elif dist[v] > dist[u] + w:\n dist[v] = dist[u] + w\n heapq.heappush(heap,(dist[v], v))\n if dist[u] < shortest:\n for v, w in adj[u]:\n if v in processedB:\n length = dist[u] + distB[v] + w\n if length < shortest:\n shortest = length\n if u in processedB:\n return shortest\n else:\n return shortest\n # For backward search\n if heapB:\n d, u = heapq.heappop(heapB)\n if u not in processedB:\n processedB.add(u)\n for v, w in adjR[u]:\n if distB.get(v, -1) == -1:\n distB[v] = distB[u] + w\n heapq.heappush(heapB,(distB[v], v))\n elif distB[v] > distB[u] + w:\n distB[v] = distB[u] + w\n heapq.heappush(heapB,(distB[v], v))\n if distB[u] < shortest:\n for v, w in adjR[u]:\n if v in processed:\n length = distB[u] + dist[v] + w\n if length < shortest:\n shortest = length\n if u in processed:\n return shortest\n else:\n return shortest\n return -1", "def dfs(g: nx.Graph, start_node: Any) -> str:\n\n way = []\n stack = [start_node]\n y = {node: [] for node in g.nodes}\n while stack:\n elem = stack.pop()\n way.append(elem)\n for node in list(g.neighbors(elem)):\n if node not in way:\n stack.append(node)\n y[node].extend((*y[elem], elem))\n print(y)\n return \"\".join(way)", "def dfs(graph, initial_node, dest_node):\n parents = {}\n dfs_rec(graph, initial_node, {}, parents)\n\n path = []\n current_node = dest_node\n while current_node != initial_node:\n next_node = parents[current_node]\n path = [g.Edge(next_node, current_node, graph.distance(next_node, current_node))] + path\n current_node = next_node\n\n return path", "def least_cost_path(G, start, dest, cost):\n\n # Create a priority queue\n todo = pqueue.PQueue()\n todo.update(start, 0);\n\n # v in visited when the vertex v's least cost from start has been determined\n visited = set()\n\n # parent[v] is the vertex that just precedes v in the path from start to v\n parent = {}\n\n while todo and (dest not in visited):\n\n # priority queue operation\n # remove smallest estimated cost vertex from todo list\n (cur, c) = todo.pop_smallest()\n\n # it is now visited, and will never have a smaller cost\n visited.add(cur)\n\n for n in G.adj_to(cur):\n if n in visited: continue\n if todo.update(n, c+cost((cur,n))):\n parent[n] = cur\n\n # now, if there is a path, extract it. The graph may be disconnected\n # so in that case return None\n if dest not in visited:\n return None\n\n path = [dest]\n cur = dest\n while start not in path:\n cur = parent[cur]\n path.append(cur)\n\n path.reverse()\n return path", "def dfs(self, starting_vertex, destination_vertex):\n visited = set()\n paths = [[starting_vertex]]\n \"\"\"\n While the length of possible paths is not zero. \n Store the current path and remove it from possible \n paths. Return the last path if it's the destination. \n If the path hasn't been visited yet add it to the \n visited list and loop over it's edges creating paths \n to check later. 
\n \"\"\"\n while len(paths) > 0:\n path = paths.pop(-1)\n vertex = path[-1]\n if vertex == destination_vertex:\n return path\n if vertex not in visited:\n visited.add(vertex)\n for key in self.get_neighbors(vertex):\n newPath = path + [key]\n paths.append(newPath)", "def Dijkstra(self, start, end):\n # Pour n parcourant noeuds\n # for each node of graph\n walked = {}\n previous = {}\n for key in self.nodes.iterkeys():\n walked[key] = -1 # infinity\n previous[key] = None\n \n #Fin pour\n #début.parcouru = 0\n #pasEncoreVu = noeuds\n walked[start] = 0\n notFoundYet = []\n for key in self.nodes.iterkeys():\n notFoundYet.append(key)\n\n #Tant que pasEncoreVu != liste vide\n # n1 = minimum(pasEncoreVu) // Le nœud dans pasEncoreVu avec parcouru le plus petit\n # pasEncoreVu.enlever(n1)\n while len(notFoundYet) > 0:\n minimalNode = self.nodeAtMinimumDistance(notFoundYet, walked)\n \n # delete minimal from notFoundYet\n tmp = []\n for node in notFoundYet:\n if node != minimalNode: tmp.append(node)\n notFoundYet = tmp\n # Pour n2 parcourant fils(n1) // Les nœuds reliés à n1 par un arc\n # Si n2.parcouru > n1.parcouru + distance(n1, n2) // distance correspond au poids de l'arc reliant n1 et n2\n # n2.parcouru = n1.parcouru + distance(n1, n2)\n # n2.précédent = n1 // Dit que pour aller à n2, il faut passer par n1\n # Fin si\n # Fin pour\n # for each successor of minimal\n for successor in self.nodes[minimalNode]:\n distance = self.distanceBetween(successor, node)\n walkToSucc = walked[successor]\n walkToNode = walked[minimalNode]\n if walkToSucc == -1 or (walkToSucc > (walkToNode + distance)):\n walked[successor] = walkToNode + distance\n previous[successor] = minimalNode\n\n # Here, all nodes are founded\n \n\n\n #Fin tant que\n #chemin = liste vide\n #n = fin\n #Tant que n != début\n path = []\n target = end\n while target != start:\n # chemin.ajouterAvant(n)\n # n = n.précédent\n path = [target] + path\n target = previous[target]\n\n #Fin tant que\n #chemin.ajouterAvant(début)\n path = [start] + path\n #Retourner chemin\n return path", "def min_distance(graph, begin, end):\n routes = {k: \"inf\" for (k, v) in graph.items()}\n\n nodes = deque([(begin, graph[begin])])\n\n while nodes:\n\n root, _nodes = nodes.popleft()\n\n for (name, dist) in _nodes:\n # current length from begining to a node name\n _dist = routes[root] + dist if routes[root] != \"inf\" else dist\n # if length ro a node name is not defined yet - define it\n if routes[name] != \"inf\":\n routes[name] = min([routes[name], _dist])\n else:\n # else - choose which way is shorter - a new or old\n routes[name] = _dist\n\n nodes.append([name, graph[name]])\n\n return routes[end]" ]
[ "0.858256", "0.7994935", "0.7979896", "0.7651095", "0.75588065", "0.7505487", "0.7493066", "0.7488095", "0.7451653", "0.74178004", "0.73748696", "0.7356794", "0.726981", "0.7252933", "0.7238483", "0.72201383", "0.7189133", "0.71680886", "0.71166384", "0.7039121", "0.70280594", "0.7027079", "0.70185757", "0.7007236", "0.6997403", "0.69774806", "0.6927137", "0.6922878", "0.69120705", "0.68877083", "0.6874931", "0.68739945", "0.68610466", "0.68583477", "0.68361366", "0.6827559", "0.68200874", "0.68162465", "0.6808534", "0.6804021", "0.6796883", "0.67932546", "0.6789905", "0.6780912", "0.6775609", "0.6775292", "0.6772479", "0.67601705", "0.67537457", "0.6753609", "0.6749663", "0.67492336", "0.6738277", "0.67330414", "0.67154396", "0.6713947", "0.6706574", "0.66975605", "0.66792923", "0.6673104", "0.6665069", "0.66574866", "0.66558146", "0.66547", "0.6650051", "0.66470647", "0.6639708", "0.662947", "0.6628068", "0.6604541", "0.66036475", "0.65994656", "0.6597552", "0.6593379", "0.65725267", "0.65501475", "0.65457565", "0.6529979", "0.6529754", "0.65287787", "0.6527065", "0.65253264", "0.65130335", "0.65046495", "0.65045977", "0.65037954", "0.6492715", "0.6490138", "0.6488866", "0.6464992", "0.6463805", "0.6452885", "0.6451734", "0.6446167", "0.6422409", "0.64108187", "0.64072883", "0.64064336", "0.6402164", "0.64001304" ]
0.7826668
3
Traverses backwards through predecessors from end
def _deconstruct_path(predecessors, end):
    if end not in predecessors:
        return None
    current = end
    path = []
    while current:
        path.append(current)
        current = predecessors.get(current)
    return list(reversed(path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __backward(self):\n if self.is_empty():\n raise StopIteration\n\n current = self._tail\n yield current._data\n while current._previ:\n current = current._previ\n yield current._data", "def reverse_iterative(self):\n \"\"\"O(n) / O(1) solution.\"\"\"\n pre = None\n current = self.head\n while current is not None:\n next = current.next\n current.next = pre\n pre = current\n current = next\n self.head = pre", "def __reversed__(self): \n yield from self._traverse_backward(self.root)", "def digraph_walker_backwards(graph, element, call_back):\r\n call_back(graph, element)\r\n for predecessor in graph.predecessors(element):\r\n call_back(graph, predecessor)\r\n for predecessor in graph.predecessors(element):\r\n digraph_walker_backwards(graph, predecessor, call_back)", "def __reversed__(self):\n if len(self) == 0:\n return\n\n # Create a list containing pointers to each\n # prev_node in the list.\n cur_node = self.head\n prev_nodes = [None]\n while cur_node != self.tail:\n prev_nodes.append(cur_node)\n cur_node = cur_node.next_node\n\n # Using the prev_nodes list, iterate backwards\n while cur_node is not None:\n for x in reversed(cur_node.data_list):\n yield x\n cur_node = prev_nodes[-1]\n del prev_nodes[-1]", "def backtrack(start, end, prev):\n backtracked = False\n curr_node = end\n # instantiate path as list with destination only\n path = [curr_node]\n while not backtracked:\n # retrieve previous node\n prev_node = prev[curr_node]\n # insert it at beginning of path\n path.insert(0, prev_node)\n # move onto previous node as current node for next iteration\n curr_node = prev_node\n # break loop if we reached start\n if curr_node == start:\n backtracked = True\n return path", "def reverse(self):\n current = self.head\n previous = None \n while current is not None:\n next_node = current.next_node \n current.next_node = previous\n current, previous = next_node, current \n self.head = previous", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def connect_backwards(self):\n\n for n in self.nodes:\n n.receives_from = []\n\n for n1 in self.nodes:\n for n2 in n1.sends_to:\n n2.receives_from.append(n1)", "def backward(self, top, propagate_down, bottom):\r\n pass", "def reverse(self):\n curr = self.head\n prev_node = None\n while curr:\n prev_node = curr.prev\n curr.prev = curr.next\n curr.next = prev_node\n curr = curr.prev\n self.head = prev_node.prev", "def _traverse_backward(self, node):\n if node is not None:\n yield from self._traverse_backward(node.right)\n yield node.data\n yield from self._traverse_backward(node.left)", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def reconstruct_path(source, target, predecessors):\n if source == target:\n return []\n prev = predecessors[source]\n curr = prev[target]\n path = [target, curr]\n while curr != source:\n curr = prev[curr]\n path.append(curr)\n return list(reversed(path))", "def reverse(self):\n\n h = self.head\n previous = None\n while h is not None:\n next = h.next\n h.next = previous\n previous = h\n h = next\n\n self.head = previous\n\n # self.head.next = h\n # pass", "def backward(self, top, propagate_down, bottom):\n\t\tpass", "def test_remove_predecessors():\n assert remove_predecessors({\"A\": [\"B\", \"C\"]}, \"B\") == {\"A\": [\"C\"]}\n assert remove_predecessors({\"A\": [\"B\", \"C\"]}, \"D\") == {\"A\": [\"B\", 
\"C\"]}", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backwards(self):\n pass", "def reverse_iterative(self):\n # Create the new LinkedList.\n new_list = LinkedList()\n\n # Set the initial node to reverse from.\n node = self.first_node\n\n # iterate over each node and stop when node is None\n while node:\n next = node.next\n # Prepend the node to the new list.\n new_list.prepend(node)\n\n # Update the node reference.\n node = next\n return new_list", "def __traverse_backward(self):\n work = list(reversed(self.__components))\n while work:\n component = work.pop()\n if isinstance(component, _Imagine):\n yield component\n else:\n work.extend(list(reversed(component.__components)))", "def backtrack(start, current, prev):\n if current != start:\n v = prev[current]\n return backtrack(start, v, prev) + [current]\n else:\n return [start]", "def backtrack(self):\n last_intersection = self.intersection.pop()\n retrace = Shortest_path().shortestPath(self.graph, self.current, last_intersection)\n print retrace\n print \"Moving back...\"\n self.current = retrace.pop(0)\n if self.current in self.intersection:\n self.intersection.remove(self.current)\n while retrace:\n position = retrace.pop(0)\n self.move_to_position(position)\n if position in self.intersection:\n self.intersection.remove(position)", "def backward(self):\n raise NotImplementedError", "def reverse(self):\n\n (self.front, _) = LinkedList.reverse_recursive(self.front)", "def reverse(self):\n node = self.head\n while node is not None:\n next_node = node.next_node \n node.next_node, node.prev_node = node.prev_node, node.next_node \n node = next_node\n self.head, self.tail = self.tail, self.head", "def reverse(self): # Class O(nlog2n)\r\n # I'm assuming this classification because this function\r\n # calls removeNode() and addNodeAfter()\r\n listvalues = \"%s\" % self.head\r\n h = self.head\r\n l = self.length()\r\n count = 0\r\n while count <= l:\r\n try:\r\n self.addNodeAfter(h.value, l - count)\r\n self.removeNode(1)\r\n h = h.next\r\n count += 1\r\n except:\r\n break", "def reverse(self):\n\n current = self.head\n prev = None\n\n while current is not None:\n tmp = current.next\n current.next = prev\n\n prev = current\n current = tmp\n\n # Update the head\n self.head = prev", "def backward(self, top, propagate_down, bottom):\n for ib in range(2):\n if not propagate_down[ib]:\n continue\n ndim = bottom[0].data.shape\n count = ndim[0] * ndim[2] * ndim[3]\n if not self.count:\n bottom[ib].diff[ ... ] = np.zeros_like( bottom[0].data )\n continue\n if top[0].data < 1.\n bottom[ib].diff[ ... ] = np.abs( bottom[0].data - bottom[1].data )\n bottom[ib].diff[ ... ] *= ( 1 - 1.0*self.iter/self.maxiter )\n else:\n bottom[ib].diff[ ... 
] = np.ones_like( bottom[ib].data )\n inop = bottom[0].data < bottom[1].data\n bottom[ib].diff[ inop ] *= -1\n \n # ingore false label and repair\n ignore = bottom[1].data <= 0.\n count -= np.sum(ignore)\n bottom[ib].diff[ignore] = 0.\n #normlist\n bottom[ib].diff[...] /= count", "def instrsreversed(self):\n x = self._lastInstr\n while x is not None:\n # now we can remove x and continue iterating :)\n x_prev = x.prev\n yield x\n x = x_prev", "def invert(self) -> None:\n\n curr = self._first\n previous = None\n\n while curr:\n curr_value = curr.next\n curr.next = previous\n\n previous = curr\n curr = curr_value\n\n self._first = previous", "def reverse_path_iterator(node):\n while node:\n yield node\n node = node.parent", "def reverse(self) -> None:\n def reverse_list(node: Node) -> None: #recursive function to reverse the list\n temp = node.prev\n node.prev = node.next\n node.next = temp\n if node.prev is self.node:\n return None\n reverse_list(node.prev)\n\n reverse_list(self.node)", "def print_backward(self):\n head = self\n tail = self.__next # go to my next node\n if tail is not None: # as long as the end of the list has not been reached\n tail.print_backward() # recursively print remainder of the list backwards\n print(head, end=\" \") # print my head", "def __path_to_end(self) -> List[List[int]]:\n predecessors = self.__predecessors_list()\n path = []\n\n row_exit, col_exit = Player.find_exit_position(self.__labyrinth)\n dest = self.__convert_position(row_exit, col_exit)\n\n v = dest\n\n path.append([v // 10, v % 10])\n\n while predecessors[v] != -1:\n path.append(predecessors[v])\n v = self.__convert_position(predecessors[v][0], predecessors[v][1])\n\n return path[::-1]", "def print_backward(self):\r\n head = self\r\n tail = self.__next # go to my next node\r\n if tail is not None: # as long as the end of the list has not been reached\r\n tail.print_backward() # recursively print remainder of the list backwards\r\n print(head, end=\" \") # print my head\r", "def predecessor_traverse(p,s,g):\n L = []\n v = g\n while v is not None:\n L.append(v)\n v = p.get(v,None)\n #rather than prepending, we appended and now we'll reverse. 
This is a more efficient than prepending\n return L[::-1]", "def BFS(start, end):\r\n queue = []\r\n queue.append(start)\r\n predecessors = {}\r\n predecessors[start] = None\r\n\r\n while len(queue):\r\n current = queue.pop(0)\r\n if current == end:\r\n break\r\n for neighbor in current.getConnections():\r\n if neighbor not in predecessors:\r\n predecessors[neighbor] = current\r\n queue.append(neighbor)\r\n\r\n if end in predecessors:\r\n path = []\r\n current = end\r\n while current != start:\r\n path.insert(0, current)\r\n current = predecessors[current]\r\n path.insert(0, start)\r\n return path\r\n else:\r\n return None", "def back_node(self):\n return self.sentinel.prev if self.N != 0 else None", "def dft(self, starting_vertex):\n # Create a s and push starting vertex\n ss = Stack()\n ss.push([starting_vertex])\n # Create a set of traversed vertices\n visited = []\n eldest = [] \n # While stack is not empty:\n while ss.size() > 0:\n # dequeue/pop the first vertex\n path = ss.pop()\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n # mark as visited\n visited.append(path[-1])\n print(visited)\n # enqueue all neightbors\n if not self.get_neighbors(path[-1]):\n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n eldest.append(path[-1])\n\n for next_vert in self.get_neighbors(path[-1]):\n new_path = list(path)\n # print(new_path)\n new_path.append(next_vert)\n ss.push(new_path)\n \n return min(eldest)", "def transition_path(self):\n node, path_back = self, []\n while node:\n path_back.append(node.action)\n node = node.parent\n return list(reversed(path_back))", "def _behind(self):\n if(self._prev is self._head):\n return self._head._prev\n return self._prev", "def rev(self):\n self.set.reverse()", "def path(self): # Path taken to reach Goal\n node, path_back = self, []\n while node:\n path_back.append(node)\n node = node.parent\n return list(reversed(path_back))", "def reversed(self):\n try:\n i = self.db[self._tailKey]\n while True:\n yield i\n i = self.db[self._getPrevKey(i)]\n except KeyError:\n pass", "def remove_from_predecessors(self, next_task: Task) -> None:\n for task in self._tasks:\n if next_task.is_predecessor_of(task):\n task.remove_predecessor(next_task)", "def reverse_path(self, crossings=[]):\r\n v = self\r\n while True:\r\n e = v.in_arrow\r\n v.reverse()\r\n if not e:\r\n break\r\n e.reverse(crossings)\r\n v = e.end\r\n if v == self:\r\n return\r\n self.reverse()\r\n v = self\r\n while True:\r\n e = v.out_arrow\r\n v.reverse()\r\n if not e:\r\n break\r\n e.reverse(crossings)\r\n v = e.start\r\n if v == self:\r\n return", "def reversed(self):\n return LINE(*self.elems,**{'reverse':(not self.reverse)})", "def _anchored_predecessors(self, n):\n\n # loop on all incoming edges\n for t in self.predecessors(n):\n \n # if predecessor is anchored\n # stop looking for (necessarily earlier) predecessors\n if t.anchored:\n yield t\n continue\n \n # if neighbor is not anchored\n # look one level deeper\n for tt in self._anchored_predecessors(t):\n yield tt", "def backward(self):\n #initiate the gradients\n #print('')\n \n #print('node {} grad {}'.format(self.id, self.gradient))\n #print('node {} times visited : {}/{}'.format(self.id, self.times_visited, self.times_used))\n\n if self.gradient is None:\n self.gradient=np.eye(self.output_dim)\n self.times_visited+=1\n\n \n \n if self.childrens==[]:\n return(self.gradient)\n else:\n self.backward()\n \n else: \n if self.childrens!=[]:\n #we can still going deeper in 
backprop\n #print(len(self.childrens), ' childrens', str([self.childrens[i]['node'].id for i in range(len(self.childrens))]))\n for child in self.childrens:\n node,jacobian=child['node'], child['jacobian']\n \n new_grad = np.dot(self.gradient, jacobian)\n #print(node.gradient)\n #print(new_grad)\n \n if node.gradient is None:\n node.gradient = new_grad\n else: \n node.gradient += new_grad\n \n node.times_visited+=1\n #print('looking at node {} \\ngradient {}'.format(node.id, node.gradient))\n\n \n if node.times_used ==node.times_visited: \n #print(node.gradient)\n node.backward() \n else:\n #still some computations to perform upwards before going deeped\n #print('node {} visits : {}/{}'.format(node.id, node.times_visited, node.times_used))\n pass", "def predecessors(self):\n predecessors = []\n for inst in self.inst.uses():\n if inst.op_name != 'OpPhi':\n predecessors.append(inst.basic_block)\n return predecessors", "def siftDown(start, count):\n root = start\n while root * 2 + 1 < count:\n child = root * 2 + 1 # 'child' is the left children of the current node\n if child < count - 1 and self.data[child] > self.data[child + 1]:\n # Verify that right sibling is lower than the left one, if so,\n # let 'child' be the right sibling\n child += 1\n if self.data[root] > self.data[child]:\n # Swap the current child and the parent if the parent is higher than the child\n self.data[root], self.data[child] = self.data[child], self.data[root]\n root = child\n else:\n return", "def reverse(self):\n for i in self._children:\n if isinstance(i, PQ):\n i.reverse()\n\n self._children.reverse()", "def relations_to(self, end_node):", "def compute_rev(p1, p2):\n p1 = list(reversed(p1))\n p2 = list(reversed(p2))\n return(compute_fwd(p1, p2))", "def rich_chain(self):\n chain = self\n\n result = []\n while chain.prev_fragment:\n result.append(chain)\n chain = chain.prev_fragment\n result.append(chain)\n result.reverse()\n\n return result", "def rrev(self, prev=None):\n if not self.head:\n self.head = prev\n else:\n save_next = self.head.next\n self.head.next = prev\n prev = self.head\n self.head = save_next\n self.rrev(prev)", "def displayBackward(self):\n if self.is_empty():\n print(\"Nothing to print...\")\n return\n \n walk = self._start._prev # starting from the end\n \n while walk is not self._start:\n print(walk._element, end=' ')\n walk = walk._prev\n print(self._start._element) # print the start element at the last", "def print_backward(self):\n print(\"[\", end=\" \")\n if self.__head is not None:\n self.__head.print_backward()\n print(\"]\")", "def trace_back(self, node):\n state_id = node.get_state_id()\n steps = [eval(state_id)]\n if state_id in self._history:\n previous = self._history[state_id]\n else:\n previous = False\n while previous:\n steps.append(eval(previous))\n if previous in self._history:\n previous = self._history[previous]\n else:\n previous = False\n return steps[::-1]", "def pop_from_tail(self):\n res = self.tail.pre\n\n res.pre.post = self.tail\n self.tail.pre = res.pre\n\n return res", "def detach_node(self, node):\n # if the node is at the end\n if self.end == node:\n self.pop()\n\n # elif it's at the beginning\n elif self.begin == node:\n # call unshift\n self.unshift()\n #else it's in the middle\n else:\n # skip over it\n # save node.prev, node.next\n prev = node.prev\n next = node.next\n # set prev.next to saved next\n node.prev.next = next\n # set next.prev to saved prev\n node.next.prev = prev", "def relations_from(self, start_node):", "def backward(self) -> np.ndarray:\n # 
TODO\n return None", "def reverse_list_iterative(head):\n curr = head\n prev = None\n while curr is not None:\n tmp = curr.next\n curr.next = prev\n prev = curr\n curr = tmp\n return prev", "def action_sequence(node):\n actions = []\n while node.previous:\n actions.append(node.action)\n node = node.previous\n return actions[::-1]", "def update_predecessors(graph, path):\n\n for i in range(1,len(path)):\n graph.Dictionary[graph.Keys[graph.Vertices.index(path[i])]].predecessor = copy.deepcopy(path[i-1])", "def ascend(self):\n node = self.parent\n while node:\n yield node\n node = node.parent", "def path(self):\n node, path_back = self, []\n while node:\n path_back.append(node)\n node = node.parent\n return list(reversed(path_back))", "def path(self):\n node, path_back = self, []\n while node:\n path_back.append(node)\n node = node.parent\n return list(reversed(path_back))", "def bfs(self, start, end):\n\n queue = [start]\n parent = dict()\n\n # Initialize parent dictionary\n for v in iter(self._reachable): parent[v] = None\n parent[start] = start\n\n while len(queue) > 0:\n (x, y) = queue.pop(0)\n if (x, y) == end: break\n\n for v in self.get_reachables(x, y):\n if parent[v] is not None: \n # Vertex v already visited\n continue\n parent[v] = (x, y)\n queue.append(v)\n\n # Reconstruct path\n path = [end]\n vertex = end\n\n while parent[vertex] != vertex:\n if parent[vertex] is None: return []\n path.append(parent[vertex])\n vertex = parent[vertex]\n\n path.reverse()\n return path", "def previous(self,dec=-1):\n for i in range(-dec):\n self.currentSub._previous()", "def sift_down(self, start, end):\n i, j = start, 2*start+1\n # Temporary variable to decrease exchange times\n temp = self.heap_list[start]\n # end is equal to len(self.heap_list)-1\n while j <= end:\n # compare left child node with right child node\n if j<end and self.heap_list[j]<self.heap_list[j+1]:\n j += 1\n if temp >= self.heap_list[j]:\n break\n else:\n #self.heap_list[i], self.heap_list[j] = self.heap_list[j], self.heap_list[i]\n self.heap_list[i] = self.heap_list[j]\n i = j\n j = 2*j+1\n self.heap_list[i] = temp", "def sift_down(array, start, end):\n root = start\n while root*2+1 <= end:\n child = root*2+1\n swap = root\n if array[swap] < array[child]:\n swap = child\n if child+1 <= end and array[swap] < array[child+1]:\n swap = child+1\n if swap != root:\n array[root], array[swap] = array[swap], array[root]\n root = swap\n else:\n break", "def move_back(t,n):\n lt(t)\n bk(t, n)\n rt(t)", "def compute_path(predecessor_matrix, start_node, end_node):\n\n i = start_node\n j = end_node\n path = []\n\n #Go through the predecessor matrix to save the data in a list\n while j != i:\n path.append(j)\n j = predecessor_matrix[j]\n path.append(i)\n\n #reverse it so that it goes from start node to end node instead\n path.reverse()\n return path", "def predecessors(self, node: Node):\n return iter(self.get_node(node_id) for node_id in node.in_nodes_ids)", "def _backward(T, edge_to_P, root, root_prior_distn, node_to_data_fset):\n v_to_subtree_partial_likelihoods = {}\n for v in nx.topological_sort(T, [root], reverse=True):\n fset_data = node_to_data_fset[v]\n if T and T[v]:\n cs = T[v]\n else:\n cs = set()\n if cs:\n partial_likelihoods = {}\n for s in fset_data:\n prob = _get_partial_likelihood(edge_to_P,\n v_to_subtree_partial_likelihoods, v, cs, s)\n if prob is not None:\n partial_likelihoods[s] = prob\n else:\n partial_likelihoods = dict((s, 1) for s in fset_data)\n if v == root:\n pnext = {}\n for s in set(partial_likelihoods) & 
set(root_prior_distn):\n pnext[s] = partial_likelihoods[s] * root_prior_distn[s]\n partial_likelihoods = pnext\n v_to_subtree_partial_likelihoods[v] = partial_likelihoods\n return v_to_subtree_partial_likelihoods", "def back(self, step):\r\n self.forward(-step)", "def reverseLinkedListIterative(head):\n slow, curr, fast = None, head, head\n\n # Update the bindings\n while curr is not None:\n fast = fast.next\n curr.next = slow\n\n # Move one step ahead\n slow = curr\n curr = fast\n\n return slow", "def get_predecessors(self, node): \n preds = []\n child_state = self.node_to_state(node)\n for it in self.predecessors:\n parent_node = (node[0] + it[0], node[1] + it[1])\n parent_state = self.node_to_state(parent_node)\n edge = self.interpolate(child_state, parent_state, self.distance_bw_states(child_state, parent_state)/self.path_resolution)\n preds.append([parent_node, edge])\n return preds", "def backtrack(\n candidates: list, path: list, answer: list, target: int, previous_index: int\n) -> None:\n if target == 0:\n answer.append(path.copy())\n else:\n for index in range(previous_index, len(candidates)):\n if target >= candidates[index]:\n path.append(candidates[index])\n backtrack(candidates, path, answer, target - candidates[index], index)\n path.pop(len(path) - 1)", "def backprop(node, result):\n while node:\n node.addOutcome(result)\n node = node.getParent()", "def bfs_path(G, source, destination):\n vertex_dict = dict(nx.bfs_predecessors(G, source))\n queue = deque()\n queue.append(destination)\n while queue[-1] != source:\n queue.append(vertex_dict[queue[-1]])\n queue.reverse()\n return queue" ]
[ "0.69886434", "0.69002724", "0.6687282", "0.659495", "0.6564393", "0.655781", "0.6412653", "0.63977957", "0.63977957", "0.63977957", "0.6395437", "0.63730013", "0.6367989", "0.6310967", "0.6306441", "0.6306441", "0.6306441", "0.63061464", "0.62905055", "0.6285904", "0.6272839", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6242374", "0.62403184", "0.62355703", "0.62202394", "0.62030864", "0.62012315", "0.61938685", "0.61727405", "0.61713666", "0.61614436", "0.614387", "0.61370033", "0.611674", "0.6098824", "0.6093489", "0.6075261", "0.606062", "0.60384345", "0.60358614", "0.6031236", "0.6007177", "0.59846735", "0.5981846", "0.59668773", "0.59592235", "0.5954687", "0.59193593", "0.58718324", "0.5855123", "0.5830607", "0.5829118", "0.58108455", "0.5779991", "0.57447094", "0.57148165", "0.5710644", "0.56990534", "0.5666593", "0.5662056", "0.56594145", "0.56461114", "0.5646055", "0.5636756", "0.56318116", "0.5631738", "0.56256634", "0.56239396", "0.56237835", "0.5621112", "0.5619068", "0.5606288", "0.5606288", "0.5602623", "0.5595059", "0.55930567", "0.55860674", "0.5584951", "0.5580125", "0.5579473", "0.55753416", "0.55701697", "0.5562263", "0.55613405", "0.555799", "0.55574954", "0.5556195" ]
0.7261165
0
Recursively creates Level_Pair nodes from start up to end. Assumes that end's attack and strength are greater than start's. Neighbours for a node are stored in graph[node]. Distances between neighbours are stored in graph[nodeA][nodeB].
def populate_graph(
        graph, start, end, attack_bonus, strength_bonus):
    # Check if already created
    if start in graph:
        return

    graph[start] = dict()

    # Recursively create neighbouring level pairs
    if start.attack < end.attack:
        inc_attack = Level_Pair(start.attack + 1, start.strength)

        # Store level-up time
        graph[start][inc_attack] = level_time_average(
            start, Attack_Style.ATTACK, attack_bonus, strength_bonus)

        # Continue at next node
        populate_graph(graph, inc_attack, end, attack_bonus, strength_bonus)

    if start.strength < end.strength:
        inc_strength = Level_Pair(start.attack, start.strength + 1)

        # Store level-up time
        graph[start][inc_strength] = level_time_average(
            start, Attack_Style.STRENGTH, attack_bonus, strength_bonus)

        # Continue at next node
        populate_graph(graph, inc_strength, end, attack_bonus, strength_bonus)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def astar_map(map, start, end):\n\n # Create start and end node\n start_node = Node3(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node3(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n x = 0\n while len(open_list) > 0:\n x+=1\n # Get the current node\n current_node = open_list[0]\n \n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node.position == end_node.position:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for key in map:\n if key == tuple(current_node.position):\n for elem in map[key]:\n new_node = Node3(current_node, elem[0],elem[1])\n children.append(new_node)\n\n # Loop through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values \n child.g = current_node.g + child.cost\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n # Add the child to the open list\n open_list.append(child)", "def generate_adjacents(node):\n # Makes a dictionary where keys are current upper token positions and\n # values are the list of positions attainable from one slide move\n slide_moves = {}\n for key, value in node.boardstate.items():\n if value.isupper() and value != \"B\":\n slide_moves[key] = get_slide_moves(key, node.boardstate)\n\n # Append list of swing moves to get all moves\n moves_dict = {}\n #relevant_pieces = [name ]\n for key in slide_moves:\n all_moves = set(slide_moves[key] + get_swing_moves(key, slide_moves))\n moves_dict[key] = list(all_moves)\n\n # Convert from dictionary to list of list of tuples of the form:\n #[[(curr_move, next_move)...]...] 
where each tokens moves occupy a list\n moves_list = []\n for curr, news in moves_dict.items():\n moves_list.append([(curr, new) for new in news])\n\n # Get all combinations of moves and for each combo construct a new board state\n adjacent_states = []\n turns = list(product(*moves_list))\n\n for turn in turns:\n new_board = apply_turn(node, turn)\n if new_board:\n adjacent_states.append((turn, new_board))\n return adjacent_states", "def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return", "def astar(maze, start, end):\n\n # Create start and end node\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]: # Adjacent squares\n\n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:\n continue\n\n # Make sure walkable terrain\n if maze[node_position[0]][node_position[1]] != 0:\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n children.append(new_node)\n for child in children:\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values\n child.g = current_node.g + 1\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n child.f = child.g + child.h\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n\n # Add the child to the open list\n open_list.append(child)", "def a_star_alg(self, p1: int, p2: int, max_level: int = 1000):\r\n \r\n # Create start and end node\r\n start_node = Node(None, p1, self.node_dict[p1])\r\n start_node.g = start_node.h = start_node.f = 0\r\n end_node = Node(None, p2, self.node_dict[p2])\r\n end_node.g = 
end_node.h = end_node.f = 0\r\n\r\n # Initialize both open and closed list\r\n open_list = []\r\n closed_list = []\r\n\r\n # Add the start node\r\n open_list.append(start_node)\r\n\r\n # Loop until you find the end\r\n level = 0\r\n while len(open_list) > 0 and level < max_level:\r\n level += 1\r\n\r\n # Get the current node (the node in open_list with the lowest cost)\r\n current_node = open_list[0]\r\n current_index = 0\r\n for index, item in enumerate(open_list):\r\n if item.f < current_node.f:\r\n current_node = item\r\n current_index = index\r\n\r\n # Pop current off open list, add to closed list\r\n open_list.pop(current_index)\r\n closed_list.append(current_node)\r\n\r\n # Found the goal\r\n if current_node == end_node:\r\n path = []\r\n distance = current_node.g\r\n current = current_node\r\n while current is not None:\r\n path.append(current.number)\r\n current = current.parent\r\n\r\n return path[::-1], distance # Return reversed path\r\n\r\n # Generate children\r\n children = []\r\n for new_number in self.road_tree[current_node.number]: # Adjacent nodes\r\n new_node = Node(current_node, new_number, self.node_dict[new_number])\r\n children.append(new_node)\r\n\r\n # Loop through children\r\n for child in children:\r\n append_to_open_list = False\r\n\r\n # Create the f, g, and h values\r\n child.g = current_node.g + self.road_dict[(current_node.number, child.number)]\r\n child.h = sqrt((child.x - end_node.x) ** 2 + (child.y - end_node.y) ** 2) / 200\r\n child.f = child.g + child.h\r\n\r\n # Child is already in the closed list\r\n closed_list, append_to_open_list = self.check_in_list(child, closed_list, append_to_open_list)\r\n\r\n # Child is already in the open list\r\n open_list, append_to_open_list = self.check_in_list(child, open_list, append_to_open_list)\r\n\r\n # Add the child to the open list\r\n if append_to_open_list:\r\n open_list.append(child)\r\n\r\n return [], 1e10", "def astar(maze, start, end):\n\n # Create start and end node\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]: # Adjacent squares\n\n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:\n continue\n\n # Make sure walkable terrain\n if maze[node_position[0]][node_position[1]] != 0:\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n\n # Append\n children.append(new_node)\n\n # Loop 
through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values\n child.g = current_node.g + 1\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n\n # Add the child to the open list\n open_list.append(child)", "def astar(maze, start, end):\n\n # Create start and end node\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]: # Adjacent squares\n\n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:\n continue\n\n # Make sure walkable terrain\n if maze[node_position[0]][node_position[1]] != 0:\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n\n # Append\n children.append(new_node)\n \n # Loop through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values\n child.g = current_node.g + 1\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n\n # Add the child to the open list\n open_list.append(child)", "def astar(maze, start, end):\n\n # Create start and end node\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = 
current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]: # Adjacent squares\n\n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:\n continue\n\n # Make sure walkable terrain\n if maze[node_position[0]][node_position[1]] != 0:\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n\n # Append\n children.append(new_node)\n\n # Loop through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values\n child.g = current_node.g + 1\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n\n # Add the child to the open list\n open_list.append(child)", "def traverse_graph_start_end_extra_node_heuristic(graph):\n\n tree = traverse_graph_start_end_extra_node(graph)\n\n # delete the indexes\n\n for st in tree.subtrees():\n if \"start\" in st.label() or \"end\" in st.label():\n new_label = re.sub(r'[0-9]+', '', st.label())\n st.set_label(new_label)\n\n return tree", "def astar(maze, start, end):\n\n # Create start and end \n start_node = node.Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = node.Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n \n for new_position in [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (1, 0, 0), (-1, 0, 0)]: # Adjacent squares\n \n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1], current_node.position[2] + new_position[2])\n\n # Make sure within range\n if node_position[1] > (len(maze[0]) - 1) or node_position[1] < 0 or node_position[2] > (len(maze[0][len(maze)-1]) -1) or node_position[2] < 0 or node_position[0] < 0 or node_position[0] > len(maze) - 1:\n continue\n \n # Make sure walkable terrain\n if node_position == end_node.position:\n new_node = node.Node(current_node, node_position)\n \n # Append\n children.append(new_node)\n continue\n \n if maze[node_position[0]][node_position[1]][node_position[2]] != 0:\n continue\n\n # Create new node\n new_node = node.Node(current_node, 
node_position)\n \n # Append\n children.append(new_node)\n\n # Loop through children\n for child in children:\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n break\n else:\n # Create the f, g, and h values\n child.g = current_node.g + 1\n # H: Manhattan distance to end point\n child.h = abs(child.position[0] - end_node.position[0]) + abs(child.position[1] - end_node.position[1])\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n # check if the new path to children is worst or equal \n # than one already in the open_list (by measuring g)\n if child == open_node and child.g >= open_node.g:\n break\n else:\n # Add the child to the open list\n open_list.append(child)", "def astar(maze, start, end):\n\n # Create start and end node\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n for i in open_list:\n jazda1(kratka,i.position)\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0)]: # Adjacent squares\n\n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:\n continue\n\n # Make sure walkable terrain\n if maze[node_position[0]][node_position[1]] != 0:\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n\n # Append\n children.append(new_node)\n\n # Loop through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values\n child.g = current_node.g + 1\n child.h = ((abs(child.position[0] - end_node.position[0]))) + ((abs(child.position[1] - end_node.position[1])) )\n child.f = child.g + child.h\n \n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n\n # Add the child to the open list\n if(child not in open_list):\n open_list.append(child)", "def astar(maze, start, end, agents):\r\n\r\n # Create start and end node\r\n start_node = Node(None, start)\r\n end_node = Node(None, end)\r\n\r\n # Initialize both open and closed list\r\n open_list = []\r\n open_pos = []\r\n closed_pos = []\r\n\r\n # Add the start node\r\n open_list.append(start_node)\r\n open_pos.append(start)\r\n\r\n # Loop until you find the end\r\n while len(open_list) > 0:\r\n\r\n # Get the current node\r\n current_node = open_list[0]\r\n current_index = 0\r\n \r\n for index, item in enumerate(open_list):\r\n if item.f 
< current_node.f:\r\n current_node = item\r\n current_index = index\r\n\r\n # Pop current off open list, add to closed list\r\n open_list.pop(current_index)\r\n open_pos.pop(current_index)\r\n closed_pos.append(current_node.position)\r\n\r\n # Found the goal\r\n if current_node == end_node:\r\n path = []\r\n\r\n current = current_node\r\n while current is not None:\r\n path.append(current.position) \r\n current = current.parent\r\n\r\n return path[::-1] # Return reversed path\r\n\r\n # # Generate children\r\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0)]: # Adjacent squares\r\n \r\n # Get node position\r\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\r\n\r\n # Make sure within range\r\n if node_position[0] > maze.shape[0]-1 or node_position[0] < 0 or node_position[1] > maze.shape[1]-1 or node_position[1] < 0:\r\n continue\r\n\r\n # Make sure walkable terrain\r\n if maze[node_position[0]][node_position[1]] == 0:\r\n continue\r\n\r\n if not validataPath(current_node, node_position, agents):\r\n continue\r\n\r\n # Create new node\r\n child = Node(current_node, node_position)\r\n\r\n if node_position not in closed_pos:\r\n child = Node(current_node, node_position)\r\n\r\n # Create the f, g, and h values\r\n child.g = current_node.g + 1\r\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\r\n child.f = child.g + child.h\r\n\r\n # Child is already in the open list\r\n if node_position in open_pos:\r\n index = open_pos.index(node_position)\r\n if open_list[index].g > child.g:\r\n open_list[index] = child\r\n\r\n # Add the child to the open list\r\n else:\r\n open_list.append(child)\r\n open_pos.append(node_position)\r\n\r\n return None", "def dfs(g: nx.Graph, start_node: Any) -> str:\n\n way = []\n stack = [start_node]\n y = {node: [] for node in g.nodes}\n while stack:\n elem = stack.pop()\n way.append(elem)\n for node in list(g.neighbors(elem)):\n if node not in way:\n stack.append(node)\n y[node].extend((*y[elem], elem))\n print(y)\n return \"\".join(way)", "def build_auxiliary_structures(nodes_filename, ways_filename):\n nodes = {}\n for way in read_osm_data(ways_filename):\n highway_type = way['tags'].get('highway', '( ͡° ͜ʖ ͡°)')\n if highway_type in ALLOWED_HIGHWAY_TYPES:\n nodes_along_way = way['nodes'] # List of nodes along this way\n for i in range(len(nodes_along_way) - 1):\n # A pair of adjacent nodes along this way\n left = nodes_along_way[i]\n right = nodes_along_way[i + 1]\n default_speed_limit = DEFAULT_SPEED_LIMIT_MPH[highway_type]\n # If this way doesn't have a speed limit tag, we use the default value based on highway type\n speed_limit = way['tags'].get('maxspeed_mph', default_speed_limit)\n\n def build_data(root, adjacent):\n \"\"\"\n root: ID of some node along way\n adjacent: ID of some node adjacent to root node along way\n \"\"\"\n new_node_data_struct = {'adjacent': {adjacent: speed_limit}} # Init dict for node data structure\n root_data = nodes.get(root, new_node_data_struct)\n # There might be another way where root and adjacent are directly adjacent, so our\n # speed limit is the max of the speed limits of those two ways:\n root_data['adjacent'][adjacent] = max(root_data['adjacent'].get(adjacent, 0), speed_limit)\n nodes[root] = root_data # Add the data on root to our dictionary of node data\n\n build_data(left, right)\n if not way['tags'].get('oneway', '( ͡° ͜ʖ ͡°)') == 'yes':\n # If this isn't a oneway way, we can build the data 
structure for the next node as well\n build_data(right, left)\n elif right == nodes_along_way[-1]:\n # In non-oneway ways, the above build_data(right, left) call creates the data structure\n # for the final node at the same time as the penultimate one. However, in the case of a\n # oneway path, we have to do it manually:\n nodes[right] = nodes.get(right, {'adjacent': {}})\n\n for node in read_osm_data(nodes_filename):\n id = node['id']\n if id in nodes:\n # If the id of this node in the generator was on a valid way, we add the data about that node\n # to its dictionary in nodes.\n # Add lat/lon data\n nodes[id]['lat'] = node['lat']\n nodes[id]['lon'] = node['lon']\n\n return nodes", "def build_node_graph(self):\n G = pgv.AGraph(strict=False, directed=True)\n temp_dict = defaultdict(int) #key - from_to_ip, val - counter\n\n for i, ip in enumerate(self.node_graph_dict.keys()):\n G.add_node(ip, shape='rect', label='%d' % i)\n logger.info(\"All nodes added\")\n\n for ip, droplist in self.node_graph_dict.iteritems():\n for gnid, dropids in droplist:\n for did in dropids:\n tip = self.gnid_ip_dict[self.oid_gnid_dict[did]]\n k = '{0}_{1}'.format(ip, tip)\n temp_dict[k] += 1\n\n for k, v in temp_dict.iteritems():\n ks = k.split('_')\n G.add_edge(ks[0], ks[1], weight=v)\n\n return G", "def make_sidewalk_nodes(street, prev_node, curr_node, next_node):\n if prev_node is None:\n v = - curr_node.vector_to(next_node, normalize=False)\n vec_prev = curr_node.vector() + v\n prev_node = Node(None, vec_prev[0], vec_prev[1])\n elif next_node is None:\n v = - curr_node.vector_to(prev_node, normalize=False)\n vec_next = curr_node.vector() + v\n next_node = Node(None, vec_next[0], vec_next[1])\n\n curr_latlng = np.array(curr_node.location())\n\n v_cp_n = curr_node.vector_to(prev_node, normalize=True)\n v_cn_n = curr_node.vector_to(next_node, normalize=True)\n v_sidewalk = v_cp_n + v_cn_n\n\n if np.linalg.norm(v_sidewalk) < 1e-10:\n v_sidewalk_n = np.array([v_cn_n[1], - v_cn_n[0]])\n else:\n v_sidewalk_n = v_sidewalk / np.linalg.norm(v_sidewalk)\n\n p1 = curr_latlng + street.distance_to_sidewalk * v_sidewalk_n\n p2 = curr_latlng - street.distance_to_sidewalk * v_sidewalk_n\n\n p_sidewalk_1 = Node(None, p1[0], p1[1])\n p_sidewalk_2 = Node(None, p2[0], p2[1])\n\n curr_node.append_sidewalk_node(street.id, p_sidewalk_1)\n curr_node.append_sidewalk_node(street.id, p_sidewalk_2)\n\n # Figure out on which side you want to put each sidewalk node\n v_c1 = curr_node.vector_to(p_sidewalk_1)\n if np.cross(v_cn_n, v_c1) > 0:\n return p_sidewalk_1, p_sidewalk_2\n else:\n return p_sidewalk_2, p_sidewalk_1", "def create_basic_adjacency_map_2():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\", \"E\"],\n \"D\": [\"X\"],\n \"E\": [\"X\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def shortest_path(start, end):\n\n\tmoves = rubik.quarter_twists\n\n\t# Parent nodes: (Parent_State, move)\n\tstartParents = {}\n\tstartParents[start] = None # Start state has no parent\n\n\t# Parent nodes: (Parent_State, move)\n\tendParents = {}\n\tendParents[end] = None # End state has no parent\n\n\tstartFrontier = [] # Current frontier in start BFS\n\tendFrontier = [] # Current frontier in end BFS\n\n\tstartFrontier.append(start) # Add start state as first and only node to generate next frontier\n\tendFrontier.append(end) # Add end state as first and only node to generate next frontier\n\n\tif end in startParents:\n\t\treturn [] # Start == End : No moves required\n\n\t# We only have to search at 
most 14 levels in BFS\n\t# Two-way BFS therefore requires 7 concurrent levels from both states\n\tfor i in range(7):\n\n\t\tstartNextFrontier = [] # New empty set for new frontier to be discovered\n\t\tfor state in startFrontier: # Iterate through each rubiks state in this frontier\n\t\t\tfor move in moves: # Apply each move to this state\n\t\t\t\tnextState = rubik.perm_apply(move, state)\n\n\t\t\t\t# Makes sure this new state is not already in the Graph\n\t\t\t\t# This skips nodes that were already permuted in another path,\n\t\t\t\t# essentially trimming the Graph's leaves\n\t\t\t\tif nextState not in startParents:\n\t\t\t\t\tstartParents[nextState] = (state, move) # Store this state's parent + move\n\t\t\t\t\tstartNextFrontier.append(nextState) # Create a node in the next frontier\n\t\t\t\t\n\t\t\t\t# Intersect of both Graphs, Intermediate state of path found\n\t\t\t\tif nextState in endParents:\n\t\t\t\t\treturn solution(startParents, endParents, nextState)\n\n\t\tstartFrontier = startNextFrontier # Make the next frontier the current one\n\n\t\tendNextFrontier = [] # New empty set for new frontier to be discovered\n\t\tfor state in endFrontier: # Iterate through each rubiks state in this frontier\n\t\t\tfor move in moves: # Apply each move to this state\n\t\t\t\tnextState = rubik.perm_apply(move, state)\n\n\t\t\t\t# Makes sure this new state is not already in the Graph\n\t\t\t\t# This skips nodes that were already permuted in another path,\n\t\t\t\t# essentially trimming the Graph's leaves\n\t\t\t\tif nextState not in endParents:\n\t\t\t\t\tendParents[nextState] = (state, move) # Store this state's parent + move\n\t\t\t\t\tendNextFrontier.append(nextState) # Create a node in the next frontier\n\t\t\t\t\n\t\t\t\t# Intersect of both Graphs, Intermediate state of path found\n\t\t\t\tif nextState in startParents:\n\t\t\t\t\treturn solution(startParents, endParents, nextState)\n\n\t\tendFrontier = endNextFrontier # Make the next frontier the current one\n\n\treturn None", "def connectNodes(imgR,nodes,start,goal):\n alphabet = string.ascii_lowercase\n nodeConnections = [[] for i in range(len(nodes)+2)]\n for index, node in enumerate(nodes):\n paths = adjPaths(imgR,node)\n for path in paths:\n result = checkPath(imgR,nodes,node,path)\n if not result == 0: \n nIndex = nodes.index(result)\n nodeConnections[index+1].append(alphabet[nIndex+1])\n paths = adjPaths(imgR,start) # add start to nodes\n for path in paths:\n result = checkPath(imgR,nodes,start,path)\n if not result == 0: \n nIndex = nodes.index(result)\n nodeConnections[0].append(alphabet[nIndex+1])\n for node in nodeConnections[0]:\n nodeConnections[alphabet.index(node)].append(alphabet[0])\n paths = adjPaths(imgR,goal) # add goal to nodes\n for path in paths:\n result = checkPath(imgR,nodes,goal,path)\n if not result == 0: \n nIndex = nodes.index(result)\n nodeConnections[len(nodeConnections)-1].append(alphabet[nIndex+1])\n for node in nodeConnections[len(nodeConnections)-1]:\n nodeConnections[alphabet.index(node)].append(alphabet[len(nodeConnections)-1])\n return [alphabet[i] for i in range(len(nodes)+2)], nodeConnections", "def get_subgraph_between_nodes(self, start, end):\n nodes = set()\n nodes.add(start)\n\n to_visit = set()\n to_visit.add(start)\n\n while len(to_visit) > 0:\n current_visit = copy.copy(to_visit)\n for tv in current_visit:\n to_visit.remove(tv)\n if tv is not end:\n for s in self.successors(tv):\n to_visit.add(s)\n nodes.add(s)\n\n nodes.add(end)\n\n return self.subgraph(nodes)", "def 
create_complete_graph(pair_weights, flip_weights=True):\n g = nx.Graph()\n for k, v in pair_weights.items():\n wt_i = -v if flip_weights else v\n g.add_edge(k[0], k[1], attr_dict={\"distance\": v, \"weight\": wt_i})\n return g", "def get_2_step_neighbours(node):\n for i in range(len(node)):\n yield node[0:i] + (flip(node[i]),) + node[i+1:]\n\n for i, j in itertools.permutations(range(len(node)), 2):\n if i < j:\n yield node[0:i] + (flip(node[i]),) + node[i+1:j] + (flip(node[j]),) + node[j+1:]", "def shortest_path(graph, start, end):\n # Tree will contain both the final path and some other paths in reverse.\n # Each value is a 2-tuple; the first item is the depth of the key, and the\n # second is the parent of the key.\n tree = {start: (0, None)}\n\n # next_edges is a min_heap used as a priority queue; the next item in it\n # will always be the node adjacent to the growing tree that adds the least\n # to the total depth of the branch.\n next_edges = []\n\n # Add all of the edges adjacent to the start to next_edges.\n for edge in graph[start]:\n heapq.heappush(next_edges, edge + (start, ))\n\n # Loop until we run out of edges or we find the end (see condition below).\n while len(next_edges) > 0:\n depth, child, parent = heapq.heappop(next_edges)\n\n # Ignore edges connecting to children nodes already in the tree.\n if child in tree:\n continue\n\n tree[child] = (depth, parent)\n\n # Add the edges from the new node to the list of possible edges.\n for length, new_child in graph[child]:\n if new_child not in tree:\n heapq.heappush(next_edges, (depth + length, new_child, child))\n\n # If we found the end, flush the next_edges queue and end the search.\n if child is end:\n next_edges = []\n break\n\n # Construct the forward path from start -> end from the tree.\n path = []\n node = end\n while node is not None:\n path.append(node)\n node = tree[node][1]\n\n path.reverse()\n\n total_depth = tree[end][0]\n return (total_depth, path)", "def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph", "def offset_graph():\n pylon_graph = graph.graph()\n base = square(ORIGIN, LENGTH)\n base_ids = pylon_graph.add_nodes(base, \"base\")\n pylon_graph.connect_neighbours(base_ids, LENGTH)\n all_ids = []\n for i in range(LEVELS):\n level = offset(base, LENGTH * i, \"z\")\n level_ids = pylon_graph.add_nodes(level, \"level\" + str(i))\n all_ids.extend(level_ids)\n pylon_graph.connect_neighbours(all_ids, LENGTH)\n return pylon_graph", "def find_reachable_nodes_from(self, start_node, **kwargs):\r\n\t\treturn BreadthFirstTraverser(start_node, **kwargs)", "def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, 
traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n 
match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")", "def constrained_offset_graph(length=10000, levels=10):\n LEVELS = levels\n LENGTH = length\n pylon_graph = graph.pylon_graph()\n base = square(ORIGIN, LENGTH)\n base_ids = pylon_graph.add_nodes(base, \"line\")\n pylon_graph.connect_neighbours(base_ids, LENGTH)\n all_ids = []\n for i in range(LEVELS):\n level = offset(base, LENGTH * i, \"z\")\n if i == 10:\n level_ids = pylon_graph.add_nodes(level, \"line\")\n else:\n level_ids = pylon_graph.add_nodes(level, \"level\" + str(i))\n all_ids.extend(level_ids)\n pylon_graph.connect_neighbours(all_ids, LENGTH)\n return pylon_graph", "def Dijkstra2(node_init, node_end, graph):\n\n ### Parameter initialisation\n node_list = list(graph.vertices.keys())\n dist = np.full(len(node_list), -np.inf)\n # At the beginning we have not reached the end_node\n node_end_reached = False\n # At the beginning, we assume there is a shortest path:\n no_path = False\n \n # 
Initialising the distances of the nodes\n dist[node_init] = 0\n # Setting the father_node which contains the provenance of the nodes\n father_node = np.full(len(node_list), -np.inf)\n # Initialising the current node\n current_node = node_init \n # Initialising the dictionnary of fixed node which has the following shape:\n #{fixed_node: (previous_node, iteration, cost)}\n # Fixing the number of iterations\n k = 0\n dict_fixed_node = {node_init:(None,k, 0)}\n \n # In the trivial case where the two nodes are identical\n if node_init == node_end:\n cost = 0\n shortest_path = [node_init]\n no_path = False\n return cost, shortest_path, no_path\n \n # While the end node has not been reached\n while not node_end_reached:\n current_node_adj = graph.node_get_adj(current_node).copy()\n # We get rid off the nodes that have been fixed, except at the first iteration\n if k != 0:\n current_node_adj.remove(dict_fixed_node[current_node][0])\n ## Updating the distances : either the node are neighbors and \n # something might change, either they are not, and their distance \n # does not change.\n # For the neighbors node\n for e in current_node_adj:\n dist_temp = dist[current_node] + graph.weights[(current_node, e)]\n # We change the distance only if it is lower than it used to be\n # otherwise, we keep it\n if dist_temp < dist[e] or dist[e] == -np.inf:\n dist[e] = dist_temp\n # Setting the father node\n father_node[e] = current_node\n father_node[current_node] = None\n # We set the distance of the current node to 0\n dist[current_node] = 0 \n # Index and distances which are not 0 and not minus infty\n sub_dist_index = [i for i, e in enumerate(dist) if e > 0]\n sub_dist_value = np.array([e for i, e in enumerate(dist) if e > 0])\n # If these two lists are empty, we stop the algorithm and that means\n # that we cannot reach our point\n if not sub_dist_index or sub_dist_value.size == 0:\n no_path = True\n cost = 'impossible path'\n shortest_path = 'impossible path'\n break\n # Now we need to set our choice for the next node\n if np.array_equal(sub_dist_value, np.ones(len(sub_dist_value))):\n ## If there are only ones : we pick them up randomly\n current_node = int(random.choice(list(sub_dist_index)))\n min_dist = sub_dist_value.min()\n else:\n ## If not we just pick up the one with the minimum distance.\n current_node = sub_dist_index[sub_dist_value.argmin()]\n min_dist = sub_dist_value.min()\n # Adding this node to the dictionnary\n dict_fixed_node[current_node] = (int(father_node[current_node]), k, min_dist)\n # If the end_node has been reached, we stop the search algorithm\n if node_end in dict_fixed_node.keys():\n node_end_reached = True\n # Incrementing the counter\n k += 1\n #print('current_node : {}'.format(current_node))\n #print(dict_fixed_node)\n # Now we need to get the shortest path from our iterations whose information \n # are in dict_fixed_node. 
To do this, we need to circle back from the end_node\n # to the init_node in this dictionnary.\n # This is done only if some path between node_init and end_node exists.\n if no_path == False:\n list_father_node = list(dict_fixed_node.values())\n previous_node = list_father_node[-1][0]\n shortest_path = [node_end, previous_node]\n # While the initial node has not been reached, we add the relevant\n # nodes to our shortest path\n while previous_node != node_init:\n previous_node = dict_fixed_node[previous_node][0]\n shortest_path.append(previous_node)\n \n # Computing the cost of this shortest path in terms of weights\n cost = int(dict_fixed_node[node_end][2])\n \n return cost, shortest_path, no_path", "def astar(image, start, end, avg_dist):\n\n # Create start and end node\n start_node = Node(None, start)\n end_node = Node(None, end)\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f <= current_node.f:\n current_node = item\n current_index = index\n if debug:\n print(\"Chosen:\"+str(current_node.position)+\"Cost:\" + str(current_node.f))\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n child_num = 0\n for new_position in [(0, -1), (0, 1), (1, 0), (-1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]: # Adjacent squares\n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (image.shape[1] - 1) or node_position[0] < 0 or node_position[1] < 0 or node_position[\n 1] > (image.shape[0] - 1):\n if debug:\n print(\"beyond range\")\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n\n # Append\n children.append(new_node)\n child_num += 1\n\n # adding all nodes and assigning extra cost for an ink cut later\n if child_num == 0:\n new_node = Node(current_node, (current_node.position[0], current_node.position[1] + 1))\n children.append(new_node)\n if debug:\n print(\"must cut through line\")\n\n # Loop through children\n for child in children:\n move_on = 0\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n if debug:\n print(str(node_position)+\"in closed list\")\n move_on = 1\n break\n if move_on == 1:\n continue\n # Create the f, g, and h values\n if child.position[1] - current_node.position[1] != 0 and child.position[0] - current_node.position[0] != 0:\n child.n = 14\n else:\n child.n = 10\n child.h = (np.abs(child.position[0] - end_node.position[0])**2) + (np.abs(child.position[1] - end_node.position[1])**2)\n child.v = np.abs(child.position[0] - start_node.position[0]) # cost for horizontal movement\n child.d, child.d2 = blocker_dist(child, image, debug=debug)\n if image[child.position[1], child.position[0]] > 0.9:\n child.m = 1000\n child.g = current_node.g + child.n + child.v + child.m + child.d + child.d2\n child.f = child.g + child.h # heuristic still needed to speed up computations\n if debug:\n 
print(child.position, child.f)\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node:\n move_on = 1\n if child.f < open_node.f:\n # open_node.position = child.position\n open_node.parent = child.parent\n open_node.g = child.g\n open_node.f = child.f\n break\n\n if move_on == 1:\n continue\n\n # Add the child to the open list\n open_list.append(child)", "def create(self, range_value):\n adjacency_list = []\n for idx, node in enumerate(range_value):\n if idx == len(range_value):\n break\n for node2 in range_value[idx + 1:]:\n adjacency_list.append([node, node2])\n return adjacency_list", "def retrace_path(current_node, start_node, chip):\n path = []\n path_cost = 0\n collisions = 0\n\n while current_node != start_node:\n # Add the cost for the wires\n path_cost += 1\n x = current_node.position[0]\n y = current_node.position[1]\n z = current_node.position[2]\n\n # Keep track of collisions\n if chip.coordinates[z][y][x].used:\n collisions =+ 1\n\n path.append(current_node)\n current_node = current_node.parent\n\n # Add the costs for the collisions\n path_cost += collisions * 300\n\n # Add the start node\n path.append(current_node)\n\n # Reverse the order of the path\n return path[::-1], path_cost, True", "def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], depth)", "def __init__(self, rows=10, cols=10, start=(0, 0), end=(9, 9), board=False, node_type=BaseNode):\n self.open_nodes = []\n self.closed_nodes = []\n self.path_found = False\n self.alg_end = False\n\n if board:\n self.BOARD = [[node_type(x, y) for x in range(len(board[0]))] for y in range(len(board))]\n self.board_array = board\n self.len_x = len(self.BOARD[0])\n self.len_y = len(self.BOARD)\n for y in range(len(board)):\n for x in range(len(board[0])):\n if board[y][x] == 1:\n self.BOARD[y][x].traversable = False\n elif board[y][x] == 2:\n self.start_node = self.BOARD[y][x]\n elif board[y][x] == 3:\n self.end_node = self.BOARD[y][x]\n elif board[y][x] in [4, 5, 6]:\n self.board_array[y][x] = 0\n if not self.start_node or not self.end_node:\n raise AttributeError(\"No start/end node specified!\")\n else:\n self.BOARD = [[node_type(x, y) for x in range(rows)] for y in range(cols)]\n\n self.len_x = len(self.BOARD[0])\n self.len_y = len(self.BOARD)\n self.board_array = [[0 for _ in range(self.len_x)] for _ in range(self.len_y)]\n\n self.start_node = self.BOARD[start[1]][start[0]]\n self.board_array[start[1]][start[0]] = 2\n\n self.end_node = self.BOARD[end[1]][end[0]]\n self.board_array[end[1]][end[0]] = 3", "def _traverse_level_order_iterative(self, start_node, visit):\n # Create queue to store nodes not yet traversed in level-order\n queue = 
LinkedQueue()\n # Enqueue given starting node\n queue.enqueue(start_node)\n # Loop until queue is empty\n while queue.is_empty() == False:\n # Dequeue node at front of queue\n node = queue.dequeue()\n # Visit this node's data with given function\n visit(node.data)\n # Enqueue this node's left child, if it exists\n if node.left_child is not None:\n queue.enqueue(node.left_child)\n # Enqueue this node's right child, if it exists\n if node.right_child is not None:\n queue.enqueue(node.right_child)", "def wrong_buildTree(self, start, end, cur):\n if start == end:\n self.nodes[cur] = self.nums[start]\n else: \n mid = start+(end-start)/2\n left, right = 2*cur+1, 2*cur+2\n self.nodes[cur] = self.buildTree(start, mid, left) + self.buildTree(mid+1, end, right)\n return self.nodes[cur]", "def floyd_warshall(self, start, end):\n distance = {}\n next_node = {}\n nodes = self.keys()\n for edge in self.edges():\n distance.setdefault(edge[0], {})[edge[1]] = edge[2]\n next_node.setdefault(edge[0], {})[edge[1]] = edge[1]\n\n for node in nodes:\n for neighbor in nodes:\n if neighbor not in self[node]:\n distance.setdefault(node, {})[neighbor] = float('inf')\n for k in nodes:\n for i in nodes:\n for j in nodes:\n if distance[i][j] > distance[i][k] + distance[k][j]:\n distance[i][j] = distance[i][k] + distance[k][j]\n next_node[i][j] = next_node[i][k]\n\n return self.floyd_warshall_path(start, end, next_node)", "def __drawNodes(self, levelDictionary, linkNodeDict, topLeft):\r\n setSmooth = self.__optionsDatabase.get('Spline optimization') \r\n setCurvature = self.__optionsDatabase.get('Arrow curvature') \r\n minOffsetY = self.__optionsDatabase.get('yOffset') \r\n minOffsetX = self.__optionsDatabase.get('xOffset') \r\n giveExtraSpaceForLinks = self.__optionsDatabase.get('addEdgeObjHeight') \r\n\r\n # Caclulate x, y offsets\r\n offsetX = 0\r\n levelInt2offsetY = dict()\r\n for levelInt in levelDictionary.keys():\r\n currentLevel = levelDictionary[levelInt]\r\n levelInt2offsetY[levelInt] = 0\r\n \r\n # Calculate maximum node size on a per level basis (X is for all levels)\r\n # Then add minimum seperation distance between nodes\r\n for node in currentLevel:\r\n # getSize returns node width, and height of the node & child link icon\r\n x, y = node.getSize(giveExtraSpaceForLinks)\r\n offsetX = max(offsetX, x)\r\n levelInt2offsetY[levelInt] = max(levelInt2offsetY[levelInt], y) \r\n \r\n \r\n maxOffsetX = offsetX + minOffsetX\r\n halfOffsetX = offsetX / 2\r\n \r\n # Send nodes to their final destination, assign final pos to dummy edges\r\n x, y = topLeft\r\n for levelInt in levelDictionary.keys():\r\n currentLevel = levelDictionary[levelInt] \r\n longEdgeOffset = [halfOffsetX, levelInt2offsetY[levelInt] / 3]\r\n \r\n # Move each node in the level (Dummy edges save the pos but don't move)\r\n for node in currentLevel:\r\n node.moveTo(x + node.getGridPosition() * maxOffsetX, y, longEdgeOffset)\r\n \r\n # Increment y for the next iteration\r\n y += levelInt2offsetY[levelInt] + minOffsetY\r\n \r\n # Self-looping edges (Must move these manually into position)\r\n for selfLoopedEdge in NodeWrapper.SelfLoopList: \r\n x, y = selfLoopedEdge.getEdgePosition()\r\n obj = selfLoopedEdge.getASGNode().graphObject_\r\n obj.moveTo(x, y)\r\n\r\n # Re-doing links can take a while, lets show something in meanwhile...\r\n self.atom3i.parent.update()\r\n \r\n # Re-wire the links to take into account the new node positions\r\n selectedLinks = []\r\n for obj in linkNodeDict.values():\r\n selectedLinks.append(obj)\r\n optimizeLinks(self.cb, 
setSmooth, setCurvature, \r\n selectedLinks=selectedLinks)\r\n \r\n # Re-doing links can take a while, lets show something in meanwhile...\r\n self.atom3i.parent.update()\r\n \r\n # Route multi-layer edges\r\n self.__edgeRouter()", "def shortest_path(start, end):\n if start == end:\n return []\n \n start_frontier = Queue.Queue()\n start_parent = {}\n start_level = {}\n start_parent_move = {}\n start_frontier.put(start)\n start_level[start] = 0\n start_parent[start] = None\n start_parent_move[start] = None\n \n end_frontier = Queue.Queue()\n end_parent = {}\n end_level = {}\n end_parent_move = {}\n end_frontier.put(end)\n end_level[end] = 0\n end_parent[end] = None\n end_parent_move[end] = None\n \n intersectFound = False\n intersect = None\n level = 0\n while (True):\n level += 1\n# print (\"level = \" + str(level))\n if not start_frontier.empty():\n vertex = start_frontier.get()\n for move in rubik.quarter_twists:\n position = rubik.perm_apply(move,vertex)\n if position not in start_parent:\n# print (\"start permutation unvisited\")\n start_parent[position] = vertex\n start_level[position] = level\n start_parent_move[position] = move\n start_frontier.put(position)\n if position in end_parent:\n# print (\"position exists in end_parent\")\n intersect = position\n intersectFound = True\n break\n if intersectFound:\n break\n if not end_frontier.empty():\n vertex = end_frontier.get()\n for move in rubik.quarter_twists:\n position = rubik.perm_apply(move,vertex)\n if position not in end_parent:\n# print (\"end permutation unvisited\")\n end_parent[position] = vertex\n end_level[position] = level\n end_parent_move[position] = move\n end_frontier.put(position)\n if position in start_parent:\n# print (\"position exists in start_parent\")\n intersect = position\n intersectFound = True\n break\n if intersectFound:\n break\n if end_frontier.empty() and start_frontier.empty():\n break\n \n if intersect is None:\n return None\n \n path = []\n pos = intersect\n while (start_parent[pos] is not None):\n path.insert(0,start_parent_move[pos])\n pos = start_parent[pos]\n \n pos = intersect\n while (end_parent[pos] is not None):\n move = rubik.perm_inverse(end_parent_move[pos])\n path.append(move)\n pos = end_parent[pos]\n \n# path = [None] * start_level[intersect]\n# pos = intersect\n# move = start_parent_move[pos]\n# path[start_level[intersect]-1] = move\n# for i in range(start_level[intersect]-2,-1,-1):\n# if (start_parent[pos] is not None):\n# pos = start_parent[pos]\n# move = start_parent_move[pos]\n# path[i] = move\n# \n# pos = intersect\n# while (end_parent[pos] is not None):\n# move = rubik.perm_inverse(end_parent_move[pos])\n# path.append(move)\n# pos = end_parent[pos]\n \n return path", "def make_nodes_and_paths(friends_lst):\n\n # nodes = {}\n\n # for item in friends_lst:\n # friend1, friend2, group = item\n # for person in pair:\n # if not nodes.get(person):\n # nodes[person] = pair[1]\n\n # nodes = [{'name': person, 'friend': nodes[person]} for person in nodes.keys()]\n\n nodes = {}\n for item in friends_lst:\n friend1, friend2, group = item\n if not nodes.get(friend1):\n nodes[friend1] = group\n elif nodes.get(friend1) > group:\n nodes[friend1] = group\n\n nodes = [{'name': person, 'group': nodes[person]} for person in nodes.keys()]\n\n index_nodes = {}\n for idx, n in enumerate(nodes):\n index_nodes[n['name']] = (idx, n['group'])\n\n paths = []\n\n # paths.append({'source': item[1], 'target': item[0]})\n\n for item in friends_lst:\n # one = User.query.get(item.user_id)\n # two = 
User.query.get(item.friend_id)\n source, target, group = item\n paths.append({'source': index_nodes[source][0], 'target': index_nodes[target][0]})\n\n # print nodes\n # print index_nodes\n # print paths\n\n return nodes, paths", "def get_levels(self, nodes, boundary_nodes): # pylint: disable=R0201\n # current distance = collection of all node ids with same minimum distance\n # from the outermost boundary\n levels = []\n\n nodes_with_same_distance = boundary_nodes\n distance = 0\n # keep the process going until we have gone through all the possible distances\n while nodes_with_same_distance:\n next_nodes_with_same_distance = []\n for node_id in nodes_with_same_distance:\n nodes[node_id]['distance'] = distance # this is only needed for 1st step\n # relations = all nodes connected to the node in question by a single edge\n for related_node_id in nodes[node_id]['relations']:\n # if we have not labeled this node yet, that means it must\n # have a distance 1 greater than the nodes we're iterating over\n if nodes[related_node_id].get('distance') is None:\n nodes[related_node_id]['distance'] = distance + 1\n next_nodes_with_same_distance.append(related_node_id)\n\n level_cycles, level_paths = self.identify_level_elements(nodes, nodes_with_same_distance) # pylint: disable=C0301\n levels.append({\n 'node_ids': nodes_with_same_distance,\n 'cycles': level_cycles,\n 'paths': level_paths\n })\n distance += 1\n nodes_with_same_distance = next_nodes_with_same_distance\n\n return levels", "def traverse_graph_start_end_extra_node(graph):\n\n # get tree with starting node tags\n\n def traverse(graph, node):\n\n children = [int(c) for c in graph[node][\"children\"]]\n tagged_children = []\n for child in children:\n ellipsed_parents = [int(p) for p in graph[child][\"ellipsed_parents\"]]\n # if the child is explicit\n if node not in ellipsed_parents:\n if graph[child][\"terminal\"] == \"yes\":\n tagged_children.append(ParentedTree(graph[child][\"tag\"], [graph[child][\"text\"]]))\n else:\n tagged_children.append(traverse(graph, child))\n # if the child is ellipsed\n else:\n ellipsis_tag = get_ellipsis_tag_from_graph(graph, child)\n tagged_children.append(ParentedTree(ellipsis_tag, []))\n \n tree = ParentedTree(graph[node][\"tag\"], tagged_children)\n\n return tree\n\n tree = traverse(graph, 0)\n\n # get ending node tags\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n end_tags = []\n ellipsis_id = 0 # assign an id to each ellipsis start and end nodes\n for pos_i, pos in enumerate(positions):\n if tree[pos].label().startswith(\"start\"):\n ellipsis_tag = tree[pos].label().split(\"start\")[-1]\n tree[pos].set_label(\"start\" + str(ellipsis_id))\n end_location = get_ellipsis_location(tree, ellipsis_tag)\n end_tag = \"end\" + str(ellipsis_id)\n end_tags.append((end_location, end_tag))\n ellipsis_id += 1\n\n # insert ending node tags\n for index, st in enumerate(tree.subtrees()):\n for end_location, end_tag in end_tags:\n if st.treeposition() == end_location:\n st.insert(index, ParentedTree(end_tag, []))\n\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n rev_positions = [pos for pos in reversed(positions)]\n for pos_i, pos in enumerate(rev_positions):\n # append start tag to the previous node\n if tree[pos].label().startswith(\"start\"):\n prev_pos_i = pos_i + 1\n prev_pos = rev_positions[prev_pos_i]\n tree[prev_pos].set_label(tree[prev_pos].label() + tree[pos].label())\n del tree[pos]\n # append end tag to the parent 
of the current node\n elif tree[pos].label().startswith(\"end\"):\n parent_pos = tree[pos].parent().treeposition()\n tree[parent_pos].set_label(tree[parent_pos].label() + tree[pos].label())\n del tree[pos] \n\n # wrap each constituent that has end or start tags with extra nodes\n\n def add_extra_nodes(tree):\n children = []\n for subtree in tree:\n if type(subtree) == str:\n children.append(subtree)\n else:\n splits = re.split(\"(start|end)\", subtree.label())\n const_tag = splits[0]\n ellipsis_tag = \"\".join(splits[1:]) \n if len(ellipsis_tag) > 0:\n children.append(Tree(subtree.label(), [Tree(const_tag, [sst for sst in subtree])]))\n else:\n children.append(add_extra_nodes(subtree))\n\n return Tree(tree.label(), children)\n\n tree = add_extra_nodes(tree)\n\n return tree", "def depth_node_ordering(start_node, end_nodes):\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list", "def BidirectionalSearch(start_node, end_node, goal_state, improved_descendants = False):\t\n\tqueue_down = [start_node]\n\tqueue_up = [end_node]\n\n\tvisited_nodes_down = set()\n\tvisited_nodes_up = set()\n\n\tnumber_nodes_expanded = 0\n\tnumber_nodes_visited = 0\n\n\tchild_nodes_down = []\n\tchild_nodes_up = []\n\n\thash_value_down = {}\n\thash_value_up = {}\n\n\tt0 = time.time()\n\t\n\twhile len(queue_down) > 0 or len(queue_up) > 0:\n\t\ttop_expanded = False\n\t\tbottom_expanded = False\n\n\t\t#if the search down still has nodes to expand\n\t\tif len(queue_down) > 0:\n\t\t\tnode_down = queue_down.pop(0)\n\t\t\tbottom_expanded = True\n\t\t\tnumber_nodes_visited += 1\n\t\t\tnode_down.count = number_nodes_visited\n\t\t\n\t\t#if the search up still has nodes to expand\n\t\tif len(queue_up) > 0:\n\t\t\tnode_up = queue_up.pop(0)\n\t\t\ttop_expanded = True\n\t\t\tnumber_nodes_visited += 1\n\t\t\tnode_up.count = number_nodes_visited\n\n\t\tt1 = time.time()\n\t\tif (t1 - t0) > 900:\n\t\t\tprint(\"It took more than 15 min\")\n\t\t\treturn False\n\n\t\tif bottom_expanded:\n\t\t\tnode_down_hash = node_down.build_hash()\n\n\t\t\tif node_down_hash not in visited_nodes_down:\n\t\t\t\tnumber_nodes_expanded += 1\n\t\t\t\tvisited_nodes_down.add(node_down_hash)\n\t\t\t\thash_value_down[node_down_hash] = node_down\n\t\t\t\tchild_nodes_down = node_down.successors(improved_descendants)\n\n\t\t\t\tfor i in range(len(child_nodes_down)):\n\t\t\t\t\tqueue_down.append(child_nodes_down[i])\n\t\t\telse:\n\t\t\t\tchild_nodes_down = []\n\n\t\tif top_expanded:\n\t\t\tnode_up_hash = node_up.build_hash()\n\t\t\tif node_up_hash not in visited_nodes_up:\n\t\t\t\tvisited_nodes_up.add(node_up_hash)\n\t\t\t\thash_value_up[node_up_hash] = node_up\n\n\t\t\t\tnumber_nodes_expanded += 1\n\t\t\t\tchild_nodes_up = node_up.successors(improved_descendants)\n\t\t\t\n\t\t\t\tfor i in range(len(child_nodes_up)):\n\t\t\t\t\tqueue_up.append(child_nodes_up[i])\n\t\t\telse:\n\t\t\t\tchild_nodes_up = []\n\n\t\t#The node expanded on the search down was already expanded in the search up or vice-versa\n\t\tif bottom_expanded and (node_down_hash in visited_nodes_up):\n\t\t\tprint(\"Expanded nodes: \" + str(number_nodes_expanded))\n\t\t\tdepth_found = print_solution(node_down, number_nodes_expanded, goal_state, hash_value_up[node_down_hash])\n\t\t\treturn True\n\t\telif 
top_expanded and (node_up_hash in visited_nodes_down):\n\t\t\tprint(\"Expanded nodes: \" + str(number_nodes_expanded))\n\t\t\tdepth_found = print_solution(hash_value_down[node_up_hash], number_nodes_expanded, goal_state, node_up)\n\t\t\treturn True\n\t\t\t\t\n\treturn False", "def createBST(self, nums, start, end):\n if end < start:\n return None\n mid = int((start+end)/2)\n #create a new node\n root = TreeNode(nums[mid])\n root.left = self.createBST(nums, start,mid -1)\n root.right = self.createBST(nums, mid +1 ,end)\n return root", "def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict", "def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. 
Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict", "def prepare_initial_nodes(x_start, x_end, nodes_y):\n nodes_x = [float(x_start + ((x_end - x_start) / (len(nodes_y) - 1)) * i) for i in range(0, len(nodes_y))]\n nodes_y = [float(y) for y in nodes_y]\n print(nodes_x)\n print(nodes_y)\n nodes = list(zip(nodes_x, nodes_y))\n return nodes", "def distance_of_x_to_y(start_node, end_node):\n\n # adding the base router's d_vec to n_d_vec\n # will simplify the problem and we can then\n # remove the first conditional statement\n\n # when a router comes back to its parent\n # its distance will ultimately increase\n # and hence that branch will be ignored\n\n global DATA\n\n # neighbor ids will now change for every router\n all_neighbor_ids = [\n neighbor[0]\n for neighbor in DATA[\"n_d_vec\"][start_node]\n ]\n # if start_node is not present at that moment\n # in n_d-vec then an exception (Type Error)\n # will be raised after which we need to\n # return math.inf\n try:\n if end_node in all_neighbor_ids:\n return [\n every_neighbor[1]\n for every_neighbor in DATA[\"n_d_vec\"][start_node]\n if end_node is every_neighbor[0]\n ][0]\n else:\n # we need to handle going back\n # we can pass an initial router\n # from which the algorithm has\n # started and hence we can avoid\n # going back\n\n # we may do some memoization here\n # and hence don't do reevaluation every time\n return min(\n [\n distance_of_x_to_y(start_node, neighbor) +\n distance_of_x_to_y(neighbor, end_node)\n for neighbor in all_neighbor_ids\n ]\n )\n except TypeError as node_err:\n with PRINT_LOCK:\n print(\"the start node is node is not present\\\n at this moment in the n_d_vec \\n{}\"\\\n .format(node_err)\\\n )\n return math.inf", "def create_graph(some_map):\n\n map_height = some_map.height\n map_width = some_map.width\n map_obstacles = some_map.obstacles\n\n nodes = [[None] * map_width] * map_height\n\n # create a node representing each position on the map\n for y in range(0, map_height):\n for x in range(0, map_width):\n position = (y, x)\n\n # create a node describing this position\n node = Node(position=position)\n\n # store it on the graph\n nodes[y][x] = node\n\n # look through all moving characters, non-moving characters, and items\n for map_obstacle in map_obstacles:\n # all characters must start somewhere\n node = nodes[map_obstacle.y][map_obstacle.x]\n\n # store the map_obstacle on this node.\n node.contents.add(map_obstacle)\n\n # only create threat zones for moving/turning entities\n if map_obstacle.can_move() or map_obstacle.can_turn_without_moving():\n threat_zone = ThreatZone(map_obstacle, nodes, some_map)\n threat_zone.mark_nodes_as_members_of_threat_zone()\n\n some_map.nodes = nodes\n\n return nodes", "def _loop_depth(self, start, connections):\n # This is just a slightly modified breadth-first search\n visited = {start: 1}\n frontier = [start]\n\n limit = []\n while len(frontier):\n node = frontier.pop(0)\n prev_depth = visited[node]\n if prev_depth >= self.depth:\n limit.append(node)\n continue\n\n for x in connections[node]:\n if x in visited:\n continue\n visited[x] = prev_depth + 1\n frontier.append(x)\n return limit", "def create_child_nodes(current_node: Node, goal: list, generated: set) -> list:\n\n children = []\n locations = state_to_locations(current_node.state)\n blank = 
locations[0]\n\n # Moving blank to the left\n if blank[1] != 0:\n new_locations = copy.deepcopy(locations)\n new_locations[0] = (new_locations[0][0], new_locations[0][1] - 1)\n # Modifies the location of the blank in the new list\n \"\"\" Note that the index 0 represents the first column. So long as \n the blank is not in the first column, it can be moved to the left.\"\"\"\n neighbor = current_node.state[blank[0]][blank[1] - 1]\n # Finds the number on the tile to the left of the blank\n new_locations[neighbor] = (new_locations[neighbor][0], new_locations[neighbor][1] + 1)\n # Modifies the location of the neighbor in the new list\n new_path_history = copy.deepcopy(current_node.path_history)\n new_path_history.append('L')\n new_state = locations_to_state(new_locations)\n # Constructs the new state by calling locations_to_state\n new_node = Node(new_state, current_node, current_node.path_cost + 1,\n heuristic_cal(new_state, goal), new_path_history)\n if new_node not in generated:\n children.append(new_node)\n \"\"\" Append the child node to the list only if it's not a \n repeated state.\"\"\"\n\n # Moving blank to the right\n if blank[1] != 3:\n new_locations = copy.deepcopy(locations)\n new_locations[0] = (new_locations[0][0], new_locations[0][1] + 1)\n \"\"\" Similar to the case above: so long as the blank is not in the fourth \n column, it can be moved to the right.\"\"\"\n neighbor = current_node.state[blank[0]][blank[1] + 1]\n # Finds the number on the tile to the right of the blank\n new_locations[neighbor] = (new_locations[neighbor][0], new_locations[neighbor][1] - 1)\n new_path_history = copy.deepcopy(current_node.path_history)\n new_path_history.append('R')\n new_state = locations_to_state(new_locations)\n new_node = Node(new_state, current_node, current_node.path_cost + 1,\n heuristic_cal(new_state, goal), new_path_history)\n if new_node not in generated:\n children.append(new_node)\n\n # Moving blank up\n if blank[0] != 0:\n new_locations = copy.deepcopy(locations)\n new_locations[0] = (new_locations[0][0] - 1, new_locations[0][1])\n \"\"\" So long as the blank is not in the first row, it can be moved up.\"\"\"\n neighbor = current_node.state[blank[0] - 1][blank[1]]\n # Finds the number on the tile above the blank\n new_locations[neighbor] = (new_locations[neighbor][0] + 1, new_locations[neighbor][1])\n new_path_history = copy.deepcopy(current_node.path_history)\n new_path_history.append('U')\n new_state = locations_to_state(new_locations)\n new_node = Node(new_state, current_node, current_node.path_cost + 1,\n heuristic_cal(new_state, goal), new_path_history)\n if new_node not in generated:\n children.append(new_node)\n\n # Moving the blank down\n if blank[0] != 3:\n new_locations = copy.deepcopy(locations)\n new_locations[0] = (new_locations[0][0] + 1, new_locations[0][1])\n \"\"\" So long as the blank is not in the fourth row, it can be moved down.\"\"\"\n neighbor = current_node.state[blank[0] + 1][blank[1]]\n # Finds the number on the tile below the blank\n new_locations[neighbor] = (new_locations[neighbor][0] - 1, new_locations[neighbor][1])\n new_path_history = copy.deepcopy(current_node.path_history)\n new_path_history.append('D')\n new_state = locations_to_state(new_locations)\n new_node = Node(new_state, current_node, current_node.path_cost + 1,\n heuristic_cal(new_state, goal), new_path_history)\n if new_node not in generated:\n children.append(new_node)\n\n return children", "def a_star(self, start_xy, end_xy):\n start_node = Node(start_xy, end_xy, 
self.graph_start_xy)\n start_node.shortest_dist = 0\n start_node.update_total_cost()\n pq = []\n pq.append(start_node)\n heapify(pq)\n stack = []\n while pq:\n current_node = heappop(pq)\n if (current_node.coord_xy == end_xy).all():\n print(\"it's done \")\n iterator = current_node\n while iterator:\n stack.append(iterator.coord_xy)\n iterator = iterator.prev_node\n break\n\n for neighbour, dist in current_node.connections:\n neighbouring_node = Node(neighbour, end_xy, self.graph_start_xy)\n if current_node.shortest_dist + dist < neighbouring_node.shortest_dist:\n neighbouring_node.shortest_dist = current_node.shortest_dist + dist\n neighbouring_node.update_total_cost()\n neighbouring_node.prev_node = current_node\n heappush(pq, neighbouring_node)\n return stack", "def dijkstra(self, start, end):\n unvisited = self.nodes()\n distance = {}\n previous = {}\n for node in unvisited:\n distance[node] = sys.maxsize\n distance[start] = 0\n while len(unvisited) > 0:\n node = unvisited[0]\n smallest_curr = sys.maxsize\n for d in distance:\n if d in unvisited and distance[d] < smallest_curr:\n node = d\n smallest_curr = distance[d]\n unvisited.remove(node)\n for neighbor in self.neighbors(node).keys():\n alt_path = distance[node] + self.weight(node, neighbor)\n if alt_path < distance[neighbor]:\n distance[neighbor] = alt_path\n previous[neighbor] = node\n result = []\n result.append(end)\n curr = end\n while curr in previous:\n result.append(previous[curr])\n curr = previous[curr]\n return result", "def create_nodepairs_and_edges_df(left_and_right_nodepairs: pandas.DataFrame) -> None:\n # read_node.cache_clear() # Optional, we don't need this for now.\n print('\\nCache used at start of function: ' + str(read_node.cache_info()) + '.')\n print('There are ' + str(len(left_and_right_nodepairs)) + ' rows, creating nodes and edges for row: 0 ', end='')\n count = 0\n columns = left_and_right_nodepairs.columns\n for row in left_and_right_nodepairs.itertuples():\n count += 1\n if count % 250 == 0:\n print(count, ' ', end='', flush=True)\n if count % 10000 == 0:\n print('\\n', end='', flush=True)\n\n node_properties = {}\n for prop_name in RICGRAPH_PROPERTIES_ADDITIONAL:\n for other_name in columns:\n if prop_name + '1' == other_name:\n node_properties[prop_name + '1'] = getattr(row, other_name)\n for other_name in columns:\n if prop_name + '2' == other_name:\n node_properties[prop_name + '2'] = getattr(row, other_name)\n\n create_two_nodes_and_edge(name1=row.name1, category1=row.category1, value1=row.value1,\n name2=row.name2, category2=row.category2, value2=row.value2,\n **node_properties)\n print(count, '\\n', end='', flush=True)\n print('Cache used at end of function: ' + str(read_node.cache_info()) + '.')\n return", "def decorate_graph(self, G, roommap, heightmap, thingsmap):\n # Selecting candidates for starting and exiting nodes:\n # leaves of the spanning tree are the most suitable\n # Connected components (floors)\n\n H = G.copy()\n # print(G)\n H.remove_node(0)\n floors = sorted(nx.connected_components(H), key=len, reverse=True)\n # print(floors)\n level_solution = list()\n corrected_heights = dict()\n\n for id, floor_rooms in enumerate(floors):\n # Creating a spanning tree for each floor\n F = H.subgraph(floor_rooms)\n T = nx.minimum_spanning_tree(F)\n degree = dict(T.degree())\n # Entry point has minimum node degree\n floor_entry = min(degree, key=degree.get)\n # Finding all paths in the level to determine the best exit (longest path)\n paths = list()\n for n in T.nodes():\n p = 
list(nx.all_simple_paths(T, floor_entry, n))\n if len(p) > 0:\n paths += p\n else:\n # If a floor has a single room then there are no path from n to n and a max cannot be calculated\n paths += [[n]]\n\n floor_solution = max(paths, key=len)\n if floor_rooms==max(floors, key=len):\n level_solution.append(floor_solution)\n\n # Fixing the heights along all paths so every path becomes walkable\n for path in paths:\n for rid, room in enumerate(path):\n if room not in corrected_heights:\n height = np.nanmedian(np.where(roommap == room, heightmap, np.nan))\n if rid > 0:\n # Alter this room height to be walkable\n if height > path[rid-1] + 24:\n height = path[rid-1] + 24\n corrected_heights[room] = int(height)\n nx.set_node_attributes(G, corrected_heights, \"height\")\n\n for id, floor_path in enumerate(level_solution):\n if id == 0:\n # Place the level start\n start_x, start_y = G.nodes[floor_path[0]][\"centroid\"]\n nx.set_node_attributes(G, {floor_path[0]: {\"location\": (start_x, start_y)}}, \"level_start\")\n else:\n # place a teleport source\n possible_places = np.stack(np.where(roommap==floor_path[0]), axis=1)\n random_pixel_index = np.random.choice(possible_places.shape[0])\n x, y = possible_places[random_pixel_index]\n\n nx.set_node_attributes(G, {floor_path[0]: {\"location\": (x, y)}}, \"floor_start\")\n if id == len(level_solution)-1:\n # This is the last floor to visit, place the level exit\n possible_places = np.stack(np.where(roommap == floor_path[-1]), axis=1)\n random_pixel_index = np.random.choice(possible_places.shape[0])\n x, y = possible_places[random_pixel_index]\n nx.set_node_attributes(G, {floor_path[-1]: {\"location\": (x, y)}}, \"level_exit\")\n else:\n # There's another unvisited floor, place a teleporter to the next floor\n possible_places = np.stack(np.where(roommap==floor_path[-1]), axis=1)\n random_pixel_index = np.random.choice(possible_places.shape[0])\n x, y = possible_places[random_pixel_index]\n\n nx.set_node_attributes(G, {floor_path[-1]: {\"destination\":level_solution[id+1][0], \"location\": (x, y)}}, \"floor_exit\")\n\n level_objects = {}\n # Scanning the room for objects\n for room in H.nodes():\n things_in_room = (roommap == room)*thingsmap\n things_pixels_indices = np.delete(np.unique(things_in_room), 0)\n # Converting thing pixels to doom types, need to search from essentials\n things_types = [get_type_id_from_index(i, essential=True) for i in things_pixels_indices]\n categories = [get_category_from_type_id(t) for t in things_types]\n\n things_dict = {}\n\n for thing_id, thing_type, thing_cat in zip(things_pixels_indices, things_types, categories):\n # skipping generated player starts teleports and keys since they are placed statically\n if thing_cat is not None and thing_cat not in [\"other\", \"start\", \"keys\"]:\n if thing_cat not in things_dict:\n things_dict[thing_cat] = {}\n if thing_type not in things_dict[thing_cat]:\n things_dict[thing_cat][thing_type] = []\n\n x_list, y_list = np.where(things_in_room==thing_id)\n for x, y in zip(x_list, y_list):\n things_dict[thing_cat][thing_type].append((x, y))\n level_objects[room] = things_dict\n\n nx.set_node_attributes(G, level_objects, \"things\")\n\n return G", "def Dijkstra(node_init, node_end, graph):\n\n ### Parameter initialisation\n node_list = list(graph.vertices.keys())\n dist = np.full(len(node_list), -np.inf)\n # At the beginning we have not reached the end_node\n node_end_reached = False\n # At the beginning, we assume there is a shortest path:\n no_path = False\n\n # Initialising the distances 
of the nodes\n dist[node_init] = 0\n # Setting the father_node which contains the provenance of the nodes\n father_node = np.full(len(node_list), -np.inf)\n # Initialising the current node\n current_node = node_init \n # Initialising the dictionnary of fixed node which has the following shape:\n #{fixed_node: previous_node}\n # Fixing the number of iterations\n k = 0\n dict_fixed_node = {node_init:(None,k)}\n \n # In the trivial case where the two nodes are identical\n if node_init == node_end:\n cost = 0\n shortest_path = [node_init]\n no_path = False\n return cost, shortest_path, no_path\n \n # While the end node has not been reached\n while not node_end_reached:\n current_node_adj = graph.node_get_adj(current_node).copy()\n # We get rid off the node that have been fixed, except at the first iteration\n if k != 0:\n current_node_adj.remove(dict_fixed_node[current_node][0])\n ## Updating the distances : either the node are neighbors and \n # something might change, either they are not, and their distance \n # does not change.\n # For the neighbors node\n for e in current_node_adj:\n dist_temp = dist[current_node] + 1\n # We change the distance only if it is lower than it used to be\n # otherwise, we keep it\n if dist_temp < dist[e] or dist[e] == -np.inf:\n dist[e] = dist_temp\n # Setting the father node\n father_node[e] = current_node\n father_node[current_node] = None\n # We set the distance of the current node to 0\n dist[current_node] = 0 \n # Index and distances which are not 0 and not minus infty\n sub_dist_index = [i for i, e in enumerate(dist) if e > 0]\n sub_dist_value = np.array([e for i, e in enumerate(dist) if e > 0])\n # If these two lists are empty, we stop the algorithm and that means\n # that we cannot reach our point\n if not sub_dist_index or sub_dist_value.size == 0:\n no_path = True\n cost = 'impossible path'\n shortest_path = 'impossible path'\n break\n # Now we need to set our choice for the next node\n if np.array_equal(sub_dist_value, np.ones(len(sub_dist_value))):\n ## If there are only ones : we pick them up randomly\n current_node = int(random.choice(list(sub_dist_index)))\n else:\n ## If not we just pick up the one with the minimum distance.\n current_node = sub_dist_index[sub_dist_value.argmin()]\n # Adding this node to the dictionnary\n dict_fixed_node[current_node] = (int(father_node[current_node]), k)\n # If the end_node has been reached, we stop the search algorithm\n if node_end in dict_fixed_node.keys():\n node_end_reached = True\n # Incrementing the counter\n k += 1\n\n # Now we need to get the shortest path from our iterations whose information \n # are in dict_fixed_node. 
To do this, we need to circle back from the end_node\n # to the init_node in this dictionnary.\n # This is done only if some path between node_init and end_node exists.\n if no_path == False:\n list_father_node = list(dict_fixed_node.values())\n previous_node = list_father_node[-1][0]\n shortest_path = [node_end, previous_node]\n # While the initial node has not been reached, we add the relevant\n # nodes to our shortest path\n while previous_node != node_init:\n previous_node = dict_fixed_node[previous_node][0]\n shortest_path.append(previous_node)\n \n # Computing the cost of this shortest path in terms of weights\n cost = len(shortest_path) - 1\n \n return cost, shortest_path, no_path", "def identify_level_elements(self, nodes, node_ids_in_level): # pylint: disable=R0914,R0201,R0915,R0912\n # assume all node ids are NOT in a chordless cycle to begin with\n non_cycle_node_ids = dict(zip(node_ids_in_level, map(lambda i: True, node_ids_in_level)))\n\n level_cycles = []\n level_paths = []\n\n # Step 1: Define all edges and declare them as untraversed\n untraversed_edges = {}\n for node_id in node_ids_in_level:\n node = nodes.get(node_id)\n related_node_ids = list(filter(lambda id: id in node_ids_in_level, node['relations']))\n for related_node_id in related_node_ids:\n untraversed_edges[\"{}-{}\".format(related_node_id, node_id)] = True\n untraversed_edges[\"{}-{}\".format(node_id, related_node_id)] = True\n\n # given a starting path (array of two connected points), this will\n # traverse all connected points in a counter clockwise fashion\n def traverse_edges_for_cycles(path):\n # get next edge counter clockwise\n last_node = nodes.get(path[-1])\n second_to_last_node = nodes.get(path[-2])\n\n # NB: this relies on relations already having been sorted clockwise\n related_node_ids = list(filter(lambda id: id in node_ids_in_level, last_node['relations'])) # pylint: disable=C0301\n second_to_last_node_index = related_node_ids.index(second_to_last_node['id'])\n index = (second_to_last_node_index + 1) % len(related_node_ids)\n next_node_id = related_node_ids[index]\n\n # remove from traversed edges\n edge_id = \"{}-{}\".format(last_node['id'], next_node_id)\n if untraversed_edges.get(edge_id):\n del untraversed_edges[edge_id]\n\n if next_node_id in path:\n if next_node_id != second_to_last_node['id']:\n level_cycle = path[path.index(next_node_id):]\n for node_id in level_cycle:\n # remove new node from traversed nodes\n if non_cycle_node_ids.get(node_id):\n del non_cycle_node_ids[node_id]\n level_cycles.append(level_cycle)\n else:\n path.append(next_node_id)\n traverse_edges_for_cycles(path)\n\n def traverse_edges_for_path(path, prev_node_id=None):\n last_node = nodes.get(path[-1])\n if non_cycle_node_ids.get(last_node['id']):\n del non_cycle_node_ids[last_node['id']]\n related_node_ids = list(filter(lambda id: id in node_ids_in_level, last_node['relations'])) # pylint: disable=C0301\n for node_id in related_node_ids:\n if non_cycle_node_ids.get(node_id):\n del non_cycle_node_ids[node_id]\n if node_id != prev_node_id:\n path.append(node_id)\n if node_id in non_cycle_node_ids:\n traverse_edges_for_path(path, prev_node_id=node_id)\n return path\n\n # this loop identifies all level cycles\n while untraversed_edges:\n # pick an edge and add to the path, delete from traversed\n current_edge = next(iter(untraversed_edges))\n del untraversed_edges[current_edge]\n starting_path = list(map(int, current_edge.split('-')))\n traverse_edges_for_cycles(path=starting_path)\n\n # this loop identifies all level 
paths\n while non_cycle_node_ids:\n current_node_id = next(iter(non_cycle_node_ids))\n level_path = traverse_edges_for_path([current_node_id])\n level_paths.append(level_path)\n\n\n # remove duplicate cycles\n non_duplicate_cycles = []\n existing_cycles_as_sets = []\n for level_cycle in level_cycles:\n if set(level_cycle) not in existing_cycles_as_sets:\n non_duplicate_cycles.append(level_cycle)\n existing_cycles_as_sets.append(set(level_cycle))\n\n # remove duplicate paths\n non_duplicate_paths = []\n existing_paths_as_sets = []\n for level_path in level_paths:\n if set(level_path) not in existing_paths_as_sets:\n non_duplicate_paths.append(level_path)\n existing_paths_as_sets.append(set(level_path))\n\n\n # adds ids to nodes for level paths and level cycles\n for cycle_id, level_cycle in enumerate(non_duplicate_cycles):\n for node_id in level_cycle:\n node = nodes.get(node_id)\n if not cycle_id in node['level_cycles']:\n node['level_cycles'].append(cycle_id)\n if len(level_cycle) == 3:\n node['is_root_element'] = True\n\n for path_id, level_path in enumerate(non_duplicate_paths):\n for node_id in level_path:\n node = nodes.get(node_id)\n if not path_id in node['level_paths']:\n node['level_paths'].append(path_id)\n node['is_root_element'] = True\n\n return non_duplicate_cycles, non_duplicate_paths", "def gen_nodes(modelfile, starting_genes):\n # read json file with final model variables\n shape, top_genes, weights, output_key, biases = read_json(modelfile)\n\n # initialize database\n database = db.Database()\n\n # create list to store all layers\n NN = []\n\n # get input probe sequences\n input_seqs_df = inputs.probes_df(top_genes)\n # each layer is a dictionary with keys as names of strands and values as a list of seqs\n l_0 = {}\n probe_seqs = []\n for probe in input_seqs_df[\"Probe Sequences\"]:\n index = 0\n size = database.size\n while database.size < size + 1:\n try:\n database.database_insert(Seq(probe[index]))\n index += 1\n # except block handles case that NONE of the probe sequences were accepted into the database\n # ***TEMPORARY FIX***\n except IndexError:\n index -= 1\n break\n probe_seqs.append(Seq(probe[index]))\n l_0[\"Probe Sequence\"] = probe_seqs\n print(\"Layer 0: \", l_0)\n NN.append(l_0)\n\n # add the tether and promotor to the database\n database.database_insert(starting_genes[\"Tether\"])\n database.database_insert(starting_genes[\"T7 Promoter\"])\n\n # generate all the sequences for every node in each layer\n for layer in range(1, len(shape)):\n # add the cage and tether sequences to the layer dictionary\n l_i = {}\n l_i[\"Cage Sense\"] = [starting_genes[\"Cage Sense\"]] * shape[layer]\n l_i[\"Cage Antisense\"] = [starting_genes[\"Cage Antisense\"]] * shape[layer]\n l_i[\"Tether\"] = [starting_genes[\"Tether\"]] * shape[layer]\n\n print(\"getting anchor strands\")\n tether_length = len(starting_genes[\"Tether\"])\n size = database.size\n # generate anchor strands until all of them have been accepted into the database\n while database.size < size + shape[layer]:\n anchor = oligo.oligo(tether_length)\n database.database_insert(anchor)\n anchor_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n print(\"getting transcription factors\")\n threshold_energy = 9 # variable that can be changed, pos integer, see gen_tf for description\n static_tf_seqs = []\n tf_seqs = []\n for anchor in anchor_seqs:\n static_tf, tf = gen_tf(anchor, starting_genes[\"Tether\"], threshold_energy)\n static_tf_seqs.append(static_tf)\n tf_seqs.append(tf)\n 
print(\"DONE\")\n\n print(\"getting outputs\")\n output_length = 25 # length of dna transcript from one node\n size = database.size\n while database.size < size + shape[layer]:\n output = oligo.oligo(output_length).sequence\n database.database_insert(output)\n transcript_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n # assemble longer strands in the node\n l_i[\"Static TF + Transcript Sense\"] = [static_tf_seqs[i] + starting_genes[\"T7 Promoter\"] + transcript_seqs[i]\n for i in range(shape[layer])]\n l_i[\"Transcript Antisense + Anchor\"] = [\n oligo.complement(transcript_seqs[i]) + oligo.complement(starting_genes[\"T7 Promoter\"]) + anchor_seqs[i] for\n i in range(shape[layer])]\n\n # intermediates are the strands that determine weights in toehold-mediated displacement\n print(\"getting intermediate\")\n toe_length = 20 # standard length for all toehold sequences\n # get the 2D matrix for this layer and round the values to one decimal place\n weight_matrix = np.array(weights[layer - 1])\n weight_matrix = np.round(weight_matrix, 1)\n intermediate_seqs = []\n tf_appendage_seqs = []\n for i in range(shape[layer - 1]):\n if layer == 1:\n output = NN[0][\"Probe Sequence\"][i]\n else:\n output = NN[layer - 1][\"Static TF + Transcript Sense\"][i][-output_length:]\n inters = []\n top_toe = output[:toe_length]\n b_dom = output[toe_length:]\n tf_appendage_seqs.append(b_dom)\n # get all the possible sequences for toehold weights between 0 and 1\n weight_dict = quant.find_quanta(top_toe)\n for j in range(shape[layer]):\n w = weight_matrix[j, i]\n tf = tf_seqs[j]\n a_star_tf = tf[:len(tf) // 2]\n if w < 0:\n # negative weights\n inters.append(a_star_tf + oligo.complement(b_dom) + weight_dict[w * -1])\n else:\n # positive weights\n inters.append(oligo.complement(a_star_tf) + oligo.complement(b_dom) + weight_dict[w])\n\n intermediate_seqs.append(inters)\n # each list in the nested list is for one node in the layer, get nodes row-wise\n l_i[\"Intermediate\"] = np.array(intermediate_seqs).T.tolist()\n print(\"DONE\")\n\n # TF and TF Inhibitor are products of toehold-mediated displacement for pos and neg weights, respectively\n full_tf_seqs_2D = []\n attack_seqs_2D = []\n for tf in tf_seqs:\n full_tf_seqs = []\n attack_seqs = []\n for appendage in tf_appendage_seqs:\n full_tf_seq = appendage + tf\n attack_seq = appendage + oligo.complement(tf[:len(tf) // 2])\n full_tf_seqs.append(full_tf_seq)\n attack_seqs.append(attack_seq)\n full_tf_seqs_2D.append(full_tf_seqs)\n attack_seqs_2D.append(attack_seqs)\n l_i[\"TF\"] = full_tf_seqs_2D\n l_i[\"TF Inhibitor\"] = attack_seqs_2D\n\n print(\"Layer {}: \".format(layer), l_i)\n # add the completed layer to the NN list\n NN.append(l_i)\n\n return NN", "def buildTree(self, nums, start, end):\n if start > end: # did not have this stop condition causing error of corner case: nums=[]\n return None\n node = SegmentTreeNode(start, end)\n if start == end:\n node.val = nums[start]\n else:\n mid = start+(end-start)/2\n left = self.buildTree(nums, start, mid)\n right = self.buildTree(nums, mid+1, end)\n node.left, node.right = left, right\n node.val = left.val + right.val \n # node.val = (left.val if left else 0) + (right.val if right else 0) : not necessary because non-leaf node always have two children\n return node", "def dijkstra(self,start):\n path_weight = {i : float('inf') for i in range(self.n)}\n path_weight[start] = 0\n previous = {i : float('nan') for i in range(self.n)}\n remaining = PriorityQueue()\n for node,priority in 
path_weight.items():\n remaining.put((priority,node))\n\n while not remaining.empty():\n priority,node = remaining.get()\n for tgt,weight in self.edges[node].items():\n possibleNewWeight = path_weight[node] + weight\n if (possibleNewWeight < path_weight[tgt]):\n path_weight[tgt] = possibleNewWeight\n previous[tgt] = node\n \n return path_weight, previous", "def make_complete_graph (num_nodes) :\n graph = dict ()\n if (num_nodes < 1) :\n return graph\n\n for node_ind in range (num_nodes) :\n # create a set containing nodes adjacent to node node_ind\n # node node_ind of the complete graph will have edges to all other nodes except itself\n adj_nodes = range (num_nodes) # list containing numbers from 0 - num_nodes-1\n adj_nodes.remove(node_ind)\n graph[node_ind] = set(adj_nodes)\n\n return graph", "def to_start(self, node):\n if node in self.graph:\n if node in self.keep_index_backward:\n for pred in self.keep_index_backward[node]:\n self.to_start(pred)\n\n if node in self.graph:\n self.start.append(node)\n self.graph.remove_node(node)\n\n if node in self.keep_index_forward:\n for succ in self.keep_index_forward[node]:\n self.to_start(succ)\n self.logger.debug('%s %s\\t(to_start: %s)', self.start, self.end, node)", "def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict", "def create_basic_cyclic_adjacency_map():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\"],\n \"D\": [\"E\"],\n \"E\": [\"C\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def generate_graph(self):\n glw = GraphLineWeights()\n\n node_id = 0\n last_key = list(self.storage.keys())[-1]\n\n for key in tqdm.tqdm(self.storage):\n for key_line in self.storage[key]:\n for node in self.storage[key][key_line]:\n # set unique node id and calculate centroid\n node.id = node_id\n node.center_x = node.left + int(node.width / 2)\n node.center_y = node.top + int(node.height / 2)\n node_id += 1\n for key in self.storage:\n for key_line in self.storage[key]:\n for node_with_id in self.storage[key][key_line]:\n # print(node_with_id.word)\n # print(node_with_id.left, node_with_id.top, node_with_id.width, node_with_id.height)\n # consider 4 sides: top, right, bottom, left\n # glw: 0 -> 1 -> 2 -> 3\n # 1. top, verified\n min_dist = self.get_top_node(node_with_id, key - 1, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 0, node_with_id.top_node_id, min_dist)\n # 2. bottom\n min_dist = self.get_bottom_node(node_with_id, key + 1, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 2, node_with_id.bottom_node_id, min_dist)\n # 3. left\n min_dist = self.get_left_node(node_with_id, key, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 3, node_with_id.left_node_id, min_dist)\n # 4. 
right\n min_dist = self.get_right_node(node_with_id, key, key_line, last_key)\n glw.add_node_id_connection(node_with_id.id, 1, node_with_id.right_node_id, min_dist)\n\n return glw", "def dfs_paths_dict_recur(\n graph: Mapping[Node, set[Node]],\n start: Node,\n goal: Node,\n path: Optional[list[Node]] = None\n) -> Iterable[list[Node]]:\n if path is None:\n path = [start]\n if start == goal:\n yield path\n else:\n for next_node in graph[start].difference(path):\n next_path = path + [next_node]\n yield from dfs_paths_dict_recur(graph, next_node, goal, next_path)", "def calculateroomhops(self,nodeStart):\n roomDists = np.tile(-1,(game.NROOMS,))\n locs = [nodeStart]\n nodesVisited = set((nodeStart,))\n #\n # If we have already entered a room, there are some squares we can \n # eliminate with certainty: even for any possible configuration of\n # player pieces on the board.\n #if self.hasEnteredRoomYet:\n # GAMEBOARD = game.TRIMMEDNODES\n #else:\n GAMEBOARD = game.BOARDNODES\n #\n ixRoomStart = game.ROOMNODEIXS.get(nodeStart,None)\n if not ixRoomStart is None:\n # If we start in a room, set the distance to this room to zero and\n # set the distances to any rooms connected by secret passages to one\n roomDists[ixRoomStart] = 0\n passageNode = game.PASSAGES.get(nodeStart,None)\n if not passageNode is None:\n ixRoomPassage = game.ROOMNODEIXS.get(passageNode,None)\n if not ixRoomPassage is None:\n roomDists[ixRoomPassage] = 1\n nodesVisited.add(passageNode)\n #\n # For remaining rooms, go hunting for the fastest route!\n moveDist = 0\n while any(roomDists < 0) and len(locs):\n moveDist += 1\n newLocs = []\n for ixLoc in range(len(locs)):\n loc = locs[ixLoc]\n for newLoc in GAMEBOARD[loc]:\n # Nodes are only visited if they've not been reached before\n # (this algorithm finds fastest routes, not all possible \n # routes)\n if not newLoc in nodesVisited:\n # If the new location is a room, we enter it and \n # terminate whether or not someone else is present\n if newLoc in game.ALLOWEDROOMNODES:\n roomDists[game.ROOMNODEIXS[newLoc]] = moveDist\n nodesVisited.add(newLoc)\n else:\n # Given that the new location is not a room, we may\n # only enter if nobody else is stood on this square\n if not newLoc in self.charLocations:\n newLocs.append(newLoc)\n nodesVisited.add(newLoc)\n locs = newLocs\n #\n return roomDists", "def neighborJoining(distances):\n\n tree = {}\n\n while(len(distances.keys()) > 2):\n\n r = calcRs(distances)\n M = makeMMatrix(distances, r)\n\n smallest = 10000\n smallestKey = (\"\",\"\")\n\n #Find nearest neighbors\n for key in M.keys():\n for subkey in M[key].keys():\n if M[key][subkey] < smallest:\n smallest = M[key][subkey]\n smallestKey = (key, subkey)\n\n #Add new node and update distances to rest of tree\n newname = smallestKey[0] + \"-\" + smallestKey[1]\n distances[newname] = {}\n tree[smallestKey[0]] = {}\n tree[smallestKey[1]] = {}\n dij = distances[smallestKey[0]][smallestKey[1]]\n for key in M.keys():\n if key in smallestKey:\n continue\n distances[newname][key] = .5*(distances[smallestKey[0]][key] \\\n + distances[smallestKey[1]][key] - dij)\n distances[key][newname] = distances[newname][key]\n\n #Update distances to parents of node\n dik = (dij + r[smallestKey[0]] - r[smallestKey[1]])/2\n tree[smallestKey[0]][newname] = dik\n tree[smallestKey[1]][newname] = dij-dik\n detachDict(distances, smallestKey[0], smallestKey[1])\n\n #Connect final two nodes\n tree[distances.keys()[0]] = {}\n tree[distances.keys()[0]][distances[distances.keys()[0]].keys()[0]] =\\\n 
distances[distances.keys()[0]][distances[distances.keys()[0]].keys()[0]] \n return tree", "def nodes_at_depth(depth):\n return list(range(2**depth-1, 2**(depth+1)-1))", "def wsngraph():\n G = nx.Graph()\n G.add_node(1)\n G.add_node(2)\n G.add_node(3)\n G.add_node(4)\n G.add_node(5)\n G.add_node(6)\n G.add_node(7)\n G.add_node(8)\n G.add_node(9)\n G.add_node(10)\n G.add_node(11)\n G.add_node(12)\n G.add_edge(1,3,weight=1)\n G.add_edge(1,2,weight=6)\n G.add_edge(1,12,weight=16)\n G.add_edge(2,11,weight=12)\n G.add_edge(2,6,weight=10)\n G.add_edge(2,5,weight=11)\n G.add_edge(3,4,weight=10)\n G.add_edge(3,7,weight=11)\n G.add_edge(3,8,weight=14)\n G.add_edge(3,9,weight=11)\n G.add_edge(4,7,weight=9)\n G.add_edge(5,6,weight=7)\n G.add_edge(5,9,weight=12)\n G.add_edge(6,9,weight=9)\n G.add_edge(7,10,weight=10)\n G.add_edge(8,10,weight=2)\n G.add_edge(8,11,weight=11)\n G.add_edge(8,9,weight=12)\n G.add_edge(9,11,weight=8)\n G.add_edge(10,12,weight=3)\n G.pos={}\n G.pos[1]=(6,4)\n G.pos[2]=(-1,3.7)\n G.pos[3]=(4.7,3.5)\n G.pos[4]=(5.3,3.2)\n G.pos[5]=(0,3)\n G.pos[6]=(1.4,3.4)\n G.pos[7]=(5,2.6)\n G.pos[8]=(4.7,0)\n G.pos[9]=(1.4,2.4)\n G.pos[10]=(5.2,0.5)\n G.pos[11]=(1.3,0)\n G.pos[12]=(6,2.4)\n elarge=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] > 8]\n esmall=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] <= 8]\n nx.draw_networkx_nodes(G,G.pos,node_color='w')\n nx.draw_networkx_edges(G,G.pos,elarge,width=3,edge_color='r',alpha=0.3)\n nx.draw_networkx_edges(G,G.pos,esmall,width=1,edge_color='b',alpha=0.3)\n nx.draw_networkx_labels(G,G.pos)\n ax=plt.gca()\n ax.axison = False\n label = {} \n for (u,v) in G.edges():\n d = G.get_edge_data(u,v)\n label[(u,v)]=d['weight']\n edge_label=nx.draw_networkx_edge_labels(G,G.pos,edge_labels=label)\n\n return(G)", "def create_time_tree(time_start, time_end):\n start = str_to_timedelta(time_start)\n end = str_to_timedelta(time_end)\n num_ticks = timedelta_to_ticks(end - start)\n _log.debug(\"Number of ticks: %s\" % num_ticks)\n nodes = []\n for tick in xrange(num_ticks):\n nodes.append(Node(Tick(tick)))\n return nodes", "def Dijkstra(gridMap: Map, iStart: int, jStart: int, openType=Open, closedType=Closed):\n OPEN = openType()\n CLOSED = closedType()\n\n node = Node(iStart, jStart, g=0, h=0)\n OPEN.AddNode(node)\n while not OPEN.isEmpty():\n node = OPEN.GetBestNode()\n\n neighbors = gridMap.GetNeighbors(node.i, node.j)\n for neighbor in neighbors:\n if not CLOSED.WasExpanded(Node(*neighbor)):\n g = node.g + CalculateCost(node.i, node.j,\n *neighbor)\n node_n = Node(*neighbor, g=g, h=0, parent=node)\n OPEN.AddNode(node_n)\n\n CLOSED.AddNode(node)\n\n return CLOSED", "def create_basic_adjacency_map_1():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\", \"E\"],\n \"D\": [\"X\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def _node_walk_downhill(self, node):\n \n chain = -np.ones(self.tri.npoints, dtype=np.int) # in case the mesh is a spiral ziggurat\n\n idx = 0\n maxIdx = self.tri.npoints\n chain[idx] = node\n low_neighbour = self._node_lowest_neighbour(node)\n junction = -1\n\n while low_neighbour != -1:\n idx += 1\n chain[idx] = low_neighbour \n if self.node_chain_lookup[low_neighbour] != -1:\n junction = self.node_chain_lookup[low_neighbour]\n break \n\n low_neighbour = self._node_lowest_neighbour(low_neighbour)\n \n return junction, chain[0:idx+1]", "def depth_first_traversal_iterative(self, start):\n try:\n res = []\n stack = Stack([start])\n track = set()\n while stack.top:\n cur_node 
= stack.pop()\n if cur_node not in track:\n res.append(cur_node)\n track.add(cur_node)\n for child in reversed(self.node_dict[cur_node]):\n stack.push(child)\n except KeyError:\n raise KeyError(str(start) + ' not in graph')\n return res", "def _DFS_loop(nodes, edges, t_n=None):\n\n if t_n is not None:\n n_t = dict((b,a) for a,b in t_n.items()) # {time: node}\n get_node_by_time = lambda time: time if t_n is None else n_t[time]\n get_time_by_node = lambda node: node if t_n is None else t_n[node]\n gen_edges = lambda node: map(get_time_by_node,edges[get_node_by_time(node)])\n\n explored = set()\n leader = dict()\n _DFS_loop.t = 0 # finishing time\n times = dict() # {time: node}\n\n def DFS(i):\n explored.add(i)\n leader[i] = s\n for j in gen_edges(i):\n if j not in explored:\n DFS(j)\n _DFS_loop.t += 1\n times[i] = _DFS_loop.t\n\n for i in nodes:\n if i not in explored:\n s = i # leader node\n DFS(i)\n\n leaders = defaultdict(list)\n for n,l in leader.items():\n leaders[get_node_by_time(l)].append(get_node_by_time(n))\n\n return times, leaders", "def dfs(graph_dict, node, track):\n\n track.explored.add(node)\n track.leader[node] = track.current_source\n for head in graph_dict[node]:\n if head not in track.explored:\n dfs(graph_dict, head, track)\n track.current_time += 1\n track.finish_time[node] = track.current_time", "def create_graph(events):\n events.sort()\n base_str = events[0].time_start\n ticks = create_time_tree(base_str, events[-1].time_end)\n base = str_to_timedelta(base_str)\n for e in events:\n start = str_to_timedelta(e.starts())\n end = str_to_timedelta(e.ends())\n start_tick = timedelta_to_ticks(start - base)\n end_tick = timedelta_to_ticks(end - base)\n for tick in xrange(start_tick, end_tick):\n _log.debug(\"Node %s has neighbour node %s.\" % (e, ticks[tick]))\n e.add_neighbour(ticks[tick])\n ticks[tick].add_neighbour(e)\n return events", "def dijkstra(\n from_node: T,\n expand: typing.Callable[[T], typing.Iterable[typing.Tuple[int, T]]],\n to_node: typing.Optional[T] = None,\n heuristic: typing.Optional[typing.Callable[[T], int]] = None,\n) -> typing.Tuple[typing.Dict[T, int], typing.Dict[T, T]]:\n if heuristic is None:\n heuristic = lambda _: 0\n seen = set() # type: typing.Set[T]\n g_values = {from_node: 0} # type: typing.Dict[T, int]\n parents = {} # type: typing.Dict[T, T]\n\n # (f, g, n)\n todo = [(0 + heuristic(from_node), 0, from_node)] # type: typing.List[typing.Tuple[int, int, T]]\n\n while todo:\n f, g, node = heapq.heappop(todo)\n\n assert node in g_values\n assert g_values[node] <= g\n\n if node in seen:\n continue\n\n assert g_values[node] == g\n if to_node is not None and node == to_node:\n break\n seen.add(node)\n\n for cost, new_node in expand(node):\n new_g = g + cost\n if new_node not in g_values or new_g < g_values[new_node]:\n parents[new_node] = node\n g_values[new_node] = new_g\n heapq.heappush(todo, (new_g + heuristic(new_node), new_g, new_node))\n \n return (g_values, parents)", "def dijkstra(graph, start):\n unvisited = []\n weight = {}\n prev = {}\n time = {}\n imp = {}\n cost = {}\n dist = {}\n for node in graph.keys():\n # add all nodes to 'unvisited' with no previous node and a weight of infinity\n unvisited.append(node)\n weight[node] = float('inf')\n time[node] = float('inf')\n imp[node] = float('inf')\n cost[node] = float('inf')\n dist[node] = float('inf')\n prev[node] = None\n\n # set the starting distance to be 0\n weight[start] = 0\n time[start] = 0\n imp[start] = 0\n cost[start] = 0\n dist[start] = 0\n\n # iterate until no node is left 
unvisited\n while len(unvisited) > 0:\n # get the lowest distance that has not yet been visited\n curr_node = min(weight.viewkeys() & unvisited, key=weight.get)\n # mark the node as visited\n unvisited.remove(curr_node)\n # iterate through each neighbor of the current node\n for neighbor in graph[curr_node]:\n # calculate distance to that node from this node\n tmp_weight = weight[curr_node] + neighbor[WEIGHT]\n tmp_time = time[curr_node] + neighbor[TIME]\n tmp_imp= imp[curr_node] + neighbor[IMP]\n tmp_cost = cost[curr_node] + neighbor[COST]\n tmp_dist = dist[curr_node] + neighbor[DISTANCE]\n # if this distance is less than the one already stored at that node\n if tmp_weight < weight[neighbor[NEXT_NODE]]:\n # we store this distance as its distance,\n weight[neighbor[NEXT_NODE]] = tmp_weight\n time[neighbor[NEXT_NODE]] = tmp_time\n imp[neighbor[NEXT_NODE]] = tmp_imp\n cost[neighbor[NEXT_NODE]] = tmp_cost\n dist[neighbor[NEXT_NODE]] = tmp_dist\n # and this node as its previous node\n prev[neighbor[NEXT_NODE]] = curr_node\n\n return weight, prev, time, imp, cost, dist", "def generate_level(level):\n seed = level * 69420 # multiply by 69420 to not have the seeds too close to each other\n random.seed(seed)\n dimensions = get_map_size(level)\n level_map = np.full(dimensions, -1)\n while -1 in level_map:\n choice = random.choice(np.argwhere(level_map == -1))\n next_index = (choice[0], choice[1])\n # get indices of the tiles next to the current index\n left_index, up_index, right_index, down_index = get_direction_indices(next_index)\n left = tile_needs_connection(left_index, level_map, has_connection_right)\n up = tile_needs_connection(up_index, level_map, has_connection_down)\n right = tile_needs_connection(right_index, level_map, has_connection_left)\n down = tile_needs_connection(down_index, level_map, has_connection_up)\n level_map[next_index] = get_tile(left, up, right, down)\n return un_solve(level_map)", "def dfs(start_node, goal_state, limit = None, iterative = False, graphSearch = False, improved_descendants = False):\t\n\tfringe = [start_node]\n\tnumber_nodes_expanded = 0\n\tnumber_nodes_visited = 0\n\n\tt0 = time.time()\n\n\tif graphSearch:\n\t\tclosed = {} #hash_map\n\n\twhile len(fringe) > 0:\n\t\tnumber_nodes_visited += 1\n\t\tnode = fringe.pop()\n\t\tnode.count = number_nodes_visited\n\n\t\tt1 = time.time()\n\t\tif (t1 - t0) > 900:\n\t\t\tprint(\"It took more than 15 min\")\n\t\t\tif iterative:\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn False\n\t\t\n\t\tif node.check_solution(goal_state):\n\t\t\t_ = print_solution(node, number_nodes_expanded, goal_state)\n\t\t\tif iterative:\n\t\t\t\treturn True, number_nodes_visited\n\t\t\tprint(\"Expanded nodes: \" + str(number_nodes_expanded))\n\t\t\treturn True \n\n\n\t\tif limit == None or node.depth < limit:\n\t\t\tif graphSearch:\n\t\t\t\tnode_hash = node.build_hash()\n\t\t\t\tnode_depth = node.depth\n\t\t\t\t#can also add if it's found i at smaller depth. 
Grants solution every time\n\t\t\t\tif node_hash not in closed or closed[node_hash] > node_depth:\n\t\t\t\t\tclosed[node_hash] = node_depth\n\t\t\t\t\tnumber_nodes_expanded += 1\n\t\t\t\t\tchild_nodes = node.successors(improved_descendants)\n\t\t\t\t\tfor i in range(len(child_nodes)):\n\t\t\t\t\t\tfringe.append(child_nodes[i])\n\t\t\telse:\n\t\t\t\tnumber_nodes_expanded += 1\n\t\t\t\tchild_nodes = node.successors(improved_descendants)\n\t\t\t\tfor i in range(len(child_nodes)):\n\t\t\t\t\tfringe.append(child_nodes[i])\n\t\n\tif iterative:\n\t\treturn False, number_nodes_visited\n\t\t\t\n\treturn False", "def dijkstras(occupancy_map,x_spacing,y_spacing,start,goal):\n ROWS, COLS = occupancy_map.shape\n #convert physical location to index in the grid\n startNode = locToIndex(start, x_spacing, y_spacing)\n startingNodeLoc = indexToLoc(startNode, x_spacing, y_spacing)\n initialcost = math.sqrt((startingNodeLoc[0] - start[0])**2 + (startingNodeLoc[1] - start[1])**2)\n goalNode = locToIndex(goal, x_spacing, y_spacing)\n \n freelist = np.where(occupancy_map == 0)\n if occupancy_map[startNode[0], startNode[1]] != 0:\n #raise ValueError(\"start : ({}, {}) invalid, is an obstacle\".format(startNode[0], startNode[1]))\n startNode = findValidNode(startNode, start, occupancy_map, x_spacing, y_spacing)\n if occupancy_map[goalNode[0], goalNode[1]] != 0:\n #raise ValueError(\"goal: ({}, {}) invalid, is an obstacle\".format(goalNode[0], goalNode[1]))\n goalNode = findValidNode(goalNode, goal, occupancy_map, x_spacing, y_spacing)\n candidate = [ [sys.float_info.max, \n i, (freelist[0][i], freelist[1][i])] for i in range(len(freelist[0]))] \n visited = set([])\n queue = PriorityQueue(candidate)\n paths = {}\n found = False\n\n #update initial cost\n queue.remove(startNode)\n queue.insert(startNode, initialcost)\n paths[startNode] = None\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, 1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, -1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, -1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n while queue.size() > 0:\n priority, current = queue.pop()\n if current == goalNode:\n found = True\n break\n #not reaching goal node yet, for each of its neighbor, update the weight\n visited.add(current)\n update(occupancy_map, ROWS, COLS, current, 0, 1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 0, -1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, -1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n \n if not found:\n raise ValueError(\"fail to find shortest path\")\n node = goalNode\n shortestpath = []\n while node is not None:\n shortestpath.append(node)\n node = paths[node]\n #shortestpath.append(startNode)\n #print (startNode)\n #print ('*', list(reversed(shortestpath)))\n #print (goalNode)\n p = list(reversed([ indexToLoc(n, x_spacing, y_spacing) for n in shortestpath]))\n #start and final position may not fall on center of the grid\n if abs(p[0][0] - start[0]) > 0.0005 or abs(p[0][1] - start[1]) > 0.0005:\n p.insert(0, [start[0][0], start[1][0]])\n if abs(p[-1][0] - goal[0]) > 0.0005 or 
abs(p[-1][1] - goal[1]) > 0.0005:\n p.append([goal[0][0], goal[1][0]])\n res = np.array(p)\n print (res)\n return res", "def make_ws_graph(num_nodes, clockwise_neighbours, rewiring_prob):\r\n #initialize empty graph\r\n ws_graph = {}\r\n for vertex in range(num_nodes): ws_graph[vertex] = []\r\n #add each vertex to clockwise neighbours\r\n for vertex in range(num_nodes):\r\n for neighbour in range(vertex + 1, vertex + clockwise_neighbours + 1):\r\n neighbour = neighbour % num_nodes\r\n ws_graph[vertex] += [neighbour]\r\n ws_graph[neighbour] += [vertex]\r\n for vertex in range(num_nodes):\r\n for neighbour in ws_graph[vertex]:\r\n if random.random() < rewiring_prob:\r\n ws_graph[vertex].remove(neighbour)\r\n ws_graph[neighbour].remove(vertex)\r\n randNode = random.randint(0, num_nodes-1)\r\n while(vertex == randNode):\r\n randNode = random.randint(0, num_nodes - 1)\r\n ws_graph[vertex] += [randNode]\r\n ws_graph[randNode] += [vertex]\r\n\r\n\r\n return ws_graph\r\n #rewire each edge with probability rewiring_prob\r\n\r\n #consider each vertex\r\n\r\n #consider each neighbour\r\n\r\n #decide whether to rewire and join to a random node\r\n\r\n #update if necessary\r", "def makeGraph(locations, distances):\n graph = dict() # maps (lat, lng) to Node\n for location,distance in zip(locations, distances):\n currLocation = location[0]\n neighbors = location[1:]\n makeNode(currLocation, neighbors, distance[1:], graph)\n return graph", "def make_complete_graph(num_nodes):\n complete_digraph = {}\n if num_nodes > 0 and type(num_nodes) == int:\n neighbors = set([idx for idx in range(num_nodes)])\n for idx in range(num_nodes):\n complete_digraph[idx] = neighbors.copy() #creates adjacency set\n complete_digraph[idx].remove(idx) # pop out self-loop \n return complete_digraph", "def _add_graph_level(graph, level, parent_ids, names, scores):\n for i, parent_id in enumerate(parent_ids):\n new_node = (level, i)\n parent_node = (level - 1, parent_id)\n graph.add_node(new_node)\n graph.node[new_node][\"name\"] = names[i]\n graph.node[new_node][\"score\"] = str(scores[i])\n graph.node[new_node][\"size\"] = 100\n # Add an edge to the parent\n graph.add_edge(parent_node, new_node)", "def dfs_iter(graph, start):\n # vkladam uzel a index potencialniho naslednika, kterym mam pokracovat\n stack = [(start, 0)]\n time = 1\n graph.discovery_time[start] = time\n graph.visited[start] = True\n\n while stack: # not empty\n u, v = stack.pop()\n\n while v < graph.size and not is_edge(graph, u, v):\n v += 1\n\n if v < graph.size:\n # found successor, u is not yet finished\n stack.append((u, v + 1))\n\n if not graph.visited[v]:\n # we have discovered v\n stack.append((v, 0))\n graph.parent[v] = u\n graph.visited[v] = True\n time += 1\n graph.discovery_time[v] = time\n else:\n # u has no more successors\n time += 1\n graph.finishing_time[u] = time", "def shortest_path(start, end, verbose=0, termination_check=True):\n # Set up\n dict_from_start = d()\n dict_form_end = d()\n agenda_start = Queue()\n agenda_end = Queue()\n operations = rubik.quarter_twists\n\n agenda_start.put_elt(SearchNode((start, None), None))\n agenda_end.put_elt(SearchNode((end, None), None))\n flip = True\n if termination_check: counter = 0\n\n if verbose:\n print \"====================================================================================\"\n print \"start: {0}\".format(start)\n print \"end: {0}\".format(end)\n print \"State: {0}\".format(\"SetUp\")\n print \"dict_start: {0}\".format(dict_from_start)\n print \"dict_end: {0}\".format(dict_form_end)\n 
print \"agenda_start: {0}\".format(agenda_start)\n print \"agenda_end: {0}\".format(agenda_end)\n print \"====================================================================================\"\n\n while not agenda_start.is_empty() and not agenda_end.is_empty():\n if verbose:\n print \"====================================================================================\"\n print \"State: {0}\".format(\"InLoop_s\")\n if flip:\n print \"At Start\"\n print \"dict_start: {0}\".format(dict_from_start)\n print \"agenda_start: {0}\".format(agenda_start)\n if not flip:\n print \"At End\"\n print \"dict_end: {0}\".format(dict_form_end)\n print \"agenda_end: {0}\".format(agenda_end)\n print \"calculating...\"\n\n # Flipping style\n if flip:\n _dict = dict_from_start\n _agenda = agenda_start\n _other = dict_form_end\n else:\n _dict = dict_form_end\n _agenda = agenda_end\n _other = dict_from_start\n # do one level permutation\n current = _agenda.pop_elt()\n _dict[current.name] = current\n children = [SearchNode((rubik.perm_apply(op, current.value), op), current) for op in operations]\n # dynamic programming\n for child in children:\n if child.name not in _dict:\n _agenda.put_elt(child)\n\n if verbose:\n print \"State: {0}\".format(\"InLoop_e\")\n if flip:\n print \"dict_start: {0}\".format(dict_from_start)\n print \"agenda_start: {0}\".format(agenda_start)\n else:\n print \"dict_end: {0}\".format(dict_form_end)\n print \"agenda_end: {0}\".format(agenda_end)\n print \"====================================================================================\"\n\n # Termination Check\n if termination_check:\n if len(_dict) >= 100 and counter == 0:\n counter += 1\n print \"100\"\n elif len(_dict) >= 1000 and counter == 1:\n counter += 1\n print \"1000\"\n elif len(_dict) >= 10000 and counter == 2:\n counter += 1\n print \"10000\"\n elif len(_dict) >= 100000 and counter == 3:\n counter += 1\n print \"100000\"\n elif len(_dict) >= 1000000 and counter == 4:\n counter += 1\n print \"1000000\"\n # elif len(_dict) >= 2000000 and counter == 5:\n # counter += 1\n # print \"2000000\"\n # elif len(_dict) >= 2000000 and counter == 6:\n # counter += 1\n # print \"3000000\"\n if len(_dict) >= 3674160//2:\n break\n\n # Flip\n flip = not flip\n # Terminate condition\n if verbose: print \"check Termination..........\"\n if current.name in _other:\n from_start = dict_from_start[current.name].get_path()\n from_end = [rubik.perm_inverse(op) for op in reversed(dict_form_end[current.name].get_path())]\n if verbose: print \"Result: {0}\".format([rubik.quarter_twists_names[op] for op in from_start+from_end])\n return from_start + from_end\n if verbose: print \"Done checking.\"\n\n if verbose: print \"No solution\"\n return None", "def build_2_node_graph(directed=False):\n if directed:\n graph = DirectedGraph()\n else:\n graph = UndirectedGraph()\n\n graph.new_node()\n graph.new_node()\n graph.new_edge(1, 2)\n\n return graph", "def traverse_graph_start(graph):\n\n def traverse(graph, node):\n\n children = [int(c) for c in graph[node][\"children\"]]\n tagged_children = []\n for child in children:\n ellipsed_parents = [int(p) for p in graph[child][\"ellipsed_parents\"]]\n # if the child is explicit\n if node not in ellipsed_parents:\n if graph[child][\"terminal\"] == \"yes\":\n tagged_children.append(ParentedTree(graph[child][\"tag\"], [graph[child][\"text\"]]))\n else:\n tagged_children.append(traverse(graph, child))\n # if the child is ellipsed\n else:\n ellipsis_tag = get_ellipsis_tag_from_graph(graph, child)\n 
tagged_children.append(ParentedTree(ellipsis_tag, []))\n \n tree = ParentedTree(graph[node][\"tag\"], tagged_children)\n\n return tree\n \n tree = traverse(graph, 0)\n positions = [pos for pos in tree.treepositions() if pos not in tree.treepositions(\"leaves\")]\n rev_positions = [pos for pos in reversed(positions)]\n for pos_i, pos in enumerate(rev_positions):\n # append starting_node tag to the previous node\n if tree[pos].label().startswith(\"start\"):\n prev_pos_i = pos_i + 1\n prev_pos = rev_positions[prev_pos_i]\n tree[prev_pos].set_label(tree[prev_pos].label() + tree[pos].label())\n del tree[pos]\n\n return tree", "def bfs(start_node, goal_node, max_depth) -> \"solution path\":\n\td = deque([start_node,[]])\n\texplored = {}\n\tlevel = 0\n\n\t# Return empty path if start is equal to goal\n\tif start_node == goal_node:\n\t\treturn []\n\n\t# Keep exploring while the deque has nodes\n\twhile len(d) > 0:\n\t\tpath = d.popleft()\n\n\t\tif level == 0:\n\t\t\tnode = path\n\t\telse:\n\t\t\t# To keep track of levels an empty node gets popped between levels which will cause an exception\n\t\t\ttry:\n\t\t\t\tnode = path[-1]\n\t\t\texcept Exception:\n\t\t\t\tnode = []\n\t\t\t\tpass\n\n\t\tif len(node) == 0:\n\t\t\tlevel += 1\n\t\t\t# Return empty list if max depth was reached\n\t\t\tif max_depth == level:\n\t\t\t\treturn []\n\t\t\td.append(node)\n\n\t\telse:\n\t\t\tval = getNodeVal(node)\n\t\t\tif val not in explored:\n\n\t\t\t\t# Mark node as explored\n\t\t\t\texplored[val] = True\n\n\t\t\t\tfor row in range(len(node)):\n\t\t\t\t\tfor col in range(len(node)):\n\t\t\t\t\t\tchild = toggle(node, row, col)\n\t\t\t\t\t\tnew_path = list(path)\n\t\t\t\t\t\tif level == 0:\n\t\t\t\t\t\t\tnew_path = [new_path]\n\t\t\t\t\t\tnew_path.append(child)\n\t\t\t\t\t\td.append(new_path)\n\t\t\t\t\t\tif child == goal_node:\n\t\t\t\t\t\t\tlevel+=1\n\t\t\t\t\t\t\treturn new_path\n\t# No solution found\n\treturn []", "def shortest_path(start, end):\n\troot1=node(start)\n\troot2=node(end)\n\tarr1=[[] for wqw in range(8)]\n\tarr2=[[] for qwq in range(8)]\n\tser1=[[] for lp in range(100000)]\n\tser2=[[] for lp in range(100000)]\n\tarr1[0].append(root1)\n\tarr2[0].append(root2)\n\tser1[(hash(start))%100000].append(start)\n\tser2[(hash(end))%100000].append(end)\n\tflag=0\n\tchk=None\n\tfor h in range(0,7):\n\t\tchk=check_match(arr1[h],arr2[h])\n\t\tif not chk is None:\n\t\t\tflag=1\n\t\t\tbreak\n\t\telse:\n\t\t\tfor w in range(len(arr1[h])):\n\t\t\t\tfin=insert_e(arr1[h][w].data,h+1,arr1,ser1)\n\t\t\t\tarr1=fin[0]\n\t\t\t\tser1=fin[1]\n\t\tchk=check_match(arr2[h],arr1[h+1])\n\t\tif not chk is None:\n\t\t\tflag=2\n\t\t\tbreak\n\t\telse:\n\t\t\tfor q in range(len(arr2[h])):\n\t\t\t\tfim=insert_e(arr2[h][w].data,h+1,arr2,ser2)\n\t\t\t\tarr2=fim[0]\n\t\t\t\tser2=fim[1]\n\tif flag==1:\n\t\twhile not chk[0].parent is None:\n\t\t\tres1.append(chk[0].parent.data)\n\t\t\tchk[0]=chk[0].parent\n\t\tres+=res1.reverse()\n\t\tres.append(chk[1])\n\t\twhile not chk[1].parent is None:\n\t\t\tres2.append(chk[1].parent.data)\n\t\t\tchk[1]=chk[1].parent\n\t\treturn res\n\telif flag==2:\n\t\twhile not chk[1].parent is None:\n\t\t\tres1.append(chk[1].parent.data)\n\t\t\tchk[1]=chk[1].parent\n\t\tif not res1 is None:\n\t\t\tres+=res1.reverse()\n\t\tres.append(chk[0])\n\t\twhile not chk[0].parent is None:\n\t\t\tres2.append(chk[0].parent.data)\n\t\t\tchk[0]=chk[0].parent\n\t\treturn res\n\telse:\n\t\tprint \"The given configuration is not solvable/wrong.\"", "def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening 
Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n :param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 to infinity\n print(\"Lower-bound of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list", "def adjacent_pairs(self, nodes: Tuple[int], k: int) -> List[Tuple[int, int]]:\n n = len(nodes)\n return [(u, nodes[j % n])\n for i, u in enumerate(nodes)\n for j in range(i + 1, i + 1 + k // 2)]", "def __create_node(self, from_node_id, to_node_id):\n #ensure from_node_id and start_node_id is not the same\n if from_node_id == to_node_id:\n print(\"Cannot insert same node\")\n return\n \n # 1. declare two variable nodes\n n1 = n2 = None\n \n # 2. check if exist\n for x in self.__node:\n if x.getId()==from_node_id:\n n1 = x\n if x.getId()==to_node_id:\n n2 = x\n\n # 3. 
if n1 or n2 is None, create from_node_id / to_node_id\n if n1 is None:\n n1 = Node(from_node_id)\n self.__node.append(n1)\n \n if n2 is None:\n n2 = Node(to_node_id)\n self.__node.append(n2)\n\n #return from_node and to_node\n return n1, n2", "def dijsktra(graph, initial, end):\n shortest_paths = {initial: (None, 0)}\n current_node = initial\n visited = set()\n\n while current_node != end:\n visited.add(current_node)\n destinations = graph.edges[current_node]\n weight_to_current_node = shortest_paths[current_node][1]\n logger.debug(\"shortest_paths = {}\".format(shortest_paths))\n\n for next_node in destinations:\n weight = graph.weights[(current_node, next_node)] + weight_to_current_node\n logger.debug(\"next_node = {}\".format(next_node))\n logger.debug(\"graph.weights[(current_node, next_node)] = {}\".format(graph.weights[(current_node, next_node)]))\n logger.debug(\"weight_to_current_node = {}\".format(weight_to_current_node))\n logger.debug(\"weight = {}\".format(weight))\n if next_node not in shortest_paths:\n shortest_paths[next_node] = (current_node, weight)\n else:\n current_shortest_weight = shortest_paths[next_node][1]\n if current_shortest_weight > weight:\n shortest_paths[next_node] = (current_node, weight)\n\n next_destinations = {node: shortest_paths[node] for node in shortest_paths if node not in visited}\n logger.debug(\"next_destinations = {}\".format(next_destinations))\n\n if not next_destinations:\n return \"Route not possible\"\n\n current_node = min(next_destinations, key=lambda k: next_destinations[k][1])\n logger.debug(\"current_node = {}\".format(current_node))\n\n path = []\n while current_node is not None:\n path.append(current_node)\n next_node = shortest_paths[current_node][0]\n current_node = next_node\n\n # Reverse the path\n path = path[::-1]\n return path", "def add_node(self, node: dict):\n # check if it is not overriding existing node\n if node.get('id') is not None:\n if node['id'] in self._nodes:\n raise ValueError('tried to override node %s' % node['id'])\n else:\n raise ValueError('no id for node provided')\n\n # append node to list\n id_ = node['id']\n del node['id']\n\n # set default values for node\n # remember to add new attributes here and in __init__ root node\n node['player'] = '0' if node.get('player') is None else node['player']\n node['value'] = [0, 0] if node.get('value') is None else node['value']\n node['parents'] = {} if node.get('parents') is None else node['parents']\n node['children'] = {} if node.get('children') is None else node['children']\n node['probability'] = 1 if node.get('probability') is None else node['probability']\n node['branch'] = {} if node.get('branch') is None else node['branch']\n node['branch']['probability'] = 1 \\\n if node['branch'].get('probability') is None else node['branch']['probability']\n\n # add player to the list of players if he is not there already\n if node['player'] not in self._players_list:\n self._players_list.append(node['player'])\n\n # add parenthood\n for parent in node['parents']:\n # noinspection PyTypeChecker\n self._nodes[parent]['children'][id_] = str(node['parents'][parent])\n\n # set depth to one more than first parent\n if node['parents']:\n node['depth'] = self._nodes[str(list(node['parents'].keys())[0])]['depth'] + 1\n else:\n node['depth'] = 0 if node.get('depth') is None else node['depth']\n\n # calculate total probability of node:\n # total probability equals sum of probabilities of parents multiplied by probability of node\n branch_probability = 0\n for parent in 
node['parents']:\n branch_probability += self._nodes[parent]['branch']['probability']\n node['branch']['probability'] = branch_probability * node['probability']\n\n # validate against the error of node not being connected to the rest of the tree via parents removal:\n if id_ is not 'root' and not node['parents']:\n raise ValueError('node [%s] is not connected to the tree - parents are empty' % id_)\n\n # add node\n self._nodes[id_] = node", "def make_complete_graph(num_nodes):\r\n if num_nodes <= 0:\r\n return dict()\r\n else:\r\n all_nodes_list = [node for node in range(num_nodes)]\r\n tmp_graph = dict()\r\n for node in range(num_nodes):\r\n adjacent_nodes_list = all_nodes_list[:]\r\n adjacent_nodes_list.remove(node)\r\n tmp_graph.update({node: set(adjacent_nodes_list)})\r\n return tmp_graph" ]
[ "0.58267266", "0.5793475", "0.56011593", "0.55371314", "0.55277735", "0.55161214", "0.55161214", "0.55161214", "0.54637635", "0.5456182", "0.5430679", "0.5403021", "0.5386815", "0.53318155", "0.5256505", "0.5214026", "0.5162665", "0.51556855", "0.5142307", "0.5126955", "0.5087093", "0.5069114", "0.50669694", "0.5053542", "0.5027528", "0.50077564", "0.49944353", "0.4976715", "0.49651432", "0.49625945", "0.49506918", "0.49350524", "0.49232978", "0.4921756", "0.49010968", "0.4900198", "0.4893417", "0.4892854", "0.48898625", "0.48777756", "0.48676413", "0.48538136", "0.4850837", "0.48359984", "0.48335856", "0.48172167", "0.48172167", "0.4815525", "0.48088372", "0.48033223", "0.47934732", "0.47924474", "0.47838452", "0.47691414", "0.47665337", "0.4764113", "0.4761959", "0.4760979", "0.47597376", "0.47523504", "0.47503382", "0.47486857", "0.4747754", "0.47448415", "0.47412306", "0.47348946", "0.47331247", "0.47297427", "0.47229403", "0.47138676", "0.4713276", "0.4708125", "0.47044072", "0.47020516", "0.46929404", "0.46925685", "0.4689564", "0.46871445", "0.46852103", "0.46808523", "0.46731737", "0.46663263", "0.46637198", "0.46611804", "0.46557373", "0.4654932", "0.46529147", "0.46512663", "0.46505257", "0.46503216", "0.4646572", "0.46465576", "0.4640012", "0.46355247", "0.46186537", "0.4615063", "0.46111804", "0.4607659", "0.45953846", "0.45953736" ]
0.7375074
0
Runs simulations to determine time (ticks) to level up attack or strength Enemy is set as sand crab (60hp, 1 def, 0 def bonus) Weapon is best available scimitar
def level_time_simulate(start_levels, attack_style, attack_bonus, strength_bonus): ticks_per_attack = 4 # Scimitar attack speed enemy_health = 60 # Sand crab health max_hit, accuracy = get_max_hit_and_accuracy( start_levels, attack_style, attack_bonus, strength_bonus) if attack_style == Attack_Style.ATTACK: start_exp = osrs.experience[start_levels.attack] end_exp = osrs.experience[start_levels.attack+1] elif attack_style == Attack_Style.STRENGTH: start_exp = osrs.experience[start_levels.strength] end_exp = osrs.experience[start_levels.strength+1] experience = end_exp - start_exp avg_ticks = combat_simulator.ticks_until_exp(max_hit, accuracy, ticks_per_attack, enemy_health, experience, osrs.BASE_EXP_PER_DAMAGE, ITERATIONS) return avg_ticks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testrandom(self):\n for i in range(100):\n WeaponAbility()", "def scenario1(height, speed):\n time = math.sqrt((2 * height) / 9.81)\n result = speed * time\n return result", "def attack(health_meter):\n hit_list = 4 * ['igrac'] + 6 * ['neprijatelj']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"NAPAD! \", end='')\n show_health(health_meter)", "def attack(health_meter):\n hit_list = 4 * ['player'] + 6 * ['enemy']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"ATTACK! \", end='')\n show_health(health_meter)", "def runSim(self):\n self.simKillResults = {}\n self.simHitResults = {}\n if self.fromArmy == False:\n self.attackingSquad = copy.deepcopy(squad.Squads[self.attackingSpin.get()])\n for num in range(eval(self.simulationSpin.get())):\n defSquad = copy.deepcopy(squad.DefSquads[self.defendingSpin.get()])\n result = self.attackingSquad.squadFire(defSquad)\n if result[0] not in self.simHitResults:\n self.simHitResults[result[0]] = 0\n self.simHitResults[result[0]] += 1\n if result[1] not in self.simKillResults:\n self.simKillResults[result[1]] = 0\n self.simKillResults[result[1]] += 1\n self.simResultsFrame = Frame(self.__mainWindow, padx=15, pady=15)\n self.simResultsFrame.grid(row=2,column=0,sticky=\"nsew\")\n self.hitResultsFrame = Frame(self.simResultsFrame, padx=10, pady=15)\n self.hitResultsFrame.grid(row=0, column=0,sticky=\"nsew\")\n self.killResultsFrame = Frame(self.simResultsFrame, padx=10, pady=15)\n self.killResultsFrame.grid(row=0, column=1,sticky=\"nsew\")\n self.maxPosFrame = Frame(self.simResultsFrame, padx=10, pady=15)\n self.maxPosFrame.grid(row=1, sticky=\"nsew\")\n numHitPoss = 0\n numWoundsPoss = 0\n if isinstance(self.attackingSquad, squad.Squad):\n for unit in self.attackingSquad.units:\n numHitPoss += eval(unit.ranged_weapon.attacks)\n else:\n for i in range(self.attackingSquad.current_size):\n for weapon in self.attackingSquad.ranged_weapons:\n numHitPoss += eval(weapon.attacks)\n for unit in squad.DefSquads[self.defendingSpin.get()].units:\n numWoundsPoss += unit.wounds\n rf = 1\n Label(self.hitResultsFrame, text=\"{} hits possible\".format(min(numWoundsPoss,numHitPoss)), font=__item_format__).grid(row=0)\n for hit in self.simHitResults:\n percent = self.simHitResults[hit]/eval(self.simulationSpin.get())*100\n t = \"{} hits: {:6.2f}%\".format(hit, percent)\n Label(self.hitResultsFrame, text=t, font=__item_format__).grid(row=rf)\n rf+=1\n Label(self.killResultsFrame, text=\"{} kills possible\".format(defSquad.current_size), font=__item_format__).grid(row=0)\n for kill in self.simKillResults:\n percent = self.simKillResults[kill]/eval(self.simulationSpin.get())*100\n t = \"{} kills: {:6.2f}%\".format(kill, percent)\n Label(self.killResultsFrame, text=t, font=__item_format__).grid(row=rf)\n rf+=1", "def test_reward_tiers_after_battle(self):\n self.alice.loyalists = 350\n self.conf[\"game\"][\"rewardtiers\"] = {\n \"newcomer\": {\n \"begin\": 0,\n \"end\": 300,\n \"reward\": 25,\n }\n }\n self.assertEqual(self.alice.loyalists, 350)\n self.assertEqual(self.bob.loyalists, 100)\n\n s1 = self.battle.create_skirmish(self.alice, 50)\n s1.react(self.bob, 50, troop_type=\"cavalry\")\n\n self.end_battle(self.battle, self.conf)\n\n # Bob wins the fight and the war\n 
self.assertEqual(self.battle.victor, self.bob.team)\n\n # Alice doesn't match a tier, gets the default 10%/15% settings\n # which mean 10% for her -> 5 troops\n self.assertEqual(self.alice.loyalists, 355)\n # Bob matches the 0-300 tier. Because he used 50% of his troops, he\n # gets 50% of the flat gain -> 12 troops\n self.assertEqual(self.bob.loyalists, 112)", "def run_simulation(self, state):\n \"*** YOUR CODE HERE ***\"\n player = 0\n visited_states = [(player, state)]\n depth_limited = self.depth != -1\n depth = self.depth\n expand = True\n while not visited_states[-1][1].isWin() and not visited_states[-1][1].isLose():\n if depth_limited and depth == 0: break\n state = self.UCB1(state, player) # Selection & Simulation\n if expand and state not in self.plays: # Expansion\n expand = False\n self.plays[state] = 0\n self.wins[state] = 0\n visited_states.append((player, state))\n player = (player + 1) % state.getNumAgents()\n if not expand and depth_limited and player == 0: depth -= 1\n \n for player, state in visited_states:\n if state in self.plays: # Not simulated nodes\n self.plays[state] += 1\n eval = self.evaluationFunction(visited_states[-1][1])\n if depth_limited:\n if player == 0: self.wins[state] += eval\n if player != 0: self.wins[state] -= eval\n else:\n if player == 0: self.wins[state] += eval\n if player != 0: self.wins[state] += (1 - eval)", "def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def main():\r\n y1, y2 = 0, -s.screen_height\r\n clock = pg.time.Clock()\r\n time1 = time() # records time that game starts\r\n FPS = 50\r\n level = 1 # game starts on first level\r\n\r\n # first run the start menu\r\n score = start_menu()\r\n\r\n # creates the players' sprites\r\n ship1 = player.spaceship(s.p1_image, False)\r\n\r\n if s.two_player:\r\n ship2 = player.spaceship(s.p2_image, False)\r\n else:\r\n ship2 = 0\r\n\r\n # while player is still alive\r\n while ship1.lives > 0:\r\n clock.tick(FPS) # set the games FPS\r\n helpers.refresh_hearts(ship1.lives)\r\n\r\n # calculates total time played, excluding time spent paused\r\n timer = time() - time1 - s.time_paused\r\n # meteor falling speed gradually increases as time goes on\r\n speed_increase_factor = 100\r\n speed_increase = timer / speed_increase_factor\r\n # checks for collisions between meteors and players\r\n sprites.sprite_collision(ship1, ship2, s.meteor_group)\r\n # checks for colliisions between power ups and players\r\n powerup_collision = pg.sprite.spritecollide(ship1, s.powerup_group, True)\r\n # if collision has occured, player earns money\r\n powerup_reward = 500\r\n for sprite in powerup_collision:\r\n ship1.money += powerup_reward\r\n\r\n if ship2 != 0:\r\n powerup_collision2 = 
pg.sprite.spritecollide(ship2, s.powerup_group, True)\r\n for sprite in powerup_collision2:\r\n ship1.money += powerup_reward\r\n\r\n # updates all the sprites\r\n helpers.update_screen(ship1, ship2, y1, y2, score, level)\r\n _, score = sprites.update_sprites(s.meteor_group, speed_increase, score)\r\n _, score = sprites.update_sprites(s.powerup_group, speed_increase, score)\r\n\r\n # moves every bullet up the screen, disappearing if it goes over the top\r\n for bullet in s.bullet_group:\r\n bullet.update()\r\n\r\n # list of meteors hit by a bullet\r\n rocks_shot = pg.sprite.groupcollide(s.meteor_group, s.bullet_group, False, True)\r\n\r\n for rock in rocks_shot:\r\n # destroys the meteor and randomly spawns a coin in its place\r\n rock.get_shot()\r\n # score gain for hitting a meteor\r\n score_increase = 200 if s.double_p else 100\r\n score += score_increase\r\n\r\n # despawns coins if they haven't been collected after a given time\r\n if time() >= s.coin_despawn_time:\r\n s.coin_group.empty()\r\n\r\n s.coin_group.draw(screen)\r\n coin_bonus = 50 # money gained from picking up a coin\r\n coins_collected = pg.sprite.spritecollide(ship1, s.coin_group, True)\r\n if ship2 != 0:\r\n coins_collected += pg.sprite.spritecollide(ship2, s.coin_group, True)\r\n\r\n # player gains money for every coin picked up\r\n for sprite in coins_collected:\r\n ship1.money += coin_bonus\r\n\r\n # if all meteors are destroyed, initiate the boss fight\r\n if len(s.meteor_group) == 0:\r\n boss.boss_fight(ship1, ship2, level)\r\n # player advances to the next level\r\n level += 1\r\n s.num_meteors += 10\r\n s.double_p = False\r\n # shows level on the screen\r\n helpers.display_text(\r\n (\"Level \" + str(level)),\r\n \"centerx\",\r\n \"centery\",\r\n screen.get_rect().centerx,\r\n screen.get_rect().centery,\r\n )\r\n pg.display.update()\r\n sleep(1)\r\n # once boss is defeated, enter the shop to buy upgrades\r\n shop(ship1, ship2)\r\n\r\n # draw all sprites\r\n sprites.add_sprites(s.meteor_group, s.powerup_group)\r\n s.meteor_group.draw(screen)\r\n s.bullet_group.draw(screen)\r\n s.powerup_group.draw(screen)\r\n\r\n # makes the screen constantly scroll, giving the illusion of movement\r\n scroll_speed = 0.5 + timer * 0.001\r\n y1 += scroll_speed\r\n y2 += scroll_speed\r\n if y1 > s.screen_height:\r\n y1 = -s.screen_height\r\n if y2 > s.screen_height:\r\n y2 = -s.screen_height\r\n\r\n # show score and number of lives in the corner of the screen\r\n helpers.display_text((str(int(timer ** 2) + int(score))), \"left\", \"top\", 10, 10)\r\n s.heart_group.draw(screen)\r\n pg.display.update()\r\n\r\n # if player runs out of lives, game ends\r\n clock.tick(30) # Sets FPS to 30\r\n game_over(score, ship1, ship2, timer)", "def simulationTwoDrugsDelayedTreatment():\n\n # TODO", "def ninja_turn():\r\n\tglobal men\r\n\tl = [chop, fly, firebreath]\r\n\tx = randint(0,3)\r\n\tif men >= 85 and x == 3:\r\n\t\tx = randint(0,2)\r\n\tif x != 3 and men - l[x][5] >= 0:\r\n\t\treturn ninja.hit(*l[x])\r\n\telse:\r\n\t\tmen += ninja.sleep(*nsleep)\r\n\t\treturn 0", "def run_func():\n global gain\n if random.random() < 0.8:\n if random.random() < .9:\n gain = random.randint(0, 5) #For example, you have an 80% chance of gaining between 1 and 9 yards on a run play, and you have about a 72% chance to gain between 1 and 6 yards\n else:\n gain = random.randint(6, 9)\n elif random.random() > 0.8 and random.random() < 0.97:\n if random.random() < 0.3:\n gain = random.randint(10, 20)\n else:\n gain = random.randint(-5, -1)\n elif random.random() > 0.97 
and random.random() < 0.99:\n gain = random.randint(21, 30)\n else:\n if random.random() < .8:\n gain = random.randint(31, 45)\n else:\n gain = random.randint(46, 99)", "def play_game(self):\n TF = self.TF\n # keep updating\n actions = collections.defaultdict(dict)\n for i in range(10):\n for j in range(self.N):\n actions[i][j] = 0\n\n sums = []\n for time in range(self.MAX):\n print(\"begin time epoch: \" + str(time))\n train_state_pool = collections.defaultdict(dict)\n flow_num = 0\n sum_all = 0\n for i in TF.keys():\n for j in TF[i].keys():\n for agent in self.Ns:\n actions[flow_num][agent.id] = random.randint(0, agent.n_actions - 1)\n\n # update states to ss_\n sum_all = self.update_state(flow_num, actions)\n\n flow_num += 1\n\n sums.append(sum_all)\n print('cut-random: ' + str(sum_all))\n if time % 10000 == 0 and time != 0:\n str1 = 'cut-mini-random' + str(time) + '.txt'\n file = open(str1, 'w')\n file.write(str(sums))\n file.close()", "def RunTurn( lobound=1, hibound=20 ):\n\tpass", "def test_do_boss_science(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['boss_science'])\n nExp = 2\n self._do_boss_science(nExp, 35, 0, 0, nExp=nExp)", "def testrandom(self):\n for i in range(100):\n AmuletAbility()", "def simulate(self):\n while self.character_1.is_alive and self.character_2.is_alive:\n # flip a coin (0,1), if 1 player 1 attacks\n if random.randint(0, 1):\n self.turn(self.character_1, self.character_2)\n else:\n self.turn(self.character_2, self.character_1)\n\n print('_____-----<< -*- >>-----_____')\n time.sleep(.5)\n # if a character dies print final stats of winner\n if self.character_1.is_alive:\n print(f'{self.character_1.name} has won!! o.o7\\nfinal stats:')\n print(self.character_1)\n else:\n print(f'{self.character_2.name} has won!! 
o.o7\\nfinal stats:')\n print(self.character_2)", "def takeHit(self, amount, type, enemyShip):\n if type == 'energy':\n # go through shields in quadrant first\n if self.currentSP > 0:\n if self.currentSP >= amount:\n self.currentSP -= amount\n amount = 0\n else:\n amount -= self.currentSP\n self.currentSP = 0\n # go through armor next\n if self.currentAP > 0 and amount > 0:\n # set experience only if shot goes through shields\n if self.typeAP == 'energy':\n if self.currentAP >= (amount * globals.reflectiveArmorModifier):\n self.currentAP -= (amount * globals.reflectiveArmorModifier)\n amount = 0\n else:\n amount -= (self.currentAP/globals.reflectiveArmorModifier)\n self.currentAP = 0\n else:\n if self.currentAP >= amount:\n self.currentAP -= amount\n amount = 0\n else:\n amount -= self.currentAP\n self.currentAP = 0\n elif type == 'impact':\n # go through shields in quadrant first\n if self.currentSP > 0:\n if self.currentSP >= amount:\n self.currentSP -= amount/2\n amount = amount/2\n else:\n amount -= self.currentSP\n self.currentSP = 0\n \n # now goto armor\n if self.currentAP > 0 and amount > 0:\n if self.typeAP == 'impact':\n if self.currentAP >= (amount * globals.impactArmorModifier):\n self.currentAP -= (amount * globals.impactArmorModifier)\n amount = 0\n else:\n amount -= (self.currentAP/globals.impactArmorModifier)\n self.currentAP = 0\n else:\n if self.currentAP >= amount:\n self.currentAP -= amount\n amount = 0\n else:\n amount -= self.currentAP\n self.currentAP = 0\n \n # now that shields and armor are taken care of transfer remaining damage to internal components\n self.myParent.setExperience(amount, enemyShip)\n componentDamage = 0\n if amount > 0 and self.components != {}:\n while amount > 0:\n keyList = funcs.sortStringList(self.components.keys())\n componentDamage = 1\n for componentID in keyList:\n component = self.components[componentID]\n if component.currentHP > amount:\n component.currentHP -= amount\n amount = 0\n break\n elif component.currentHP > 0:\n # remove component\n amount -= component.currentHP\n del self.components[componentID]\n \n # check if all components destroyed, or damage absorbed\n if self.components == {} or amount == 0:\n break\n \n if componentDamage == 1:\n self.setMyStatus()\n self.myParent.setMyStatus()\n \n if amount > 0:\n if self.myParent.currentISP > amount:\n self.myParent.currentISP -= amount\n self.myParent.setMyStatus()\n amount = 0\n else:\n self.myParent.destroyMe()\n amount = 0\n \n self.myParent.updateAllGUIValues()", "def test_smoke(self):\n\t\tinit_state = torch.tensor(0.0)\n\t\ttotal_time = torch.tensor(4.0)\n\t\tprint('Agent state trajectory and actions:')\n\t\tAgent().play(init_state, total_time)\n\t\tpyro.clear_param_store()", "def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)", "def startBattle(self):\n defender = self.map.getUnitAt(self.pos)\n attacker = self.selectedUnit\n defender.takeDamage(int(attacker.firepower * attacker.hp))\n attacker.takeDamage(int(defender.firepower * defender.hp))\n self.endBattle()", "def test_attack_types(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 10) # Attack 10 infantry\n s1.react(self.bob, 8, troop_type='cavalry') # --Attack 8 cavalry\n\n # Cavalry should get a 50% bonus here, for a total of 8+4=12\n # So Bob should win by 2 despite lesser 
numbers\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.margin, 2)\n self.assertEqual(result.vp, 10)\n\n s2 = battle.create_skirmish(self.bob, 10,\n troop_type='cavalry') # attack 10 cavalry\n s2.react(self.alice, 8, troop_type='ranged') # -- oppose 8 ranged\n result = s2.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.alice.team)\n self.assertEqual(result.margin, 2)\n self.assertEqual(result.vp, 10)\n\n s3 = battle.create_skirmish(self.carol, 10, # Attack 10 ranged\n troop_type='ranged')\n s3.react(self.bob, 8) # -- oppose 8 infantry\n result = s3.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.margin, 2)\n self.assertEqual(result.vp, 10)", "def __init__(\n self,\n height=20,\n width=20,\n initial_sheep=100,\n initial_wolves=50,\n sheep_reproduce=0.04,\n wolf_reproduce=0.05,\n wolf_gain_from_food=20,\n grass=False,\n grass_regrowth_time=30,\n sheep_gain_from_food=4,\n trees_carrots_ratio=0.5,\n YEAR=20,\n nb_of_hunters=0,\n ):\n super().__init__()\n # Set parameters\n self.height = height\n self.width = width\n self.initial_sheep = initial_sheep\n self.initial_wolves = initial_wolves\n self.sheep_reproduce = sheep_reproduce\n self.wolf_reproduce = wolf_reproduce\n self.wolf_gain_from_food = wolf_gain_from_food\n self.grass = grass\n self.grass_regrowth_time = grass_regrowth_time\n self.sheep_gain_from_food = sheep_gain_from_food\n self.trees_carrots_ratio = trees_carrots_ratio\n self.YEAR = YEAR\n self.nb_of_hunters = nb_of_hunters\n\n self.schedule = RandomActivationByBreed(self) # classe contenant un dictionnaire des types d'agents et agents existants par type, avec une ordre d'activation possible\n self.grid = MultiGrid(self.height, self.width, torus=True)\n self.datacollector = DataCollector(\n {\n \"Fox\": lambda m: m.schedule.get_breed_count(Predator),\n \"Rabbit\": lambda m: m.schedule.get_breed_count(Prey),\n }\n )\n\n # Create sheep:\n for i in range(self.initial_sheep):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n age = self.random.randrange(3*self.YEAR)\n energy = self.random.randrange( int(self.sheep_gain_from_food/2), 2 * self.sheep_gain_from_food)\n sheep = Prey(self.next_id(), (x, y), self, True, energy, age)\n self.grid.place_agent(sheep, (x, y))\n self.schedule.add(sheep)\n\n # Create wolves\n for i in range(self.initial_wolves):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n age = self.random.randrange(4*self.YEAR)\n energy = self.random.randrange(int(self.wolf_gain_from_food/2), 2 * self.wolf_gain_from_food)\n wolf = Predator(self.next_id(), (x, y), self, True, energy, age)\n self.grid.place_agent(wolf, (x, y))\n self.schedule.add(wolf)\n\n # Create grass patches\n if self.grass:\n for agent, x, y in self.grid.coord_iter():\n if self.trees_carrots_ratio < self.random.random(): # aléatoire du nombre d'arbres et de carottes\n fully_grown = self.random.choice([True, False])\n if fully_grown: # carottes ou pousses de carotes\n countdown = self.grass_regrowth_time\n else:\n countdown = self.random.randrange(self.grass_regrowth_time)\n plant = Plant(self.next_id(), (x, y), self, fully_grown, countdown)\n else:\n plant = Tree(self.next_id(), (x, y), self)\n self.grid.place_agent(plant, (x, y))\n self.schedule.add(plant)\n\n # create hunters\n for i in range(self.nb_of_hunters):\n x = self.random.randrange(self.width-13, self.width-7) # HUNTERMODIF\n 
y = self.random.randrange(self.height-13, self.height-7) # HUNTERMODIF\n hunter = Hunter(self.next_id(), (x, y), self)\n self.grid.place_agent(hunter, (x, y))\n self.schedule.add(hunter)\n\n self.running = True\n self.datacollector.collect(self)", "def main(player):\n saved_score = 0\n rat_array = [\"reset\"]\n current_fight = \"\"\n while player.hp >= 1:\n\n system.clear_screen()\n if player.location == route_list[0]:\n pass\n else:\n rat_array = []\n rat_chance = randint(1, 100)\n if rat_chance >= 50:\n rat_array = system.npc_swarm_spawn()\n else:\n # must reset here, or a sub 50 roll crashes with no rat_array found\n rat_array = [\"reset\"]\n pass\n if player.location == current_fight:\n rat_array = [\"reset\"]\n else:\n pass\n\n # encounter spawn gotta go somewhere how bout here\n system.encounter_chance(player)\n\n status_array = system.status_message(route_list, player, rat_array)\n print(f\"{status_array[0]}\\n{status_array[1]}\")\n\n movement_options = system.movement_options(route_list, player)\n print(\"\\nAdjacent systems to your current location are:\")\n for movement_option in movement_options:\n print(movement_option)\n if len(movement_options) == 1:\n print(\n f\"\\nWhat is your decision? \\n\\nAvailable commands are {movement_options[0]}, \"\n + \"or type 'rat' to shoot rats.\"\n )\n else:\n print(\n f\"\\nWhat is your decision? \\n\\nAvailable commands are {movement_options[0]}, \"\n + f\"{movement_options[1]} or type 'rat' to shoot rats.\"\n )\n try:\n player_action = str(input())\n except ValueError:\n print(\"You spin your ship.\")\n\n action = system.parse_input(player_action, movement_options, player)\n # print(rat_array)\n if action.lower() == \"rat\":\n if rat_array[0] != \"reset\":\n # print('fightin')\n system.rat_fight(rat_array, player)\n # system.clear_screen()\n try:\n for rat_item in rat_array:\n rat_array[rat_item].remove()\n rat_array = [\"reset\"]\n current_fight = player.location\n except:\n rat_array = [\"reset\"]\n current_fight = player.location\n\n if player.location == destination_system:\n print(\n f\"\\n\\nCongratulations, you have arrived at {player.location}. \"\n + \"\\nYou may now set a new destination, or dock up and use your points you've gained to reship. \"\n + \"\\nOr you may choose to either hold onto your points, in which case they might be lost on death \"\n + \"or save them to buy bigger and better ships\"\n + \"\\no7 capsuleer the system is clear. \"\n + f\"\\n\\nYour final score from this trip was {player.score}\")\n saved_score += player.score\n\n if(player.hp < 1):\n print(\n f\"\\n\\nYour ship explodes in to tiny pieces at the stargate in {player.location}. \"\n + \"\\nYour capsule containing your body shatters from the force of the explosion. \"\n + \"\\nYou are dead. You wake up in your hangar where your death clone is set to and \"\n + \"prepare to voyage out once again. \"\n + \"\\no7 capsuleer the cyno is now lit. 
\"\n + f\"\\n\\nYour final score was {player.score}\"\n )", "def __init__(self, name, loot, strength):\n self.name = name\n self.x = 0\n self.y = 0\n self.health = 10\n self.strength = strength\n self.loot = loot\n self.is_alive = True\n self.MAX_HEALTH = 15\n self.magic_key = False\n logging.debug(\"{0} created with health of {1} and strength of {2}\"\n .format(self.name, self.health, self.strength))\n \"\"\" Test Results Part A:\n When increasing MAX_HEATH to 100, rounds tended to go on.\n When decreasing MAX_HEATH to 0.05, rounds end very quickly.\n This is expected because the Sprites will be easier or harder \n to defeat depending on how high their health can get. It will \n take more attacks to defeat a Sprite with more health and less\n attacks to defeat a Sprite with less health. \n \n Test Results Part B:\n Test: change strength of Enemy to 20 (higher than Avatar)\n Prediction: the Enemy should win most/all of the time because the player \n with more strength has a harder attack.\n Results: The Enemy won during all trials. If the roles were switched, the \n same could be said about Avatar.\n \n Test: set health of Avatar to 5\n Prediction: the Avatar will die more often than the Enemy because it can \n receive less attacks\n Results: The Avatar died during most trials. \n \n Test: set MAX_HEALTH for Enemy to 5\n Prediction: Enemy will be able to have less health, so it will be defeated\n more often than the Avatar\n Results: The enemy died in almost all trials\n \"\"\"", "def run(self):\n #Level Musik starten\n pygame.mixer.music.stop()\n pygame.mouse.set_cursor(*pygame.cursors.broken_x)\n pygame.key.set_repeat(10)\n pygame.mixer.music.load(os.path.join(self.sourceFileDir,\"Musik\",\"Decktonic - Night Drive (Strong Suit Remix).wav\"))\n \n pygame.mixer.music.play(-1)\n \n #Level Daten aus Datei einlesen\n for i in range(25):\n \n self.level += 1 \n spiel = Spiel(self,self.data[\"levels\"][self.level-1],self.win,godmode= self.godmode)\n #Wenn der Spieler das Level nicht geschafft haben sollte\n if not spiel.schleife_haupt():\n return self.level, self.kills\n \n geschwindigkeit = 10\n\n #Grundlevel erstellen\n lvl_data = {\n \n \"level\": self.level,\n \"hindernisse\": 2,\n \n \"GBall_Shoot\": {\n \"Anzahl\": 2,\n \"geschwindigkeit\": geschwindigkeit\n },\n \"GBall_Normal\": {\n \"Anzahl\": 2,\n \"geschwindigkeit\": geschwindigkeit\n },\n \"GBall_Verdoppler\": {\n \"Anzahl\": 3,\n \"geschwindigkeit\": geschwindigkeit\n },\n \"GBall_RNG\": {\n \"Anzahl\": 4,\n \"geschwindigkeit\": geschwindigkeit\n },\n \"GBall_Two\": {\n \"Anzahl\": 3,\n \"geschwindigkeit\": geschwindigkeit\n }\n \n }\n #Level immer schwerer machen\n for k in range(self.maxlvl):\n\n\n self.level += 1\n geschwindigkeit += 1\n spiel = Spiel(self,lvl_data,self.win,godmode= self.godmode)\n if not spiel.schleife_haupt():\n return self.level, self.kills", "def time(self):\n\n self.timing = True\n self.scramble()\n\n self.disp = False", "async def play_shotgun(game_state) -> None:\n big_inside, lesser_counter = count_zombies(game_state)\n if big_inside and lesser_counter == 0:\n play_weapon(game_state, Supply.SHOTGUN, strong=True)\n elif lesser_counter <= 1 and not big_inside:\n play_weapon(game_state, Supply.SHOTGUN)\n elif lesser_counter > 1 and not big_inside:\n play_weapon(game_state, Supply.SHOTGUN, destroyed=False)\n play_weapon(game_state, Supply.SHOTGUN)\n else:\n message = 'What survivors should do [0/1]?\\n[0]: kill big zombie\\n' \\\n f'[1]: kill up to two lesser zombies ({lesser_counter} inside)\\n>'\n action = 
await get_action(game_state, message, ['0', '1'])\n if action == '0':\n play_weapon(game_state, Supply.SHOTGUN, strong=True)\n elif lesser_counter == 1:\n play_weapon(game_state, Supply.SHOTGUN)\n else:\n play_weapon(game_state, Supply.SHOTGUN, destroyed=False)\n play_weapon(game_state, Supply.SHOTGUN)", "def run(\n asgn,\n std_per_petal=10,\n sky_per_petal=40,\n sky_per_slitblock=0,\n start_tile=-1,\n stop_tile=-1,\n redistribute=True,\n use_zero_obsremain=True\n):\n gt = GlobalTimers.get()\n\n log = Logger.get()\n\n def print_counts(when=None):\n counts = asgn.get_counts(start_tile, stop_tile)\n tiles = list(counts.keys())\n tiles.sort()\n for tile in tiles:\n msg = 'Tile %i: ' % tile\n if when is not None:\n msg += when\n tilecounts = counts[tile]\n keys = [('SCIENCE',True), ('SCIENCE not STANDARD',False), ('STANDARD',True),\n ('SKY',True), ('SUPPSKY',False), ('SAFE',False)]\n ss = []\n for k,always in keys:\n n = tilecounts.get(k, None)\n if n is None:\n log.warning('Key', k, 'missing from Assignment.get_counts return value')\n else:\n if n>0 or always:\n ss.append('%s: %i' % (k,n))\n log.info(msg + ', '.join(ss))\n\n print_counts('Start: ')\n\n # First-pass assignment of science targets\n gt.start(\"Assign unused fibers to science targets\")\n asgn.assign_unused(TARGET_TYPE_SCIENCE, -1, -1, \"POS\", start_tile, stop_tile)\n gt.stop(\"Assign unused fibers to science targets\")\n print_counts('After assigning unused fibers to science targets: ')\n\n # Redistribute science targets across available petals\n if redistribute:\n gt.start(\"Redistribute science targets\")\n asgn.redistribute_science(start_tile, stop_tile)\n gt.stop(\"Redistribute science targets\")\n print_counts('After redistributing science targets: ')\n\n # Assign standards, up to some limit\n gt.start(\"Assign unused fibers to standards\")\n asgn.assign_unused(\n TARGET_TYPE_STANDARD, std_per_petal, -1, \"POS\", start_tile, stop_tile\n )\n gt.stop(\"Assign unused fibers to standards\")\n print_counts('After assigning standards: ')\n\n def do_assign_unused_sky(ttype, supp=False):\n tag = 'supp' if supp else ''\n if sky_per_petal > 0 and sky_per_slitblock > 0:\n # Assign using the slitblock requirement first, because it is\n # more specific\n asgn.assign_unused(\n ttype, -1, sky_per_slitblock, \"POS\",\n start_tile, stop_tile\n )\n print_counts('After assigning %ssky per-slitblock: ' % tag)\n\n # Then assign using the petal requirement, because it may(should) require\n # more fibers overall.\n asgn.assign_unused(\n ttype, sky_per_petal, -1, \"POS\",\n start_tile, stop_tile\n )\n print_counts('After assigning %ssky per-petal: ' % tag)\n else:\n asgn.assign_unused(\n ttype, sky_per_petal, sky_per_slitblock, \"POS\",\n start_tile, stop_tile\n )\n print_counts('After assigning %ssky: ' % tag)\n\n # Assign sky to unused fibers, up to some limit\n gt.start(\"Assign unused fibers to sky\")\n do_assign_unused_sky(TARGET_TYPE_SKY)\n gt.stop(\"Assign unused fibers to sky\")\n\n # Assign suppsky to unused fibers, up to some limit\n gt.start(\"Assign unused fibers to supp_sky\")\n do_assign_unused_sky(TARGET_TYPE_SUPPSKY, supp=True)\n gt.stop(\"Assign unused fibers to supp_sky\")\n\n # Force assignment if needed\n gt.start(\"Force assignment of sufficient standards\")\n asgn.assign_force(\n TARGET_TYPE_STANDARD, std_per_petal, -1, start_tile, stop_tile\n )\n gt.stop(\"Force assignment of sufficient standards\")\n print_counts('After force-assigning standards: ')\n\n def do_assign_forced_sky(ttype, supp=False):\n tag = 'supp' if supp 
else ''\n # This function really feels redundant with do_assign_unused_sky, but\n # when I tried to make a single function to do both calls, I had to call\n # f(*(preargs + pos_arg + postargs)) and it looked too mysterious.\n if sky_per_petal > 0 and sky_per_slitblock > 0:\n # Slitblock first\n asgn.assign_force(\n ttype, -1, sky_per_slitblock, start_tile, stop_tile)\n print_counts('After force-assigning %ssky per-slitblock: ' % tag)\n # Then petal\n asgn.assign_force(\n ttype, sky_per_petal, -1, start_tile, stop_tile)\n print_counts('After force-assigning %ssky per-petal: ' % tag)\n else:\n asgn.assign_force(\n ttype, sky_per_petal, sky_per_slitblock, start_tile, stop_tile)\n print_counts('After force-assigning %ssky: ' % tag)\n\n gt.start(\"Force assignment of sufficient sky\")\n do_assign_forced_sky(TARGET_TYPE_SKY)\n gt.stop(\"Force assignment of sufficient sky\")\n\n gt.start(\"Force assignment of sufficient supp_sky\")\n do_assign_forced_sky(TARGET_TYPE_SUPPSKY, supp=True)\n gt.stop(\"Force assignment of sufficient supp_sky\")\n\n # If there are any unassigned fibers, try to place them somewhere.\n # When assigning science targets to these unused fibers, also consider targets\n # with no remaining observations. Getting extra observations of science\n # targets is preferred over additional standards and sky. See desi-survey email\n # list archive message 1865 and preceding discussion thread.\n gt.start(\"Assign remaining unassigned fibers\")\n asgn.assign_unused(\n TARGET_TYPE_SCIENCE,\n -1,\n -1,\n \"POS\",\n start_tile,\n stop_tile,\n use_zero_obsremain=use_zero_obsremain\n )\n print_counts('After assigning reobservations of science targets: ')\n\n asgn.assign_unused(TARGET_TYPE_STANDARD, -1, -1, \"POS\", start_tile, stop_tile)\n asgn.assign_unused(TARGET_TYPE_SKY, -1, -1, \"POS\", start_tile, stop_tile)\n asgn.assign_unused(TARGET_TYPE_SUPPSKY, -1, -1, \"POS\", start_tile, stop_tile)\n\n # Assign safe location to unused fibers (no maximum). There should\n # always be at least one safe location (i.e. 
\"BAD_SKY\") for each fiber.\n # So after this is run every fiber should be assigned to something.\n asgn.assign_unused(TARGET_TYPE_SAFE, -1, -1, \"POS\", start_tile, stop_tile)\n gt.stop(\"Assign remaining unassigned fibers\")\n print_counts('Final assignments: ')\n\n # Assign sky monitor fibers\n gt.start(\"Assign sky monitor fibers\")\n asgn.assign_unused(TARGET_TYPE_SKY, -1, -1, \"ETC\", start_tile, stop_tile)\n asgn.assign_unused(TARGET_TYPE_SUPPSKY, -1, -1, \"ETC\", start_tile, stop_tile)\n asgn.assign_unused(TARGET_TYPE_SAFE, -1, -1, \"ETC\", start_tile, stop_tile)\n gt.stop(\"Assign sky monitor fibers\")\n\n return asgn", "def retrieve_handcrafted_inputs(self, obs):\n self.detect_self_unit_types(obs)\n\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n selected_allies = [unit for unit in allies if unit.unit_type == self.current_group_id]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n hitpoints = 0\n for unit in selected_allies:\n hitpoints += unit.health\n\n if self.current_group_id in unit_health.keys():\n init_hp = 0\n init_hp = unit_health[self.current_group_id] * self.init_unit_counts[self.current_group_id]\n else:\n init_hp = self.initial_self_hit_points\n current_hp = hitpoints / init_hp\n\n weapon_cooldown = 0\n for ally in selected_allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(selected_allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(selected_allies) > 0:\n self_weapon_range = weapon_ranges[self.current_group_id]\n self_radius = unit_sizes[self.current_group_id] / float(2)\n self_unit_type = unit_type[self.current_group_id]\n self_speed = unit_speed[self.current_group_id]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n # TODO can be inaccurate if using melee units\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in selected_allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n break\n else:\n in_enemy_range = 0\n if in_enemy_range:\n break\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs, for_subgroup=True)\n\n if self.previous_commands[self.current_group_id] == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_commands[self.current_group_id] == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs,\n for_subgroup=True)\n\n distance_to_enemy = self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs))\n distance_to_enemy = distance_to_enemy / float((32 ** 2 + 20 ** 2) ** 0.5)\n\n return [current_hp, weapon_cooldown, 
enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, distance_to_enemy]", "def timings_across_runs(self):\n\n\t\t# first determine individual run duration (to make sure that stimulus timings of all runs are correct)\n\t\trun_duration = []\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tniiFile = NiftiImage(self.runFile(stage = 'processed/mri', run = r))\n\t\t\ttr, nr_trs = round(niiFile.rtime*1)/1000.0, niiFile.timepoints\n\t\t\trun_duration.append(tr * nr_trs)\n\t\trun_duration = np.r_[0,np.cumsum(np.array(run_duration))]\n\n\t\t# timing information stimuli\n\t\tstim_info = []\n\t\trun = 0\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tstim_events = np.loadtxt(self.runFile(stage = 'processed/behavior', run = r, extension = '.txt', postFix = ['stim' ,'all','task']))\n\t\t\tstim_events[:,:2] += run_duration[run]\n\t\t\tstim_info.append(stim_events)\n\t\t\trun += 1\n\n\t\t# save stim_info as text_file\t\n\t\tnp.savetxt(self.runFile(stage = 'processed/behavior', postFix = ['stim_info_all'],extension = '.txt'), np.vstack(stim_info), fmt = '%3.2f', delimiter = '\\t')", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [\n ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n # print \"newScaredTimes\", newScaredTimes\n # print successorGameState.getCapsules()\n\n newGhostPos = newGhostStates[0].getPosition()\n ghost_dist = ghost_distance(newPos, newGhostPos)\n capsules = successorGameState.getCapsules()\n # food_dist = food_distance(newPos, newFood)\n\n # approach 1: 2/4 win = 10, average < 500\n # if ghost_dist <= 1:\n # return -999999\n # return -food_num(newFood)\n\n # approach 2: 2/4 win = 10, average < 500 but close to 500\n # if newScaredTimes[0] == 0:\n # if ghost_dist <= 1:\n # return -999999\n # return -food_num(newFood) -capsule_distance(newPos, capsules)\n\n # final approach: 4/4 win = 10, average = 1310.5\n if newScaredTimes[0] == 0:\n if ghost_dist <= 1:\n return -999999\n return -food_distance(newPos, newFood) * .01 - food_num(newFood) - capsule_distance(newPos, capsules)", "def tick(self, game, ticks=1):\n \n self.structure.time_until_harvest -= ticks\n if self.structure.time_until_harvest <= 0:\n if ref.structure_type_dct[self.structure.structure_type]['site type'] == 'resource':\n resources_harvested = 0\n for worker in xrange(self.structure.workers):\n workload = randint(500, 1500)\n if workload <= self.harvestable:\n self.harvestable -= workload\n resources_harvested += workload\n else:\n resources_harvested += self.harvestable\n self.harvestable = 0\n self.structure.workers = 0\n self.structure.transform()\n game.action_log.append('transformation')\n break\n #Adds resource to 'available' town resources\n entities.town['object'].resources[\n ref.material_type_dct[self.resource]['class']][\n self.resource]['available'] += resources_harvested\n #Removes resource from 'harvestable' town resources\n entities.town['object'].resources[\n 
ref.material_type_dct[self.resource]['class']][\n self.resource]['harvestable'] -= resources_harvested\n\n self.structure.time_until_harvest = ref.structure_type_dct[\n self.structure.structure_type]['time per harvest']\n return\n\n elif ref.structure_type_dct[self.structure.structure_type]['site type'] == 'adventure':\n if len(self.structure.workers) > 0:\n for hero in [h for h in entities.heroes['object list'] if (\n h.hero_id in self.structure.workers)]:\n hero.boredom += randint(0, 100)\n try:\n monster = next(m for m in entities.monsters['object list'] if \n m.monster_id in self.structure.monsters)\n self.battle(hero, monster)\n except StopIteration:\n adventure_sites = [\n s for s in entities.sites['object list'] if ref.structure_type_dct[\n s.structure.structure_type]['site type'] == 'adventure'\n ]\n if hero.boredom < 100 and len(adventure_sites) > 0:\n hero.destination = choice(adventure_sites).location\n else:\n hero.boredom = 0\n hero.destination = entities.town['object'].location\n hero.traveling = True\n self.structure.workers.remove(hero.hero_id)\n self.structure.worker_capacity += 1", "def choose_piece_duration(self):\n self.piece_duration = random.randint(900, 1100)", "def actions(self, ship: SpaceShip, input_data: Dict[str, Tuple]) -> None:\n # ship.turn_rate = 180.0\n ship.thrust = ship.thrust_range[1]\n ship.shoot()", "def simulateOneTimeStep(self):\n\n self.susceptibleToInfected()\n self.infectedToRecovered()\n\n # add the new values of healthy/infected/recovered to the arrays keeping track\n SIR_t = np.array([self.getSusceptible(), self.getInfected(), self.getRecovered()])\n #update SIR time series\n self.SIR = np.concatenate([self.SIR, SIR_t[:,np.newaxis]], axis=1)\n\n # add the new snapshot of the simulation\n self.snapshots.append(self.getSpace().copy())", "def event2547():\n header(2547, 1)\n\n skip_if_event_flag_on(38, EVENT.HasBonerust) # Skip trigger and dice roll.\n\n # Wait for last hit to end.\n if_player_does_not_have_special_effect(7, SPEFFECT.BonerustHit3Percent)\n if_player_does_not_have_special_effect(7, SPEFFECT.BonerustHit5Percent)\n if_player_does_not_have_special_effect(7, SPEFFECT.BonerustHit10Percent)\n if_player_does_not_have_special_effect(7, SPEFFECT.BonerustHit15Percent)\n if_player_does_not_have_special_effect(7, SPEFFECT.BonerustHit20Percent)\n if_condition_true(0, 7)\n\n if_player_has_special_effect(1, SPEFFECT.BonerustHit3Percent)\n if_player_has_special_effect(2, SPEFFECT.BonerustHit5Percent)\n if_player_has_special_effect(3, SPEFFECT.BonerustHit10Percent)\n if_player_has_special_effect(4, SPEFFECT.BonerustHit15Percent)\n if_player_has_special_effect(5, SPEFFECT.BonerustHit20Percent)\n if_condition_true(-1, 1)\n if_condition_true(-1, 2)\n if_condition_true(-1, 3)\n if_condition_true(-1, 4)\n if_condition_true(-1, 5)\n if_condition_true(0, -1)\n\n if_player_has_special_effect(6, SPEFFECT.SerousBond)\n restart_if_condition_true(6)\n\n # Roll dice. 
(11025620-11025669) 11025620 or 11025621 are hits (only first with Archwood shield).\n # Note all odds have been lowered from their stated values as of v1.2.0.\n flag.disable_chunk(11025620, 11025679)\n skip_if_condition_false_finished(2, 1)\n flag.enable_random_in_chunk(11025620, 11025679) # 2/60\n skip(10)\n skip_if_condition_false_finished(2, 2)\n flag.enable_random_in_chunk(11025620, 11025659) # 2/40\n skip(7)\n skip_if_condition_false_finished(2, 3)\n flag.enable_random_in_chunk(11025620, 11025649) # 2/30\n skip(4)\n skip_if_condition_false_finished(2, 4)\n flag.enable_random_in_chunk(11025620, 11025644) # 2/25\n skip(1)\n flag.enable_random_in_chunk(11025620, 11025635) # 2/16\n\n if_event_flag_on(-2, 11025620)\n if_player_has_special_effect(6, 6890) # Grass Crest Shield halves odds.\n skip_if_condition_true(1, 6)\n if_event_flag_on(-2, 11025621)\n restart_if_condition_false(-2)\n\n # Skips to here if player already has Bonerust on load.\n\n chr.set_special_effect(CHR.Player, SPEFFECT.Bonerust)\n flag.enable(EVENT.HasBonerust)\n\n # Show message on first time. Evil Eye icon also active.\n skip_if_event_flag_on(2, 11302010)\n message.status_explanation(TEXT.AfflictedWithBonerust, True) # Dialog too easily skipped. It's only one time.\n flag.enable(11302010)\n\n if_player_has_special_effect(-3, SPEFFECT.GreenMossUsed)\n if_player_has_special_effect(-3, SPEFFECT.Remedy)\n if_player_has_special_effect(-3, SPEFFECT.SunlightElixirEffect)\n if_has_tae_event(-3, CHR.Player, 700) # Sitting at bonfire.\n if_condition_true(0, -3)\n chr.cancel_special_effect(CHR.Player, SPEFFECT.Bonerust)\n flag.disable(EVENT.HasBonerust)\n wait_frames(5)\n restart()", "def run():\n\n Number_repetitions = 1\n Rate = np.zeros((Number_repetitions,1))\n Rate20 = np.zeros((Number_repetitions,1))\n Penalty20 = np.zeros((Number_repetitions, 1))\n\n # Loop to average\n for idx in np.arange(0,Number_repetitions,1):\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n # I've edited the enviroment variable to do the plot creating an completions array\n completions = np.array(e.completions)\n rate = float(completions.sum())/float((len(completions)))\n rate20 = float(completions[-20:].sum())/20\n\n Rate[idx] = rate\n Rate20[idx] = rate20\n\n Wrong = np.array(a.wrong_moves_per_run[-20:]).mean()\n Penalty20[idx] = Wrong\n\n plt.scatter(np.arange(0,len(completions)),completions)\n plt.plot(Wrong)\n plt.xlabel('Trial')\n plt.ylabel('1 = Get in the destination, 0 = did not get')\n plt.title('Reiforcement learning progress')\n plt.legend(['Rate of completion: ' + str(rate) + '. Rate last 20: ' + str(rate20) + '.Mean penalty last 20: ' + str(Wrong)])\n plt.show()\n\n #print 'Accuracy: ' + str(Rate) + '. Mean: ' + str(np.mean(Rate))\n #print 'Mean 20: ' + str(np.mean(Rate20))#'Accuracy 20: ' + str(Rate20) + '. 
Mean 20: ' + str(np.mean(Rate20))\n #print 'Mean_penalty: ' + str(np.mean(Penalty20))\n\n # Print state table with actions\n #t = 0\n #for state in a.states:\n #print 'State ' + str(state) + '. Best action: ' + str((str(np.argmax(a.QTable[t][:]))))\n #t += 1", "def cat_turn():\r\n\tglobal men\r\n\tl = [bat, pounce, legkick]\r\n\tx = randint(0, 3)\r\n\tif men >= 85 and x == 3:\r\n\t\tx = randint(0,2)\r\n\tif x != 3 and men - l[x][5] >= 0:\r\n\t\treturn cat.hit(*l[x])\r\n\telse:\r\n\t\tmen += cat.sleep(*csleep)\r\n\t\treturn 0", "def use_skill(self, g, i, x, y):\n # @ param g a reference to the game engine\n # @ param i the index of the skill (basically what skill)\n # @ param x the x target coordinate in game pixels\n # @ param y the y target coordinate in game pixels\n if self.attackTimer < self.attackDelay:\n print(\"attack on CD\")\n return\n \n if self.skill[i].skillAttr == 0:\n g.fire_skill_sound.play()\n elif self.skill[i].skillAttr == 1:\n g.ice_skill_sound.play()\n elif self.skill[i].skillAttr == 2:\n g.lightning_skill_sound.play()\n elif self.skill[i].skillAttr == 3:\n g.poison_skill_sound.play()\n \n \n if self.skill[i].skillKey == 0: #Aura\n #turn the aura on/off\n if self.skill[i].active == False:\n #print(\"aura on\")\n self.skill[i].active = True\n else:\n self.skill[i].active = False\n #print(\"aura off\")\n \n elif self.skill[i].skillKey == 1: #Missile\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n target = Target(x, y)\n center_x = self.rect.x + (self.rect.width / 2)\n center_y = self.rect.y + (self.rect.height / 2)\n #bullet types: fire 5, ice 6, lightning 7\n #skill types: fire 0, ice 1, lightning 2\n g.bullets.append(self.bulletFactory.createBullet(g, self.skill[i].skillAttr + 5, 0, self.attack, 1024, target, center_x, center_y))\n #print(\"missile\")\n\n elif self.skill[i].skillKey == 2: #Breath\n #for each creep in the AoE cone, do damage.\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n #get low and high angle (-45 degrees and +45 degrees from player -> point angle)\n lowAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) - 3.1415 / 2.0\n highAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) + 3.1415 / 2.0\n for creep in g.creeps:\n #get angle to creep\n creepAngle = math.atan2(creep.rect.centery - self.rect.centery, creep.rect.centerx - self.rect.centerx)\n \n #if angle to the creep is between the two angles\n if creepAngle > lowAngle and creepAngle < highAngle:\n #and the distance to the creep is below the skill's range\n if ( (creep.rect.centerx - self.rect.centerx) ** 2 + (creep.rect.centery - self.rect.centery) ** 2 ) ** 0.5 < 4 * 24:\n creep.take_damage( self.attack )\n #print(\"breath\")\n #apply debuffs, based on type\n if self.skill[i].skillAttr == 0: #fire\n creep.applyBurning()\n elif self.skill[i].skillAttr == 1: #frost\n creep.applyChilled()\n elif self.skill[i].skillAttr == 2: #lightning\n creep.applyShocked()", "def enemyrawdmg(self):\n\n enemystr = globalvalues.ai.getstatus()[3]\n # rngfactor will ensure that regular mobs won't absolutely crush you\n rngfactor = float(float(random.randint(45, 65)) / 100)\n level = (\n globalvalues.p1.getlevel()\n - globalvalues.ai.getstatus()[0]\n )\n lvlfactor = float(1 - level * 0.05)\n\n return int((enemystr) * 102 * 0.12 * rngfactor * lvlfactor)", "def __init__(\n self,\n width=20,\n height=20,\n initial_sheep=100,\n initial_wolves=50,\n sheep_reproduce=0.04,\n 
wolf_reproduce=0.05,\n wolf_gain_from_food=20,\n grass=False,\n grass_regrowth_time=30,\n sheep_gain_from_food=4,\n ):\n super().__init__()\n # Set parameters\n self.width = width\n self.height = height\n self.initial_sheep = initial_sheep\n self.initial_wolves = initial_wolves\n self.sheep_reproduce = sheep_reproduce\n self.wolf_reproduce = wolf_reproduce\n self.wolf_gain_from_food = wolf_gain_from_food\n self.grass = grass\n self.grass_regrowth_time = grass_regrowth_time\n self.sheep_gain_from_food = sheep_gain_from_food\n\n self.schedule = RandomActivationByTypeFiltered(self)\n self.grid = mesa.space.MultiGrid(self.width, self.height, torus=True)\n self.datacollector = mesa.DataCollector(\n {\n \"Wolves\": lambda m: m.schedule.get_type_count(Wolf),\n \"Sheep\": lambda m: m.schedule.get_type_count(Sheep),\n \"Grass\": lambda m: m.schedule.get_type_count(\n GrassPatch, lambda x: x.fully_grown\n ),\n }\n )\n\n # Create sheep:\n for i in range(self.initial_sheep):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n energy = self.random.randrange(2 * self.sheep_gain_from_food)\n sheep = Sheep(self.next_id(), (x, y), self, True, energy)\n self.grid.place_agent(sheep, (x, y))\n self.schedule.add(sheep)\n\n # Create wolves\n for i in range(self.initial_wolves):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n energy = self.random.randrange(2 * self.wolf_gain_from_food)\n wolf = Wolf(self.next_id(), (x, y), self, True, energy)\n self.grid.place_agent(wolf, (x, y))\n self.schedule.add(wolf)\n\n # Create grass patches\n if self.grass:\n for agent, (x, y) in self.grid.coord_iter():\n fully_grown = self.random.choice([True, False])\n\n if fully_grown:\n countdown = self.grass_regrowth_time\n else:\n countdown = self.random.randrange(self.grass_regrowth_time)\n\n patch = GrassPatch(self.next_id(), (x, y), self, fully_grown, countdown)\n self.grid.place_agent(patch, (x, y))\n self.schedule.add(patch)\n\n self.running = True\n self.datacollector.collect(self)", "def step(self):\n self.age += 1\n self.move_agent()\n self.sugar -= self.metabolism\n\n # Eat sugar\n available_sugar = self.get_sugar(self.pos).amount\n self.sugar += available_sugar\n# self.total_sugar_in_field -= available_sugar\n # Set sugar in current cell to zero\n self.get_sugar(self.pos).eat_sugar() \n \n \n \n if self.sugar == 0:\n self.model.remove_agent(self)\n \n self.gen += 1\n x = self.model.random.randrange(self.model.grid.width)\n y = self.model.random.randrange(self.model.grid.height)\n new_pos = (x,y)\n \n self.model.add_agent(Consumer, new_pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.model.vision, self.model.metabolism, self.model.starting_sugar)\n \n \n if self.reproduction_and_death:\n if self.age > self.max_age: # Agent dies\n # Tax inheritance\n self.model.inheritance_tax_agent(self)\n \n if self.model.spawn_at_random:\n self.gen += 1\n x = self.model.random.randrange(self.model.grid.width)\n y = self.model.random.randrange(self.model.grid.height)\n new_pos = (x,y)\n \n self.model.add_agent(Consumer, new_pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.model.vision, self.model.metabolism, self.model.starting_sugar)\n self.model.remove_agent(self) #agent dies\n \n \n else:\n #spawn new agent\n self.gen += 1\n if self.sugar != 0:\n self.model.add_agent(Consumer, self.pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.vision, self.metabolism, self.sugar)\n else:\n self.model.add_agent(Consumer, 
self.pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.vision, self.metabolism, self.model.starting_sugar)\n \n self.model.remove_agent(self) #agent dies", "def fight(who_fight=None):\r\n global monsters_defeated\r\n \r\n if isinstance(who_fight,helpful.Being):\r\n ###specific monster\r\n enemy = who_fight\r\n\r\n elif isinstance(who_fight,list):\r\n ###list of categories\r\n enemy = items_lists.random_monster(random.choice(who_fight))\r\n\r\n else:\r\n ###else picks a monster at random, not boss though\r\n enemy = items_lists.random_monster()\r\n \r\n\r\n\r\n # print 'fighting:\\n' + enemy.advanced_str()\r\n encountered = words.being_adj().capitalize() + ' ' + str(enemy)\r\n raw_input(str(player) + ' encounters a ' + encountered + '!\\n')\r\n choice = helpful.pick_item(['yes','no','inventory'],'Fight?','inventory')\r\n\r\n while choice == 'inventory':\r\n inspect_inventory()\r\n choice = helpful.pick_item(['yes','no','inventory'],'Fight?','inventory')\r\n\r\n if choice == 'yes':\r\n\r\n while enemy.get_health() > 0 and player.get_health() > 0:\r\n #player attacks\r\n item = helpful.pick_item(player.get_inventory(), 'What to use?')\r\n player.use(item)\r\n attack = item.get_damage()\r\n defend = item.get_health()\r\n\r\n if attack > 0:\r\n enemy.hit(item)\r\n raw_input('You dealt ' +str(attack) + ' damage!')\r\n elif defend > 0:\r\n raw_input('You gained ' + str(defend) + ' HP!')\r\n else:\r\n raw_input('That was pretty dumb.\\n')\r\n \r\n if enemy.get_health() > 0: #if the enemy is still alive\r\n\r\n ###enemy attacks, using random item in enemy's inventory\r\n enemy_choice = random.choice(enemy.get_inventory())\r\n player.hit(enemy_choice)\r\n raw_input(str(enemy).capitalize() + ' used ' + str(enemy_choice) + '!\\n')\r\n raw_input('You lost ' + str(enemy_choice.get_damage()) + ' health!\\n')\r\n \r\n player.set_health(max(0,player.get_health())) #make health nonnegative\r\n enemy.set_health(max(0,enemy.get_health()))\r\n\r\n print('Player Health: ' + str(player.get_health()) + '\\n')\r\n raw_input(str(enemy) + ' Health: ' + str(enemy.get_health()) + '\\n')\r\n \r\n if enemy.get_health() == 0:\r\n winner = str(player)\r\n raw_input('You looted the following items:\\n' + enemy.get_inv_string())\r\n player.grab_items(enemy.get_inventory())\r\n result = 'win'\r\n monsters_defeated += 1\r\n\r\n if player.get_health() == 0:\r\n winner = str(enemy)\r\n result = 'death'\r\n\r\n print(winner + ' wins!\\n')\r\n\r\n elif choice == 'no':\r\n\r\n ouch = random.randrange(0,2)\r\n if enter_two == config.confus(config.config2):\r\n ouch = 0\r\n global cheated\r\n cheated = True\r\n print '<yolo>'\r\n if ouch:\r\n enemy_choice = random.choice(enemy.get_inventory())\r\n player.hit(enemy_choice)\r\n print 'You got away, but were hit by the ' + \\\r\n str(enemy) +\"'s \" + str(enemy_choice) +'!' 
+ '\\n'\r\n raw_input('You sustained ' + str(enemy_choice.get_damage()) +' damage.\\n')\r\n if player.get_health() <= 0:\r\n return 'death'\r\n else:\r\n raw_input('You got away safely!\\n\\nThat was close!\\n')\r\n result = 'lose'\r\n\r\n return result", "def RunExactTimestep(self): \n if self.sim_t == 0:\n randoms = np.random.random(1000) \n self.randoms_log = np.log(randoms)*-1\n self.randoms = np.random.random(1000)\n self.count = 0 \n elif self.count == 1000:\n randoms = np.random.random(1000) \n self.randoms_log = np.log(randoms)*-1\n self.randoms = np.random.random(1000) \n self.count = 0 \n \n self.sim_tau = self.randoms_log[self.count]/float(self.sim_a_0) # reaction time generation\n self.sim_r2 = self.randoms[self.count] # Draw random number 2 [0-1]\n self.count +=1\n \n if (self.sim_t + self.sim_tau) < self.settings.endtime:\n self.sim_t += self.sim_tau # Time update\n self.reaction_index = 0\n sum_of_as = self.sim_a_mu[self.reaction_index]\n criteria = self.sim_r2*self.sim_a_0\n while sum_of_as < criteria: # Use r2 to determine which reaction will occur\n self.reaction_index += 1\t # Index\n sum_of_as += self.sim_a_mu[self.reaction_index] \n\n try:\n self.X_matrix += self.N_matrix_transpose[self.reaction_index]\n self.timestep += 1\n except MemoryError as ex:\n print(ex)\n sys.exit() \n else: \n self.sim_t = self.settings.endtime \n self.reaction_index = np.nan", "def play(self):\n self.player = Knight()\n self._occupy_huts()\n acquired_hut_counter = 0\n\n self.show_game_mission()\n self.player.show_health(bold=True)\n\n while acquired_hut_counter < 5:\n idx = self._process_user_choice()\n self.player.acquire_hut(self.huts[idx-1])\n\n if self.player.health_meter <= 0:\n print_bold(\"YOU LOSE :( Better luck next time\")\n break\n\n if self.huts[idx-1].is_acquired:\n acquired_hut_counter += 1\n\n if acquired_hut_counter == 5:\n print_bold(\"Congratulations! 
YOU WIN!!!\")", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline= True ) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def simulate_data(self, S):\r\n \r\n # extract parameters from state to make equations clear\r\n (carb_bg_ratio, time_to_breakdown, insulin_bg_ratio, \r\n time_to_peak, basal_delta, digestion_speed,\r\n activation_speed) = S\r\n \r\n\r\n # initialize blood glucose data\r\n simulated_data = np.full(self.bg_data.shape,\r\n self.bg_initial, dtype=float)\r\n\r\n # simulate bg effect due to basal\r\n simulated_data += self.time_data*basal_delta\r\n\r\n # simulate bg effect due to meals\r\n for meal in self.meals:\r\n\r\n # starting time point\r\n time_delta = (meal[0] - self.start_time).total_seconds()\r\n discrete_start_time = int(time_delta / 300.0)\r\n discrete_time_to_breakdown = time_to_breakdown / 5.0\r\n bg_rise = meal[1]*carb_bg_ratio\r\n bg_shift = discrete_start_time + discrete_time_to_breakdown\r\n meal_effect = Sigmoid.logistic(\r\n bg_rise, bg_shift, digestion_speed,\r\n self.time_data[discrete_start_time:])\r\n simulated_data += np.concatenate((np.zeros(discrete_start_time),\r\n meal_effect))\r\n\r\n # simulate bg effect due to injections\r\n for injection in self.injections:\r\n time_delta = (injection[0] - self.start_time).total_seconds()\r\n discrete_start_time = int(time_delta / 300.0)\r\n discrete_time_to_peak = time_to_peak / 5.0\r\n insulin_rise = injection[1]*insulin_bg_ratio\r\n insulin_shift = discrete_start_time + discrete_time_to_peak\r\n insulin_effect = Sigmoid.logistic(\r\n insulin_rise,\r\n insulin_shift,\r\n activation_speed,\r\n self.time_data[discrete_start_time:])\r\n simulated_data -= np.concatenate((np.zeros(discrete_start_time),\r\n insulin_effect))\r\n\r\n # return the simulated data\r\n return simulated_data", "def run_single(self):\n self.run_sim_time(1)", "def fight(self):\r\n\t\tif self.death():\r\n\t\t\treturn 0\r\n\t\tif self.ctime < 1:\r\n\t\t\tself.ctime += 0.05\r\n\t\telse:\r\n\t\t\tself.ctime = 0\r\n\t\t\tself.hit()", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def test(self):\n winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)\n \n pulses=1000*3\n winsound.Beep(200, 1000) # .Beep(1650Hz, (XXXXms)) #e.g 1000ms=1second\n self.run(pulses); self.run(pulses, ANTI_CLK_W)\n sleep(1)\n\n winsound.Beep(400, 1000)\n self.swing(128, count=30); self.stop() #0.9 degrees\n sleep(1)\n\n winsound.Beep(800, 1000)\n print('Testing I.....')\n self.swing(32, count=120); self.stop() #0.225 degrees \n sleep(1)\n\n winsound.Beep(1600, 1000)\n print('Testing II.....')\n self.swing(2, count=1800); self.stop() #0.05625 degrees\n \n winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)\n print(' Testings Done! 
')\n return self.stop() #set low before exist ", "def SADamageFunction(\n skill: AdventurerSkill | None,\n adventurer: \"Adventurer\",\n enemy: \"Enemy\",\n memboost: dict[str, int | float],\n combo: int,\n saRng: float,\n) -> int:\n if skill is None:\n return 0\n\n # lowercase everything\n target = skill.target.lower()\n tempBoostName = skill.tempBoost.lower()\n powerCoefficientName = skill.powerCoefficient.lower()\n powerCoefficient = 1.0\n\n if tempBoostName == \"none\":\n tempBoost = 1.0\n elif \"normal\" in tempBoostName:\n tempBoost = 1.4\n else:\n tempBoost = 1.7\n\n if skill.target == \"foe\":\n match powerCoefficientName:\n case \"low\" | \"lo\":\n powerCoefficient = 1.5\n case \"mid\" | \"medium\":\n powerCoefficient = 1.7\n case \"high\":\n powerCoefficient = 1.9\n case \"super\":\n powerCoefficient = 2.1\n case \"ultra\":\n powerCoefficient = 4.0\n else:\n match powerCoefficientName:\n case \"low\" | \"lo\":\n powerCoefficient = 1.1\n case \"mid\" | \"medium\":\n powerCoefficient = 1.15\n case \"high\":\n powerCoefficient = 1.2\n case \"super\":\n powerCoefficient = 1.4\n case \"ultra\":\n powerCoefficient = 3.6\n\n if \"physical\" in skill.type:\n stat_key = \"strength\"\n resist_key = \"physical\"\n else:\n stat_key = \"magic\"\n resist_key = \"magic\"\n\n tempPower = adventurer.stats[stat_key]\n tempPowerBoostAdv = adventurer.statsBoostAdv[stat_key]\n tempPowerBoostAst = adventurer.statsBoostAst[stat_key]\n tempMemBoost = memboost[stat_key]\n\n tempTypeResistDownBase = enemy.typeResistDownBase[resist_key]\n tempTypeResistDownAdv = enemy.typeResistDownAdv[resist_key]\n tempTypeResistDownAst = enemy.typeResistDownAst[resist_key]\n # check enemy buffs p/m resist\n tempTypeResistBuff = enemy.get_buff_mod(f\"{resist_key}_resist\")\n\n # get strength/magic debuff\n powerDebuff = adventurer.get_boostCheckAdv(False, stat_key)\n tempPowerBoostDebuff = 0.0\n if powerDebuff is not None:\n tempPowerBoostDebuff = abs(powerDebuff.modifier)\n else:\n tempPowerBoostDebuff = 0\n\n if len(skill.index_to) != 0:\n tempPower = 0\n tempPowerBoostAdv = 0.0\n tempPowerBoostAst = 0.0\n tempMemBoost = 0\n powerCoefficient = powerCoefficient * 1.96\n for index_to_attributes in skill.index_to:\n tempPower += adventurer.stats[index_to_attributes]\n tempPowerBoostAdv += adventurer.statsBoostAdv[index_to_attributes]\n tempPowerBoostAst += adventurer.statsBoostAst[index_to_attributes]\n tempMemBoost += memboost[index_to_attributes]\n tempElementBoostDebuff = 0.0\n if skill.element != \"\" and skill.noType != 1:\n # elementResistDownBase\n tempElementResistDownBase = enemy.elementResistDownBase[skill.element]\n # elementResistDownAdv\n tempElementResistDownAdv = enemy.elementResistDownAdv[skill.element]\n # elementResistDownAst\n tempElementResistDownAst = enemy.elementResistDownAst[skill.element]\n # elementDamageBoostAdv[location]\n\n tempElementDamageBoostAdv = adventurer.elementDamageBoostAdv[skill.element]\n if memboost.get(f\"{skill.element}_attack\") is not None:\n tempElementDamageBoostAdv += memboost[f\"{skill.element}_attack\"]\n # elemental damage boost from weapon\n if adventurer.stats.get(skill.element) is not None:\n tempElementDamageBoostAdv += cast(float, adventurer.stats[skill.element])\n # elementDamageBoostAst[location]\n tempElementDamageBoostAst = adventurer.elementDamageBoostAst[skill.element]\n # element debuff\n tempEleDebuff = adventurer.get_boostCheckAdv(False, f\"{skill.element}_attack\")\n if tempEleDebuff is not None:\n tempElementBoostDebuff = abs(tempEleDebuff.modifier)\n 
else:\n tempElementResistDownBase = 0.0\n tempElementResistDownAdv = 0.0\n tempElementResistDownAst = 0.0\n tempElementDamageBoostAdv = 0.0\n tempElementDamageBoostAst = 0.0\n\n if target == \"foe\":\n temptargetResistDownAdv = enemy.targetResistDownAdv[\"st\"]\n temptargetResistDownAst = enemy.targetResistDownAst[\"st\"]\n # foes\n else:\n temptargetResistDownAdv = enemy.targetResistDownAdv[\"aoe\"]\n temptargetResistDownAst = enemy.targetResistDownAst[\"aoe\"]\n\n temp_enemy_end = enemy.stats\n\n tempDamage = (\n (\n max(\n 2\n * tempPower\n * tempBoost\n * (\n 1\n + tempPowerBoostAdv\n + tempPowerBoostAst\n + tempMemBoost\n - tempPowerBoostDebuff\n )\n - temp_enemy_end[\"endurance\"],\n 0,\n )\n )\n * (\n 1\n - tempElementResistDownBase\n - tempElementResistDownAdv\n - tempElementResistDownAst\n - tempTypeResistDownBase\n - tempTypeResistDownAdv\n - tempTypeResistDownAst\n - tempTypeResistBuff\n )\n * (\n 1\n + tempElementDamageBoostAdv\n + tempElementDamageBoostAst\n - tempElementBoostDebuff\n )\n * (1 + adventurer.critPenBoost + 0.06)\n * (1 - temptargetResistDownAdv - temptargetResistDownAst)\n * powerCoefficient\n * 1.5\n * (skill.extraBoost)\n * (0.8 + combo * 0.2)\n * saRng\n )\n return int(tempDamage)", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n \n \"*** YOUR CODE HERE ***\"\n\n final_score=0.0\n #for ghosts\n Ghost_dist= []\n for ghost in newGhostStates:\n Ghost_Position = ghost.getPosition() \n d=manhattanDistance(Ghost_Position, newPos)\n Ghost_dist.append(d)\n \n for i in Ghost_dist:\n factor=1\n if(i<=1):\n if(ghost.scaredTimer==0): \n final_score=final_score-200\n else:\n final_score=final_score + 1500\n factor=-1\n\n #for capsule\n capsule_state= currentGameState.getCapsules()\n capsule_dist= []\n for capsule in capsule_state:\n b=manhattanDistance(capsule,newPos)\n capsule_dist.append(b)\n\n for j in capsule_dist:\n if(b==0):\n final_score=final_score + 100\n else:\n final_score=final_score + (10.0/b)\n\n #for food\n Food= currentGameState.getFood() \n food_list = Food.asList()\n food_pos = []\n for k in food_list:\n a=manhattanDistance(k,newPos)\n food_pos.append(a)\n for i in food_pos:\n if(i==0):\n final_score=final_score + 100\n else:\n final_score=final_score + (1.0/(i**2))\n return final_score", "def event2541():\n header(2541)\n\n if_player_has_special_effect(0, SPEFFECT.RingOfTemptationEquipped)\n wait(5.0)\n if_player_has_special_effect(1, SPEFFECT.RingOfTemptationEquipped)\n skip_if_condition_true(1, 1)\n restart()\n wait(5.0)\n if_player_has_special_effect(2, SPEFFECT.RingOfTemptationEquipped)\n skip_if_condition_true(1, 2)\n restart()\n wait(5.0)\n if_player_has_special_effect(3, SPEFFECT.RingOfTemptationEquipped)\n skip_if_condition_true(1, 3)\n restart()\n\n chr.set_special_effect(CHR.Player, SPEFFECT.RingOfTemptationActive)\n # NOTE: You can tell it's active from the humanity icon in your HUD.\n\n if_player_does_not_have_special_effect(0, SPEFFECT.RingOfTemptationEquipped)\n chr.cancel_special_effect(CHR.Player, SPEFFECT.RingOfTemptationActive)\n restart()", "def __pass_time(self):\n self.hunger += 1\n self.boredom += 1", "def run():\n\n # Set up environment and agent\n e = 
Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create learning agent\n # a = e.create_agent(RandomAgent) # create random agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01)\n # reduce update_delay to speed up simulation\n sys.stdout = open(\"./output.txt\", \"w\")\n tic = time()\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n toc = time()\n sys.stdout = sys.__stdout__\n\n print \"Totoal time used: {}.\".format(toc - tic)\n parse(\"./output.txt\")", "def update_boss_ai(ai_settings,screen,boss,boss_bullets,ai_counter,ship):\n\tif not ship.second_stage:\n\t\tif ai_counter == 60:\n\t\t\tboss_shoot(ai_settings,screen,boss,boss_bullets)\n\t\t\n\t\telif ai_counter == 80:\n\t\t\tboss_shoot(ai_settings,screen,boss,boss_bullets)\n\t\t\n\t\telif ai_counter == 100:\n\t\t\tboss_shoot(ai_settings,screen,boss,boss_bullets)\n\t\t\n\t\telif ai_counter > 120 and ai_counter < 130:\n\t\t\tai_settings.boss_bullet_color = (200, 150, 0)\n\t\t\tai_settings.boss_bullet_speed_factor += 5\n\t\t\tboss_shoot(ai_settings,screen,boss,boss_bullets)\n\t\t\tai_settings.boss_bullet_speed_factor = ai_settings.boss_bullet_speed_factorD\n\t\t\tai_settings.boss_bullet_color = (255, 255, 255)\n\t\n\tif ship.second_stage:\n\t\tif ai_counter == 60:\n\t\t\tboss_shoot(ai_settings,screen,boss,boss_bullets)\n\t\t\n\t\telif ai_counter == 70:\n\t\t\tboss_shoot(ai_settings,screen,boss,boss_bullets)\n\t\t\n\t\telif ai_counter > 75 and ai_counter < 85:\n\t\t\tai_settings.boss_bullet_color = (0, 0, 255)\n\t\t\tai_settings.boss_bullet_speed_factor += 5\n\t\t\tboss_shoot(ai_settings,screen,boss,boss_bullets)\n\t\t\tai_settings.boss_bullet_speed_factor = ai_settings.boss_bullet_speed_factorD\n\t\t\tai_settings.boss_bullet_color = (0, 0, 0)\n\t\t\n\t\telif ai_counter == 90:\n\t\t\tboss_shoot(ai_settings,screen,boss,boss_bullets)\n\t\t\n\t\telif ai_counter == 100:\n\t\t\tboss_shoot(ai_settings,screen,boss,boss_bullets)\n\t\t\n\t\telif ai_counter > 120 and ai_counter < 130:\n\t\t\tai_settings.boss_bullet_color = (0, 0, 255)\n\t\t\tai_settings.boss_bullet_speed_factor += 5\n\t\t\tboss_shoot(ai_settings,screen,boss,boss_bullets)\n\t\t\tai_settings.boss_bullet_speed_factor = ai_settings.boss_bullet_speed_factorD\n\t\t\tai_settings.boss_bullet_color = (0, 0, 0)", "def set_damage():\n\n global character\n character['Damage'] = randint(1, 6)", "def step(self, game: Game):\n\n print(\"Tick #{}\".format(game.time_left))\n\n splitValue = getSplitValue(game)\n print (getSplitValue(game))\n\n for cell in game.me.cells:\n\n if game.time_left < 6:\n cell.trade(99999)\n\n\n\n\n if cell.mass >= splitValue:\n if len(game.me.cells) < 10:\n cell.split()\n #else:\n #cell.trade(cell.mass - 100)\n else:\n distance = cell.position.distance_to(cell.target)\n possibleVictims = findVictims(cell, game.enemies)\n\n if (cell.mass <= 100):\n target = closestRessource(game, cell, possibleVictims + game.resources.allResources, len(possibleVictims))\n else:\n #cell.burst()\n target = closestRessource(game, cell, possibleVictims, len(possibleVictims))\n\n\n for e in game.enemies:\n for c in e.cells:\n if enemyComingthrough(cell, c):\n target = cell.position + (c.target - c.position)\n #cell.burst()\n pass\n\n if (target != None):\n cell.move(target)\n else:\n print (' KES TU FAIS, VA PAS LÀ ')", "def main():\n\tresults = []\n\n\tconfig = configparser.ConfigParser()\n\tconfig.read(\"simulation.ini\")\n\tsettings = 
config['sim']\n\n\tcompleted_obj_hw = int(settings[\"ClientsPerCampaign\"]) * float(settings[\"CompletedPctgHW\"])\n\texceeded_obj_hw = float(settings[\"ExceededPctgHW\"])\n\tsignificance_level = float(settings[\"SignificanceLevel\"])\n\tz_val_two_tails = scipy.stats.norm.ppf(1 - (significance_level / 2))\n\n\tprint(\"Completed Target HW: \" + str(completed_obj_hw))\n\tprint(\"Exceeded Target HW: \" + str(exceeded_obj_hw))\n\n\tcompleted_vals = []\n\texceeded_vals = []\n\tdone = False\n\n\tcompleted_avg = 0\n\texceeded_avg = 0\n\tcompleted_hw = 0\n\texceeded_hw = 0\n\n\ti = 0\n\twhile not done:\n\t\tprint(\"RUN: \" + str(i + 1))\n\t\tenv = simpy.Environment()\n\t\tsim = Simulation(env, settings, i == 0)\n\t\tsim.run()\n\t\tresults.append(sim.results)\n\t\ti += 1\n\n\t\tif settings['RunOnce'] == 'yes':\n\t\t\tprint(\"RUN ONCE\")\n\t\t\tsys.exit()\n\n\t\tcompleted_vals.append(sim.results['completed_count'])\n\t\texceeded_vals.append(sim.results['exceeded_proportion'])\n\n\t\tif i < 2:\n\t\t\tprint(\"---------------\")\n\t\t\tcontinue\n\n\t\tcompleted_avg = sum(completed_vals) / len(completed_vals)\n\t\tcompleted_S = sum([(v - completed_avg) ** 2 for v in completed_vals]) / (i - 1)\n\t\tcompleted_S = math.sqrt(completed_S)\n\t\tcompleted_hw = (z_val_two_tails * completed_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" completed HW: \" + str(completed_hw))\n\n\t\texceeded_avg = sum(exceeded_vals) / len(exceeded_vals)\n\t\texceeded_S = math.sqrt(exceeded_avg * (1 - exceeded_avg))\n\t\texceeded_hw = (z_val_two_tails * exceeded_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" exceeded HW: \" + str(exceeded_hw))\n\n\t\tif completed_hw < completed_obj_hw and exceeded_hw < exceeded_obj_hw:\n\t\t\tprint(\"END ITERATIONS\")\n\t\t\tdone = True\n\n\t\tprint(\"---------------\")\n\n\n\tfilename = 'results/Results_' + settings['FileSizeGB'] + '_' + settings['TorrentThreshold'] + '_' + settings['HTTPDownThreshold'] \\\n\t\t+ '_' + settings['HTTPUp'] + '_' + str(random.randint(0,10000)) + '.xlsx'\n\n\tprint(\"Saving XLSX to: \" + filename)\n\twb = xs.Workbook(filename)\n\n\tws = wb.add_worksheet()\n\n\tws.write(0, 1, 'Exceded')\n\tws.write(0, 2, 'Completed')\n\n\ti = 1\n\tfor result in results:\n\t\tws.write(i, 0, i)\n\t\tws.write(i, 1, result['exceeded_proportion'])\n\t\tws.write(i, 2, result['completed_count'])\n\t\ti += 1\n\n\tws.write(i, 0, 'average')\n\tws.write(i, 1, exceeded_avg)\n\tws.write(i, 2, completed_avg)\n\ti += 1\n\tws.write(i, 0, 'half width')\n\tws.write(i, 1, exceeded_hw)\n\tws.write(i, 2, completed_hw)\n\n\twb.close()", "def test_guider_start_ffsOpen(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['boss_science'])\n self._guider_start(5, 17, 0, 0)", "def plant(self):\n\t\ttic=time.clock()\n\t\tcommands=[]\n\t\tt=self.m.times\n\t\tauto=self.m.automatic\n\t\tpHeads=self.plantHeads\n\t\t#gather information about the soil at site\n\t\tdeepest=0\n\t\tdeepestPos=None\n\t\tfor h in pHeads:\n\t\t\tdepth=self.G.terrain.humusLayer.getDepth(h.getPos())\n\t\t\tassert depth>=0\n\t\t\tif depth>deepest:\n\t\t\t\tdeepest=depth\n\t\t\t\tdeepestPos=h.getPos()\n\t\tdepth=deepest\n\t\tdigTime=self.m.getDigTime(deepestPos)\n\t\tself.sim.stats['humus depths'].append(depth)\n\t\tif self.m.inverting: #determine the time. 
Dependent on digTime\n\t\t\tif self.m.invertingMethod=='KO':\n\t\t\t\tinvertTime=self.G.simParam['tCWhenInvKO']\n\t\t\telif self.m.invertingMethod=='Excavator':\n\t\t\t\tinvertTime=self.G.simParam['tInvExcavator']-digTime\n\t\t\telse:\n\t\t\t\traise Exception('cannot identify inverting method %s'%self.m.invertingMethod)\n\t\tfor pH in pHeads:\n\t\t\tpH.reset()\n\t\t\tmoundBould=[]\n\t\t\torig=pH.getPos()#middle of plantinghead\n\t\t\tboul=self.G.terrain.GetBoulders(orig, R=pH.radius)\n\t\t\troots=self.G.terrain.GetRoots(orig,R=pH.radius)\n\t\t\tdirect=self.m.direction-pi/2.+self.posCyl[1] #same dir as from machine to point\n\t\t\tsumA=0\n\t\t\timmobile=self.G.simParam['critStoneSize']\n\t\t\tdibbleDisturb=0.001\n\t\t\tself.m.stopControl()\n\t\t\tself.sim.stats['mound attempts']+=1\n\t\t\tfor r in roots: #determine if a root is hit in the critical area.\n\t\t\t\tif pH.rootCollide(r): #root is within area..\n\t\t\t\t\tprint \"striked a root..\"\n\t\t\t\t\tangle=abs(r.direction-direct)\n\t\t\t\t\tray1=[orig,fun.getCartesian([0,1],fromLocalCart=True, origin=orig, direction=r.direction)]\n\t\t\t\t\tray2=[orig,fun.getCartesian([0,1],fromLocalCart=True, origin=orig, direction=direct)]\n\t\t\t\t\tangle=fun.getAngle(ray1, ray2) #angle between root and planting head\n\t\t\t\t\tpH.strikedImmobile=True\n\t\t\t\t\tself.cmnd(commands, t['haltTime'],auto['haltMound'])\n\t\t\t\t\tfor head in pHeads: head.timeConsumption['halting']+=t['haltTime']\n\t\t\t\t\tif self.G.simParam['noRemound'] or angle>self.m.rootDegreesOK:\n\t\t\t\t\t\tself.debugPrint('pos: %s collided with root. angle was too much %s'%(str(orig), str(angle*180.0/pi)))\n\t\t\t\t\t\tpH.abort=True\n\t\t\t\t\t\tpH.done=True\n\t\t\t\t\telse: #remound\n\t\t\t\t\t\tprint \"remounds\"\n\t\t\t\t\t\tself.cmnd(commands, t['haltTime'],auto['haltMound'])\n\t\t\t\t\t\ttimeTmp=digTime+t['heapTime']\n\t\t\t\t\t\tself.cmnd(commands, timeTmp, auto['mound'])\n\t\t\t\t\t\tfor pH in pHeads:\n\t\t\t\t\t\t\tpH.timeConsumption['halting']+=t['haltTime'] #that's for both, if 2h\n\t\t\t\t\t\t\tpH.remounded=True\n\t\t\t\t\t\t\tpH.timeConsumption['mounding']+=timeTmp\n\t\t\t\t\t\t\n\n\t\t\t\t\t\n\t\t\tif not (pH.abort or pH.strikedImmobile):\n\t\t\t\tfor b in boul:\n\t\t\t\t\t#check if we are inside the scoop. It's the middle of the stone that matters\n\t\t\t\t\t#get local xy-coordinates\n\t\t\t\t\tcylPos=self.m.getCylindrical(b.pos,origin=orig, direction=direct)\n\t\t\t\t\ttwoDdist=self.m.getCartesian(cylPos, origin=orig, direction=direct, local=True)#not really optimal, could be improved\n\t\t\t\t\tinside=False #just to skip a really long if-statement\n\t\t\t\t\tif self.G.simParam['rectangular']:\n\t\t\t\t\t\tif b.radius+b.z>-pH.depth and collide(pH, b, o1pos=orig):\n\t\t\t\t\t\t\tinside=True\n\t\t\t\t\telif b.z**2+twoDdist[1]**2<(b.radius+pH.depth)**2 and collide(pH, b, o1pos=orig): #the first check is for the cylinder, through pythagoras with 2D[1] since cylinder and not sphere\n\t\t\t\t\t\tinside=True\n\t\t\t\t\tif inside: \n \t\t\t\t\t\t#old one: abs(bpos[0])<pH.width/2. 
and abs(bpos[1])<pH.length/2.:\n\t\t\t\t\t\tmoundBould.append(b)\n\t\t\t\t\t\tsumA+=b.area\n\t\t\t\t\t\tlocalPos=-twoDdist[1], b.z #2D position with z as y-axis\n\t\t\t\t\t\t#now, look how much it occuppies vertically.\n\t\t\t\t\t\ttwoDdist=self.m.getCartesian(cylPos, origin=orig, direction=direct, local=True)#not really optimal, could be improved\n\t\t\t\t\t\tif self.G.simParam['rectangular']:\n\t\t\t\t\t\t\tnodes=[(-pH.length*0.5,0), (-pH.length*0.5, -pH.depth), (pH.length*0.5, -pH.depth), (pH.length*0.5, 0)]\n\t\t\t\t\t\t\tlast=None\n\t\t\t\t\t\t\tpoints=[]\n\t\t\t\t\t\t\tfor node in nodes:#loop over the rectangle edges.\n\t\t\t\t\t\t\t\tif last:\n\t\t\t\t\t\t\t\t\tray=(last,node)\n\t\t\t\t\t\t\t\t\ttmp=col.intersectRaySphere(np.array(ray),b.radius,localPos, additionalInfo=True)\n\t\t\t\t\t\t\t\t\tif type(tmp)!=bool:\n\t\t\t\t\t\t\t\t\t\tfor point in tmp[1:]:\n\t\t\t\t\t\t\t\t\t\t\tpoints.append(list(point))\n\t\t\t\t\t\t\t\tlast=node\n\t\t\t\t\t\t\tassert len(points)!=1 #would be tangent but..\n\t\t\t\t\t\t\tupper=(-twoDdist[1], b.z+b.radius)\n\t\t\t\t\t\t\tlower=(-twoDdist[1], b.z-b.radius)\n\t\t\t\t\t\t\tif not col.pointInPolygon(upper, nodes):\n\t\t\t\t\t\t\t\tif len(points)==0: #it passed through the easy check above...\n\t\t\t\t\t\t\t\t\tupper=-pH.depth\n\t\t\t\t\t\t\t\t\tmoundBould.remove(b)\n\t\t\t\t\t\t\t\t\tsumA-=b.area\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tupper=max([p[1] for p in points])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tupper=upper[1]\n\t\t\t\t\t\t\tif not col.pointInPolygon(lower, nodes):\n\t\t\t\t\t\t\t\tif len(points)==0:\n\t\t\t\t\t\t\t\t\tlower=-pH.depth\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tlower=min([p[1] for p in points])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tlower=lower[1]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tr=b.radius\n\t\t\t\t\t\t\t#look how much of the stone that is within the scoop.\n\t\t\n\t\t\t\t\t\t\tpoints=col.circlesIntersectPoints((0,0), localPos, pH.depth, b.radius)\n\t\t\t\t\t\t\tassert points != False # we know that these circles collide.\n\t\t\t\t\t\t\tif points== True: #all of the stone inside or huge stone\n\t\t\t\t\t\t\t\tupper=b.z+b.radius\n\t\t\t\t\t\t\t\tlower=b.z-b.radius\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tupper=max(points[0][1], points[1][1])\n\t\t\t\t\t\t\t\tif col.pointInCircle((-twoDdist[1], b.z+b.radius), (0,0), pH.depth):\n\t\t\t\t\t\t\t\t\tassert b.z+b.radius>=upper\n\t\t\t\t\t\t\t\t\tupper=b.z+b.radius\n\t\t\t\t\t\t\t\tlower=min(points[0][1], points[1][1])\n\t\t\t\t\t\t\t\tif col.pointInCircle((-twoDdist[1], b.z-b.radius), (0,0), pH.depth):\n\t\t\t\t\t\t\t\t\tassert b.z-b.radius<=lower\n\t\t\t\t\t\t\t\t\tlower=b.z-b.radius\n\t\t\t\t\t\thInside=upper-lower\n\t\t\t\t\t\tassert hInside>=0\n\t\t\t\t\t\tratio=hInside/float(pH.depth)\n\t\t\t\t\t\tpH.strikedImmobile=True\n\t\t\t\t\t\tself.sim.stats['immobile boulder struck']+=1\n\t\t\t\t\t\tself.sim.stats['immobile vol sum']+=b.volume\n\t\t\t\t\t\tif ratio>self.m.immobilePercent:\n\t\t\t\t\t\t\tself.debugPrint(\"ABORTS %s percent is vertically occupided by an imobile boulder\"%str(ratio))\n\t\t\t\t\t\t\tpH.abort=True\n\t\t\t\t\t\t\tpH.done=True\n\t\t\t\t\t\t\tcommands=self.cmnd(commands, t['haltTime'],auto['haltMound'])\n\t\t\t\t\t\t\tfor head in pHeads:\n\t\t\t\t\t\t\t\thead.timeConsumption['halting']+=t['haltTime'] #that's for both, if 2h\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif b.radius>pH.biggestBlock:\n\t\t\t\t\t\t\tpH.biggestBlock=b.radius*2\n\t\t\t\tpH.moundSumA=sumA\t\t\n\t\t\tpH.moundObst=moundBould\n\t\t\th=Hole(orig,terrain=self.G.terrain,z=pH.depth, 
nodes=pH.getNodes(orig) , isSpherical=False)\n\t\t#time to mound and heap. With the Excavator inverting method, we don't take time for heaping now.\n\t\tif not self.m.inverting:\n\t\t\ttimeTmp=digTime+t['heapTime']\n\t\t\tcommands=self.cmnd(commands, timeTmp,auto['mound'])\n\t\telif self.m.inverting and self.m.invertingMethod=='KO': #heap first..\n\t\t\ttimeTmp=digTime+t['heapTime']\n\t\t\tcommands=self.cmnd(commands, timeTmp,auto['mound'])\n\t\telif self.m.inverting and self.m.invertingMethod=='Excavator': #don't heap..\n\t\t\ttimeTmp=digTime\n\t\t\tcommands=self.cmnd(commands, timeTmp,auto['mound'])\n\t\telse:\n\t\t\traise Exception('Logical error. If we are inverting, we need to use methods KO or Excavator, not %s'%self.invertingMethod)\n\t\tfor pH in pHeads:\n\t\t\tpH.timeConsumption['mounding']+=timeTmp\n\t\t#mounding failures\n\t\tfor h in self.plantHeads:\n\t\t\tif random.uniform(0,1)<self.m.moundingFailureProb and not h.remounded: #failure..\n\t\t\t\t\t\n\t\t\t\tif self.G.simParam['noRemound']:\n\t\t\t\t\th.debugPrint('failed mounding')\n\t\t\t\t\th.abort=True\n\t\t\t\telse:\n\t\t\t\t\th.debugPrint('Failed mounding.. the other heads have to wait')\n\t\t\t\t\tcommands=self.cmnd(commands, digTime+t['heapTime'],auto['mound'])\n\t\t\t\t\tfor pH in self.plantHeads:\n\t\t\t\t\t\tself.sim.stats['remound attempts']+=1\n\t\t\t\t\t\tpH.timeConsumption['mounding']+=digTime+t['heapTime']\n\t\t\t\t\t\tpH.remounded=True\n\t\t#it's time to invert\n\t\tif self.m.inverting:\n\t\t\tcommands=self.cmnd([], invertTime, auto=False)\n\t\t\treinverted=False\n\t\t\treinvertTime=digTime+t['heapTime'] #same for KO and Excv\n\t\t\tfor h in self.plantHeads:\n\t\t\t\tif pH.abort: continue\n\t\t\t\tself.sim.stats['inverting attempts']+=1\n\t\t\t\th.timeConsumption['inverting']+=invertTime\n\t\t\t\tif random.uniform(0,1)<self.m.invertFailureProb: #failure..\n\t\t\t\t\tself.debugPrint('reinverts')\n\t\t\t\t\tif self.G.simParam['noRemound']:\n\t\t\t\t\t\th.debugPrint('failed inverting')\n\t\t\t\t\t\th.abort=True\n\t\t\t\t\telif not reinverted:\n\t\t\t\t\t\treinverted=True\n\t\t\t\t\t\th.debugPrint('Failed mounding.. the other heads have to wait')\n\t\t\t\t\t\tcommands=self.cmnd(commands,reinvertTime,auto['mound'])\n\t\t\t\t\t\tfor pH in self.plantHeads:\n\t\t\t\t\t\t\tself.sim.stats['reinverting attempts']+=1\n\t\t\t\t\t\t\th.timeConsumption['inverting']+=reinvertTime\n\t\tself.plantSignals=0\n\t\tself.pHeadsUsed=0\n\t\tev=[]\n\t\tfor pH in pHeads:\n\t\t\tif not pH.abort: \n\t\t\t\tself.pHeadsUsed+=1\n\t\t\t\tpH.cause=\"plant\"\n\t\t\t\tpH.debugPrint(\"instructs pH to plant %s\")\n\t\t\t\tev.append(pH)\n\t\tif self.pHeadsUsed>0:\n\t\t\tcommands.append((\"interrupt\", ev)) #will later be recognized in run and self.interupt(pH) will be invoked. 
\n\t\t\tcommands.append((waituntil, self, self.plantingFinished)) #waits for one or both events.\n\t\tPlantingDevice.timesProf[1]+=time.clock()-tic\n\t\treturn commands", "def lab_run_small(character_id, time_step):\n pass", "def evaluateIFsetup(ants=0, timerange=3.4, deltat=0.1,\n verbose=False, iniAtten=0) :\n antlist = makeAntList(ants)\n for a in antlist:\n if a < 16:\n print \"SZA antennas only can be used - exiting\"\n return\n tpMP = \"Ifmod.ifTotalPower\"\n attenMP = \"Ifmod.totalAtten\"\n currentPos = \"AMBIENT\"\n print \"Inserting ambient load into beam\"\n rtn = amb(ants)\n pos1ants = rtn.ready\n mplist = []\n for a in pos1ants:\n prefix = \"Sza%d.\" %(a-15)\n mplist.append([prefix+tpMP, prefix+attenMP])\n #print mplist\n if len(rtn.notready) != 0:\n print \"Ambient load not inserted for ants:\", rtn.notready\n if iniAtten != None :\n print \"Setting IF atten to %.1f\" %iniAtten\n antennaIFatten(iniAtten, 1, invalidateTsys=False, \n ants=antlist)\n wait(tmo=2.0)\n t0=time.time()\n antennaIFpower(0.3, ants)\n t1 = time.time()\n cmdtime = t1-t0\n results = []\n times = []\n nsamps = int(round(timerange/deltat))\n for i in range(nsamps):\n sleep(deltat)\n dt=time.time()-t1\n times.append(dt)\n results.append(queryMpValues(mplist))\n #print results\n print \"Command time = %.3f seconds\" %cmdtime\n for j in range(len(pos1ants)):\n aname = \"Sza%d\" %(pos1ants[j]-15)\n if verbose: print \"======== %s ========\" %aname\n for i in range(nsamps):\n r = results[i][j]\n st = \"%s: %4.1f %6.3f %4.1f\" %(aname,times[i],r[0],r[1])\n if verbose: print st\n print \"\\n==== Inferred timing & final atten =====\"\n for j in range(len(pos1ants)):\n aname = \"Sza%d\" %(pos1ants[j]-15)\n finalAtten = results[nsamps-1][j][1]\n timing = times[nsamps-1]\n for i in range(nsamps):\n index = nsamps-1-i\n atten = results[index][j][1]\n if atten != finalAtten: break\n timing = times[index]\n st = \" %s: %4.1f %4.1f\" %(aname,timing, finalAtten)\n print st\n \n #print \"Moving ambient load out of the beam\"\n pos2ants = sky(ants).ready\n # Wait for two seconds to get final psys reading\n wait(tmo=2.0)\n skyants = pos2ants\n complete = []\n for a in pos1ants :\n if (pos2ants.count(a) > 0) : complete.append(a)\n return complete", "def run(num_trials):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.1, display=True) \n # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=num_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n a.performace_report(num_trials)", "def play_game(self):\n player = Player(input(\"What is your name?\"))\n while player.health > 0:\n input(\"Press t to start another turn\")\n n = random.randint(0, 3)\n if n == 0:\n if self.monster_attack(player):\n break\n elif n == 1:\n self.find_gold(player)\n else:\n print(\"Nothing happened!\")", "def test_reward_after_battle(self):\n self.assertEqual(self.alice.loyalists, 100)\n self.assertEqual(self.bob.loyalists, 100)\n\n s1 = self.battle.create_skirmish(self.alice, 50)\n s1.react(self.bob, 50, troop_type=\"cavalry\")\n\n 
self.end_battle(self.battle, self.conf)\n\n # Bob wins the fight and the war\n self.assertEqual(self.battle.victor, self.bob.team)\n\n # Alice should have gotten a 10% reward (5 troops)\n self.assertEqual(self.alice.loyalists, 105)\n # Bob gets 15% (7 troops)\n self.assertEqual(self.bob.loyalists, 107)", "def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between half of max_damage and max_damage\n \n weapon_attack_value = random.randint(self.max_damage//2, self.max_damage)\n return weapon_attack_value", "def checkhealth(currentstrength, currenthunger):\n global HUNGER\n global STRENGTH\n flash = False\n grizzly_text = \"\"\n\n if currentstrength <= 0:\n if FIGHT:\n if GRIZZLY_BEAR:\n grizzly_text = \"grizzly \"\n printmessage(\"The %sbear has killed you.\" % grizzly_text, 7, MAGENTA, 2)\n else:\n printmessage(\"You have died from severe exhaustion.\", 5, RED, 2)\n die('tooweak')\n\n for i in range(0, 5): \n strengthrange = (79, 59, 39, 19, 0)\n if currentstrength in range(strengthrange[i], strengthrange[i] + 20):\n STRENGTH = STRENGTH_TEXT[i]\n if currentstrength > 99:\n STRENGTH = STRENGTH_TEXT[0]\n if currentstrength <= 19: \n flash = True\n update_strength(flash)\n flash = False # Make sure flash isnt incorrectly set for hunger too\n\n if currenthunger <= 0:\n printmessage(\"You have died from malnutrition.\", 5, RED, 2)\n die('starved')\n\n for i in range(0, 5): \n hungerrange = (79, 59, 39, 19, 0)\n if currenthunger in range(hungerrange[i], hungerrange[i] + 20): \n HUNGER = HUNGER_TEXT[i]\n if currenthunger > 99:\n HUNGER = HUNGER_TEXT[0]\n if currenthunger <= 19: \n flash = True\n update_hunger(flash)", "def run(self):\r\n\r\n # t=0 is singular point\r\n\r\n print 'Time of laboratory clock Tw =', self.tick\r\n tt = self.tmp\r\n ll = self.lst\r\n car = self.interaction(self.carr)\r\n ll.item_run(tt, self.tick, car)\r\n tt = tt.next\r\n\r\n # run of local time\r\n\r\n while not tt is None:\r\n\r\n if tt.dedicated_node:\r\n self.tick = self.tick + 1\r\n print 'Time of laboratory clock Tw =', self.tick\r\n\r\n # self.move() # It is classical motion of particle (example).\r\n\r\n self.move_reset()\r\n car = self.interaction(self.carr)\r\n\r\n ll = self.lst\r\n while not ll is None:\r\n ll.item_run(tt, self.tick, car)\r\n ll = ll.right\r\n\r\n tt = tt.next", "def drawStats(mario, marioInfo, points, coins, startTime, level, fastMode, timesUp, coinPic, spriteCount, forceTime = None):\n ONGROUND, JUMPFRAMES, INGROUND, ISCROUCH, ONPLATFORM, ISFALLING, ISANIMATING, INVULFRAMES = 0, 1, 2, 3, 4, 5, 6, 7\n X, Y, VX, VY, DIR, STATE = 0, 1, 2, 3, 4, 5\n currentTime = 200 - int((time.get_ticks() - startTime) / 1000) # Getting the time in seconds\n if forceTime != None: # If a forced time was entered, ignore the calculated time and use the forced time\n currentTime = forceTime\n # Time checks (ignored if a forced time is entered)\n if currentTime < 100 and not fastMode and forceTime is None: # If the time is lower than 100 and fast mode hasn't been activated\n playSound(timeLowSound, \"music\") # Play the \"time low\" sound\n playSound(backgroundFastSound, \"music\", True) # Queue the sped up background music\n fastMode = True # Set fast mode to on\n if currentTime == 0 and not timesUp and forceTime is None: # If the time is 0 and the times up animation hasn't been played\n currentTime = 0 # Set the current time to zero\n marioInfo[ISANIMATING] = True # Make Mario animate\n mario[STATE] = -1 # Set his state to -1 (-1 means dead)\n 
timesUp = True # Set times up to on\n # Rendering text\n points = marioFontBig.render(\"%06i\" %int(points), False, (255,255,255))\n coins = marioFontBig.render(\"x%02i\" %int(coins), False, (255,255,255))\n world = marioFontBig.render(\"1-%i\" %int(level), False, (255,255,255))\n timer = marioFontBig.render(\"%03i\" %int(currentTime), False, (255,255,255))\n # Blitting text and coin sprite\n screen.blit(points, (75,50))\n screen.blit(marioText, (75,25))\n screen.blit(coins, (300,50))\n screen.blit(worldText, (450,25))\n screen.blit(world, (470,50))\n screen.blit(timeText, (625,25))\n screen.blit(timer, (640, 50))\n screen.blit(coinPic[int(spriteCount//2)], (275,48))\n return fastMode, timesUp # Returning fast mode and time up values (Which could stay the same)", "def speed(self, s=0):", "def simulate_memories(simulation_length):\n \n \n pass", "def action_normal(self):\n obs = self.observation\n shoot = False\n eb = self.__class__.enemy_base\n \n ammopacks = filter(lambda x: x[2] == \"Ammo\", obs.objects)\n if ammopacks:\n self.updateAllAmmoSpots(ammopacks)\n # Walk to ammo\n if obs.ammo < SUFFICIENT_AMMO:\n self.goal = self.getClosestLocation(ammopacks)\n self.motivation = MOTIVATION_AMMO\n self.debugMsg(\"*> Recharge (%d,%d)\" % (self.goal[0],self.goal[1]))\n \n '''if (obs.ammo > 0 and obs.foes):\n self.goal = self.getClosestLocation(obs.foes)\n self.debugMsg(\"*> Go to enemy (%d,%d)\" % self.goal)\n # If the enemy is within range, shoot.\n if(point_dist(self.goal, obs.loc) < self.settings.max_range\n and not line_intersects_grid(obs.loc, self.goal, self.grid, self.settings.tilesize)):\n self.debugMsg(\"*> Shoot (%d,%d)\" % self.goal)\n #if self.goal not in obs.friends:\n self.motivation = MOTIVATION_SHOOT_TARGET\n shoot = True'''\n \n # Attack strategy 1\n #########################\n # 1) Shoot live enemies #\n #########################\n # Aim at the closest enemy outside the enemy base\n if obs.ammo > 0 and obs.foes:\n living = filter(lambda x: point_dist(x[0:2], eb) > ENEMY_BASE_RANGE, obs.foes)\n self.debugMsg(\"Living: %s\" % (living,))\n if living:\n self.debugMsg(1)\n self.goal = min(living, key=lambda x: point_dist(obs.loc, x[0:2]))[0:2]\n self.motivation = MOTIVATION_SHOOT_TARGET\n self.debugMsg(2)\n # Check if enemy in fire range\n if (\n point_dist(self.goal, obs.loc) < self.settings.max_range and\n not line_intersects_grid(\n obs.loc, \n self.goal, \n self.grid, \n self.settings.tilesize\n )\n ):\n self.debugMsg(3)\n self.debugMsg(\"*> Shoot (%d,%d)\" % self.goal)\n #return self.getActionTriple(True,None,0) ###?? 
SHOULD WE STOP MOVING WHEN WE SHOOT?\n return self.getActionTriple(True)\n else:\n self.debugMsg(4)\n return self.getActionTriple()\n self.debugMsg(5)\n \n # Walk to an enemy CP\n if self.goal is None and len(self.friendlyCPs) < 2:\n self.goal = self.getClosestLocation(self.getQuietEnemyCPs())\n if self.goal:\n self.debugMsg(\"Crowded location: %d\" % self.getCrowdedValue(self.goal))\n self.motivation = MOTIVATION_CAPTURE_CP\n self.debugMsg(\"*> Capture (%d,%d)\" % (self.goal[0],self.goal[1]))\n \n '''# If you can't think of anything to do\n # at least walk to a friendly control point\n if self.goal is None:\n self.goal = self.getClosestLocation(self.getQuietRestlessFriendlyCPs())\n if self.goal:\n self.motivation = MOTIVATION_GUARD_CP\n self.debugMsg(\"*> Guard (%d,%d)\" % (self.goal[0],self.goal[1]))'''\n \n if self.goal is None:\n self.goal = max(\n self.__class__.ammoSpots,\n key=lambda x: point_dist(x, obs.loc),\n )\n self.debugMsg(\"Going to ammospot far away (%d, %d)\" % (self.goal[0],self.goal[1]))\n self.motivation = MOTIVATION_STAY_PUT\n \n\n if self.goal:\n return self.getActionTriple(shoot)\n else:\n return self.getActionTriple(shoot)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n return [a.state_action_table, a.reward_hist]", "def test_troop_cap(self):\n self.conf[\"game\"][\"troopcap\"] = 106\n\n self.assertEqual(self.alice.loyalists, 100)\n self.assertEqual(self.bob.loyalists, 100)\n\n s1 = self.battle.create_skirmish(self.alice, 50)\n s1.react(self.bob, 50, troop_type='cavalry')\n\n self.end_battle(self.battle, self.conf)\n\n # Bob wins the fight and the war\n self.assertEqual(self.battle.victor, self.bob.team)\n\n # Alice's 10% reward puts her under cap\n self.assertEqual(self.alice.loyalists, 105)\n # Bob's 15% reward puts him over\n self.assertEqual(self.bob.loyalists, 106)", "def run(self):\n game_time = 0\n while game_time < self.params.t:\n self.play_turn()\n game_time += 1\n\n reward = self.total_tx_reward\n successes = self.message_success_count\n self.reset() \n\n return reward / self.params.t, successes / self.params.t", "def event11515497():\n header(11515497)\n\n # Starts disabled.\n if_entity_health_less_than_or_equal(1, CHR.DarkSmough, 0.5)\n skip_if_condition_true(2, 1)\n wait_random_seconds(1, 2) # longer interval (for healing) above 50% health.\n skip(1)\n wait_random_seconds(0.1, 0.5)\n\n # End if battle is over.\n end_if_event_flag_on(11512001)\n\n # Fade back in with a random attack.\n flag.disable_chunk(11515470, 11515479)\n flag.enable_random_in_chunk(11515470, 11515479) # Any attack.\n\n # Wait until he fades back out naturally through AI.\n if_does_not_have_tae_event(0, CHR.DarkSmough, 700)\n if_has_tae_event(0, CHR.DarkSmough, 700)\n chr.disable(CHR.DarkSmough)\n\n restart()", "def tick(self, dt:float):\n\t\tif not self.is_over:\n\t\t\t# Gather agents commands\n\t\t\tfor pid, agent in self._agents.items():\n\t\t\t\taction = self._get_agent_input(pid, agent)\n\t\t\t\tif action: \n\t\t\t\t\tself.enqueue_action(pid, action)\n\n\t\t\t# Apply enqueued actions\n\t\t\torders_for_tick = []\n\t\t\tfor pid, action_queue in self._action_queue.items():\n\t\t\t\tif action_queue:\n\t\t\t\t\taction = 
action_queue.pop(0)\n\t\t\t\t\torders_for_tick.append((pid, action))\n\t\t\t\n\t\t\t# Randomize the order of actions?\n\t\t\trandom.shuffle(orders_for_tick)\n\t\t\tfor pid, action in orders_for_tick:\n\t\t\t\tself._apply_action(pid, action)\n\n\t\t# Apply fire!\n\t\t## Check if any players stepped into a file-zone\n\t\tfor pid, player in self._alive_players():\n\t\t\thit_list = self._collision_list(player.pos, self.fire_list)\n\t\t\tfor hit in hit_list:\n\t\t\t\tfire_owner = self.players[hit.owner_id] if hit.owner_id in self.players else None\n\n\t\t\t\t# Apply fire damage to the player\n\t\t\t\tplayer.apply_hit(self.FIRE_HIT)\n\n\t\t\t\tplayer.reward -= self.FIRE_PENALTY\n\t\t\t\tif player != fire_owner and fire_owner:\n\t\t\t\t\tfire_owner.reward += self.FIRE_REWARD\n\n\n\t\t## Apply fire damage to static entities\n\t\tfor fire in self.fire_list:\n\t\t\tfire_owner = self.players[fire.owner_id] if fire.owner_id in self.players else None\n\t\t\t\n\t\t\t## Check for fire damage of static blocks and collect rewads\n\t\t\thitblock_list = self._collision_list(fire.pos, self.value_block_list)\n\t\t\tfor block in hitblock_list:\n\t\t\t\tblock.apply_hit(self.FIRE_HIT)\n\t\t\t\tif not block.is_alive and fire_owner:\n\t\t\t\t\tfire_owner.reward += block.reward\n\t\t\t\n\t\t\t## Check for fire damage of nearby bombs and set them off\n\t\t\thitbomb_list = self._collision_list(fire.pos, self.bomb_list)\n\t\t\tfor bomb in hitbomb_list:\n\t\t\t\tbomb.apply_hit(bomb.hp)\n\n\t\t# Alive players get to pickup static rewards:\n\t\tfor pid, player in self._alive_players():\n\t\t\tif player.is_alive: # Dead players are not allowed to pickup items\n\t\t\t\t# Pickup ammo:\n\t\t\t\tammo_found = self._collision_list(player.pos, self.ammunition_list)\n\t\t\t\tfor am in ammo_found:\n\t\t\t\t\tplayer.ammo += am.value\n\t\t\t\t\tam.value = 0\n\n\t\t\t\t# Pickup treasures:\n\t\t\t\ttreasure_found = self._collision_list(player.pos, self.treasure_list)\n\t\t\t\tfor treasure in treasure_found:\n\t\t\t\t\tplayer.reward += treasure.value\n\t\t\t\t\ttreasure.value = 0\n\n\t\t# Update effects and lists\n\t\tself.__update_list(self._delayed_effects)\n\t\tself.__update_list(self.bomb_list)\n\t\tself.__update_list(self.fire_list)\n\t\tself.__update_list(self.ammunition_list)\n\t\tself.__update_list(self.value_block_list)\n\t\tself.__update_list(self.players.values())\n\t\t\n\t\t# Turn not alive player into dead bodies:\n\t\tfor pid, player in self.players.items():\n\t\t\tif not player.is_alive:\n\t\t\t\tself.dead_player_list.append(self._DeadBody(pid, player.pos))\n\n\t\t# Convert expired bomb into fire\n\t\tfor p in self.bomb_list:\n\t\t\tif not p.is_alive: # Start a fire\n\t\t\t\tself._start_fire(p.owner_id, p.pos, p.power)\n\n\t\t# Apply delayed effects\n\t\tfor p in self._delayed_effects:\n\t\t\tif not p.is_alive: # Apply delayed effect\n\t\t\t\tself._apply_effect(p.effect)\n\n\t\t# Remove expired entiries\n\t\tself._delayed_effects = self._only_alive(self._delayed_effects)\n\n\t\tself.ammunition_list =\tself._only_alive(self.ammunition_list)\n\t\tself.treasure_list = \tself._only_alive(self.treasure_list)\n\t\tself.bomb_list = \t\tself._only_alive(self.bomb_list)\n\t\tself.fire_list =\t\tself._only_alive(self.fire_list)\n\t\tself.value_block_list =\tself._only_alive(self.value_block_list)\n\t\t#self.players = dict(filter(lambda p: p.is_alive, self.players.values()))\n\n\t\t# Evaluate game termination rule\n\t\tover_iter_limit = True if self.max_iterations and self.tick_counter > self.max_iterations else 
False\n\t\thas_opponents = sum(p.is_alive for p in self.players.values()) > 1\n\t\tself.is_over = not has_opponents or over_iter_limit # There can be only one!\n\t\tself.winner = next(((pid,p) for pid,p in self.players.items() if p.is_alive), None) if self.is_over else None\n\n\t\tif not self.is_over:\t\t\n\t\t\tgame_state = self._serialize_state()\n\t\t\t# Update agents view of the world\n\t\t\tfor pid, agent in self._agents.items():\n\t\t\t\tself._update_agent(dt, pid, agent, game_state)\n\n\t\tself.tick_counter += 1", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n print 'alpha, gamma:', a.alpha, a.gamma\n print 'penalties:', a.total_penalties\n print 'total rewards:', a.total_rewards", "def simulate(seconds):\n\n #Grab the start time\n start_time = dt.datetime.now()\n\n # fill list with the start\n times_on_the_second = [start_time + dt.timedelta(seconds=x) for x in range(seconds + 1)]\n\n #end_time = start_time + dt.timedelta(seconds=seconds)\n\n end_time = times_on_the_second[-1]\n epochs = 0\n\n\n\n print(f\"Simulation started at {start_time}\")\n\n while dt.datetime.now() < end_time:\n\n while dt.datetime.now() < times_on_the_second[epochs]:\n pass\n\n for asteroid in Controller.currentAsteroids:\n asteroid.move()\n print(asteroid, F\"time: {dt.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]}\")\n epochs += 1\n\n\n\n # time.sleep(1)", "def on_update(self, delta_time: float) -> None:\n #inventory of items \"picked up\"\n hit_list = arcade.check_for_collision_with_list(self.player_sprite, self.levels[self.current_level].item_list)\n for item in hit_list:\n item.remove_from_sprite_lists()\n self.inventory += 1\n\n #update player sprite \"outfit\" is sword item is picked up\n self.player_list.update()\n self.player_list.update_animation(self.inventory)\n\n #update physics engine for player sprite and walls\n self.physics_engine.update()\n\n #go to next level\n #level 2 blocked if coin item is not picked up\n if self.player_sprite.center_y > settings.HEIGHT and self.current_level == 0 and self.inventory >= 1: \n self.current_level = 1\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = 0\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 0 and self.inventory == 0: \n self.player_sprite.center_y = settings.HEIGHT\n\n #level 3 blocked if sword item is not picked up\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 1 and self.inventory >= 2:\n self.current_level = 2\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = 0\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 1 and self.inventory == 1:\n self.player_sprite.center_y = settings.HEIGHT\n\n #go up to 
empty level after winning game\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 2:\n self.current_level = 3\n\n #go down levels\n elif self.player_sprite.center_y < 0 and self.current_level == 1:\n self.current_level = 0\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = settings.HEIGHT\n elif self.player_sprite.center_y < 0 and self.current_level == 2:\n self.current_level = 1\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = settings.HEIGHT", "def main():\n character1 = generate_random_character(\"Dr. Bones\", 100, 60, 15, 5)\n character2 = generate_random_character(\"Mr. Meeseeks\", 100, 60,\n 15, 5)\n battle = BattleSimulator(character1, character2)\n battle.simulate()", "def __init__(self):\n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToSleep','GoToPlay'])\n\n self.rate = rospy.Rate(1) \n self.counter = 0", "def run(self, start_level):\n self.world.set_level(self.world.levels[start_level])\n self.goal = self.world.level.end_time()\n\n time_of_death = None\n level_start = pygame.time.get_ticks() / 1000\n\n while True:\n\n time = (pygame.time.get_ticks() / 1000) - level_start\n\n # TODO: Remove some day\n self.stats['fps'] = self.clock.get_fps()\n\n if time > 1 and time < 3 and not self.world.stage_start:\n self.world.stage_start = True\n self.assets.sounds['incoming-alarm'].play()\n elif time > 3:\n self.world.stage_start = False\n\n if time > self.goal + 3:\n self.world.set_level(self.world.levels[self.world.level.number])\n self.goal = self.world.level.end_time()\n level_start = pygame.time.get_ticks() / 1000 # Reset timer\n continue\n\n if time > self.goal and not self.world.stage_clear:\n if self.world.level.number == len(self.world.levels):\n return 'victory' # Beat the final level\n self.world.stage_clear = True\n self.assets.sounds['level-success'].play()\n\n for event in pygame.event.get():\n if event.type == QUIT:\n return 'quit'\n elif event.type in (KEYUP, KEYDOWN):\n self.world.hero.receive_message(event.type, event.key)\n if event.key in (K_q, K_ESCAPE):\n return 'quit'\n elif event.key == K_p:\n if self.pause_game() == 'quit':\n return 'quit'\n\n self.world.update(time)\n\n if self.world.infection >= 100:\n return 'infected'\n\n self.collider.update()\n\n if self.world.hero.dead:\n if time_of_death is None:\n time_of_death = time\n else:\n if time - time_of_death > 2:\n return 'died'\n self.renderer.render()\n\n self.clock.tick(self.renderer.fps)", "async def squat(ctx):\r\n author = ctx.message.author\r\n await ctx.send(author.mention + \" puts on their game face and does \" + str(randint(2, 1000)) +\r\n \" squats in \" + str(randint(4, 90)) + \" minutes. 
Wurk it!\")\r\n ctx.counter(n)", "def simulateForHeight(rocket,H_1):\n rocket.setLandingBurnStartHeight(H_1)\n \n # Simulate the dynamics\n t_f = 100.\n x_0 = np.array([rocket.H_0,rocket.v_0,rocket.m_0, # Ideal system\n rocket.H_0,rocket.v_0,rocket.m_0 # Real system\n ])\n \n state = solve_ivp(fun = rocket.dynamics,\n t_span = (0,t_f),\n y0 = x_0,\n events = [rocket.burnEvent,rocket.ascentEvent],\n max_step = 0.01)\n \n # Extract simulation results\n t = state.t\n h = state.y[3]\n m = state.y[5]\n \n fuel_used = m[0]-m[-1]\n burn_thrust = rocket.T\n burn_time = rocket.t_b\n time_history = t\n height_history = h\n \n return fuel_used,burn_thrust,burn_time,time_history,height_history", "def event2516():\n header(2516)\n\n if_player_does_not_have_special_effect(1, SPEFFECT.PaleWhiteRune)\n\n if_event_flag_on(1, EVENT.PaleWhiteRuneActive)\n\n if_player_has_special_effect(-1, SPEFFECT.RunicHit0)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit1)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit2)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit3)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit4)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit5)\n if_condition_true(1, -1)\n\n if_condition_true(0, 1)\n\n # Random twenty-flag roll.\n flag.disable_chunk(970, 999)\n flag.enable_random_in_chunk(970, 999)\n\n # Count appropriate flag range as success and apply Pale White Rune effect (only one level).\n if_player_has_special_effect(2, SPEFFECT.RunicHit0)\n skip_if_condition_false(4, 2)\n if_at_least_one_true_flag_in_range(-2, 970, 972) # 3/30 chance at Pale White level 0.\n restart_if_condition_false(-2)\n chr.set_special_effect(CHR.Player, SPEFFECT.PaleWhiteRune)\n skip(29)\n\n if_player_has_special_effect(3, SPEFFECT.RunicHit1)\n skip_if_condition_false(4, 3)\n if_at_least_one_true_flag_in_range(-2, 970, 973) # 4/30 chance at Pale White level 1.\n restart_if_condition_false(-2)\n chr.set_special_effect(CHR.Player, SPEFFECT.PaleWhiteRune)\n skip(23)\n\n if_player_has_special_effect(4, SPEFFECT.RunicHit2)\n skip_if_condition_false(4, 4)\n if_at_least_one_true_flag_in_range(-2, 970, 974) # 5/30 chance at Pale White level 2.\n restart_if_condition_false(-2)\n chr.set_special_effect(CHR.Player, SPEFFECT.PaleWhiteRune)\n skip(17)\n\n if_player_has_special_effect(5, SPEFFECT.RunicHit3)\n skip_if_condition_false(4, 5)\n if_at_least_one_true_flag_in_range(-2, 970, 975) # 6/30 chance at Pale White level 3.\n restart_if_condition_false(-2)\n chr.set_special_effect(CHR.Player, SPEFFECT.PaleWhiteRune)\n skip(11)\n\n if_player_has_special_effect(6, SPEFFECT.RunicHit4)\n skip_if_condition_false(4, 6)\n if_at_least_one_true_flag_in_range(-2, 970, 976) # 7/30 chance at Pale White level 4.\n restart_if_condition_false(-2)\n chr.set_special_effect(CHR.Player, SPEFFECT.PaleWhiteRune)\n skip(5)\n\n if_player_has_special_effect(7, SPEFFECT.RunicHit5)\n restart_if_condition_false(7) # This shouldn't happen.\n if_at_least_one_true_flag_in_range(-2, 970, 977) # 8/30 chance at Pale White level 5.\n restart_if_condition_false(-2)\n chr.set_special_effect(CHR.Player, SPEFFECT.PaleWhiteRune)\n\n # Watch for spell being cast to end buff. 
(But doesn't need rune or runic weapon to continue.)\n # Buff lasts 15 seconds otherwise.\n\n if_player_has_special_effect(-3, SPEFFECT.SorceryCast) # Normal sorcery.\n if_player_has_special_effect(-3, SPEFFECT.SableRuneTempTrigger) # Dark sorcery.\n if_player_does_not_have_special_effect(-3, SPEFFECT.PaleWhiteRune) # Times out.\n if_condition_true(0, -3)\n\n wait(2.0) # Wait for spell projectile to land. (I assume this is needed.)\n\n chr.cancel_special_effect(CHR.Player, SPEFFECT.PaleWhiteRune)\n restart()", "def update_hp_for_higher_level(chosen_class,level):\n #Checks to see if your character is level 4,8,12,etc.\n def upgradedAbilityAt4(level):\n if level % 4 == 0:\n upgraded_ability = raw_input(\"Level \"+str(level)+\"!\\n Which two abilities would you like to upgrade? (Adds +1 to ability)\\n Please input two from str/dex/con/int/wis/cha with a space in between.\\n (ex: cha dex) \").split(' ')\n print\n #To write:\n #if either ability pushes ability score over 20, redo input\n\n \n for i in upgraded_ability:\n self.stealthUpdate(i,1)\n #class specific HP calculations\n if chosen_class == 'barbarian': \n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,12) + self.con + self.classMods[6]\n elif chosen_class == 'cleric':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'druid':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'fighter':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'monk':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'paladin':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'ranger':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'rogue':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]\n elif chosen_class == 'wizard':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]", "def run_simulation(self):\n env = simpy.Environment()\n env.process(self._simulation(env))\n env.run(until=24 * HORIZON)\n return self.total_cost, self.total_profit, self.number_of_courses", "def run():\n trials = 100\n\n multipliers = [0.25, 0.3, 0.35, 0.5, 0.75, 1, 1.25, 1.45, 1.5, 1.55, 1.6] # Coefficients for learning rate\n\n mean_penalty = []\n median_penalty = []\n std_penalty = []\n\n mean_trial_time = []\n median_trial_time = []\n std_trial_time = []\n\n mean_success_rate = []\n median_success_rate = []\n std_success_rate = []\n\n for m in multipliers:\n all_penalties = [] # All penalties from trail sets\n all_average_trial_time = []\n all_success_rates = []\n\n for i in range(0, 20):\n # print \"Trial set:\", i\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n agent = e.create_agent(LearnerAgent) # create agent\n agent.mult = m\n e.set_primary_agent(agent, enforce_deadline=True) # specify agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0, display=False) # create simulator (uses pygame when 
display=True, if available)\n\n sim.run(n_trials=trials) # run for a specified number of trials\n\n all_penalties.append(agent.all_trails_penalties)\n all_average_trial_time.append(agent.time/float(trials))\n all_success_rates.append(float(trials-agent.aborted_trials)/trials)\n\n mean_penalty.append(np.mean(all_penalties))\n median_penalty.append(np.median(all_penalties))\n std_penalty.append(np.std(all_penalties))\n\n mean_trial_time.append(np.mean(all_average_trial_time))\n median_trial_time.append(np.median(all_average_trial_time))\n std_trial_time.append(np.std(all_average_trial_time))\n\n mean_success_rate.append(np.mean(all_success_rates))\n median_success_rate.append(np.median(all_success_rates))\n std_success_rate.append(np.std(all_success_rates))\n\n for i in range(0, len(multipliers)):\n print \"\"\n print \"Multiplier:\", multipliers[i]\n print \"\"\n print \"Mean penalty per {} trials:\".format(trials), mean_penalty[i]\n print \"Median penalty per {} trials:\".format(trials), median_penalty[i]\n print \"Std.Dev. penalty per {} trials:\".format(trials), std_penalty[i]\n\n print \"\"\n print \"Mean trial time:\", mean_trial_time[i]\n print \"Median trial time:\", median_trial_time[i]\n print \"Std.Dev. trial time:\", std_trial_time[i]\n\n print \"\"\n print \"Mean success rate per {} trials:\".format(trials), mean_success_rate[i]\n print \"Median success rate per {} trials:\".format(trials), median_success_rate[i]\n print \"Std.Dev. success rate per {} trials:\".format(trials), std_success_rate[i]", "def attack(self):\n return random.randint(self.max_damage//2, self.max_damage)", "def tick(self):\n time.sleep(self.sleep_time)\n self.time += 1\n print(\"[Turn \" + str(self.time) + \"] Tick tock...\")\n directions = [(0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1)]\n for i in range(len(self.robots)):\n self.robots[i][2] = (self.robots[i][2] + self.robots[i][3]) % 8\n self.robots[i][3] = 0\n self.robots[i][0] += directions[self.robots[i][2]][0]\n self.robots[i][1] += directions[self.robots[i][2]][1]\n if self.robots[i][0] < 0 or self.robots[i][0] >= self.width or \\\n self.robots[i][1] < 0 or self.robots[i][1] >= self.height:\n self.robots = []\n raise RobotWallCrashException # A robot crashed into a wall! Simulation over!\n for j in range(len(self.robots)):\n if i != j:\n if self.robots[i][0] == self.robots[j][0] and self.robots[i][1] == self.robots[j][1]:\n self.robots = []\n raise RobotCollisionException # A robot crashed into another robot! Simulation over!\n for j in range(len(self.items)):\n if self.robots[i][0] == self.items[j][0] and self.robots[i][1] == self.items[j][1]:\n if self.items[j][2] == 1:\n self.robots = []\n raise RobotFoundTreasureException # A robot found the treasure! 
You win!\n elif self.items[j][2] == 2:\n self.robots = []\n raise RobotObjectCrashException # A robot crashed into an object!\n if random.random() > self.reliability:\n print(\"*glug-glug-glug* Oil leak detected!\")\n self.items.append([self.robots[i][0], self.robots[i][1], 2])", "def after_attack(self):\n for key in self.troop_list:\n self.troop_list[key][1].health += (5 * self.troop_list['Priest'][0])\n self.troop_list[key][1].attack += (5 * self.troop_list['BlackSmith'][0])", "def event_m20_11_x10(z120=2011000):\n \"\"\"State 0,1: Did you beat the boss?\"\"\"\n IsEventBossKill(0, z120, 0, 1)\n assert ConditionGroup(0)\n \"\"\"State 2: End state\"\"\"\n return 0", "def fire_tick(self):\n SOUNDS['fire'].play()\n for room in self.room_list:\n if room.fire_level == 0:\n for j in room.adjacent:\n if self.lookup(j).fire_level == 2 and random.random() <= room.spread_chance:\n room.fire_level = 1\n self.num_onfire += 1\n break\n elif room.fire_level == 1:\n room.fire_level = 2", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def main():\n global levels\n difficulty = select_difficulty()\n start_game(difficulty)", "def evaluationFunction(self, currentGameState, action):\r\n # Useful information you can extract from a GameState (pacman.py)\r\n successorGameState = currentGameState.generatePacmanSuccessor(action)\r\n newPos = successorGameState.getPacmanPosition()\r\n newFood = successorGameState.getFood()\r\n newGhostStates = successorGameState.getGhostStates()\r\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\r\n\r\n \"*** YOUR CODE HERE ***\"\r\n #evaluation function - assigning weights to important aspects of the game state(good food, ghosts, high scaredTimes of ghosts)\r\n #retVal - value to be returned after calculating the weights\r\n retVal = 0\r\n \r\n #Getting the distance of pacman from food\r\n foodDist = 'Inf'\r\n for i in newFood.asList():\r\n #Need to get the closest food \r\n foodDist = min(foodDist, manhattanDistance(i, newPos))\r\n \r\n #weight for min food dist\r\n if(foodDist != 0):\r\n retVal += 1.0/(1000*float(foodDist))\r\n else:\r\n retVal = 0\r\n \r\n #Getting the distance of pacman from ghosts\r\n ghostDist = 0\r\n for j in newGhostStates:\r\n #max dist from ghosts\r\n ghostDist = max(ghostDist, manhattanDistance(j.getPosition(), newPos))\r\n #min dist from ghosts\r\n ghostMinDist = min('Inf', manhattanDistance(j.getPosition(), newPos))\r\n \r\n #weight for min dist from ghost\r\n if ghostMinDist < 2:\r\n retVal -= 1000\r\n \r\n #Getting the scaredTimes of the ghosts and adding weights\r\n for k in newScaredTimes:\r\n retVal += k \r\n \r\n #Final retVal\r\n retVal = retVal + successorGameState.getScore()\r\n \r\n return retVal" ]
[ "0.6155685", "0.6128568", "0.61060804", "0.6067592", "0.6018623", "0.5902906", "0.5877961", "0.5872772", "0.5862628", "0.58594483", "0.57941294", "0.5765011", "0.57348096", "0.5728712", "0.57282984", "0.5714876", "0.5714854", "0.5682435", "0.56775427", "0.5648126", "0.5637001", "0.5620304", "0.56162286", "0.5608905", "0.56052065", "0.56012285", "0.5587965", "0.5584335", "0.5582074", "0.55809677", "0.5576884", "0.55667365", "0.55546105", "0.5554421", "0.5552628", "0.55470115", "0.5544984", "0.55350536", "0.55310607", "0.55221426", "0.5511048", "0.5504773", "0.55009395", "0.54985726", "0.5486476", "0.54843134", "0.5481763", "0.5473105", "0.5472987", "0.547069", "0.54694283", "0.5467384", "0.5466807", "0.5459629", "0.5459249", "0.54567456", "0.5454835", "0.5447846", "0.5442127", "0.54418766", "0.54408246", "0.5438153", "0.54344606", "0.54328996", "0.54315794", "0.5426965", "0.54254735", "0.54235226", "0.5419621", "0.54149044", "0.54143125", "0.54131335", "0.5408056", "0.5406691", "0.54037267", "0.5394457", "0.5387831", "0.53857577", "0.5383029", "0.5377929", "0.537435", "0.53729945", "0.5371129", "0.5370304", "0.5370093", "0.536779", "0.53673923", "0.5367004", "0.536663", "0.5359953", "0.5358073", "0.5357762", "0.53531027", "0.5353047", "0.5349363", "0.53485423", "0.5341989", "0.53300524", "0.5325099", "0.5323157" ]
0.6836062
0
Uses average damage to determine the time (in ticks) needed to level up Attack or Strength.
The enemy has 1 Defence and a defence bonus of 0.
The weapon is the best scimitar available at the character's Attack level.
def level_time_average(start_levels, attack_style, attack_bonus, strength_bonus): ticks_per_attack = 4 # Scimitar attack speed max_hit, accuracy = get_max_hit_and_accuracy( start_levels, attack_style, attack_bonus, strength_bonus) if attack_style == Attack_Style.ATTACK: start_exp = osrs.experience[start_levels.attack] end_exp = osrs.experience[start_levels.attack+1] elif attack_style == Attack_Style.STRENGTH: start_exp = osrs.experience[start_levels.strength] end_exp = osrs.experience[start_levels.strength+1] experience = end_exp - start_exp avg_hit = accuracy * max_hit / 2 exp_per_hit = avg_hit * osrs.BASE_EXP_PER_DAMAGE ticks = experience / exp_per_hit * ticks_per_attack return ticks
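As a quick sanity check of the arithmetic in `level_time_average`, the sketch below reproduces the same tick estimate with hard-coded illustrative numbers. The max hit, accuracy, XP-per-damage rate, and experience gap are all assumptions chosen for demonstration, not values taken from the module's `osrs` tables or `get_max_hit_and_accuracy`; only the 4-tick scimitar speed mirrors the function above.

```python
# Minimal, self-contained sketch of the same estimate with illustrative numbers.
# All concrete values below are assumptions for demonstration, not module constants.

TICKS_PER_ATTACK = 4         # scimitar attack speed, as in level_time_average
BASE_EXP_PER_DAMAGE = 4.0    # assumed XP awarded per point of damage dealt
max_hit = 11                 # assumed max hit for the example levels/bonuses
accuracy = 0.75              # assumed chance that an attack lands

experience_to_next_level = 10_000  # assumed XP gap between the two levels

avg_hit = accuracy * max_hit / 2            # mean damage per attack (uniform 0..max_hit on a hit)
exp_per_hit = avg_hit * BASE_EXP_PER_DAMAGE # mean XP gained per attack
ticks = experience_to_next_level / exp_per_hit * TICKS_PER_ATTACK

print(f"estimated ticks: {ticks:.0f} (~{ticks * 0.6 / 3600:.2f} hours at 0.6 s per tick)")
```

With these assumed inputs the estimate comes out to roughly 2,400 ticks; the real function replaces the hard-coded numbers with the character's levels, equipment bonuses, and the experience table.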
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between\n # half of max_damage and max_damage\n print(\"max damage of \" + self.name + \" is \")\n print(str(self.attack_strength))\n min_damage = self.attack_strength // 2\n weapon_attack_value = random.randint(min_damage, self.attack_strength)\n return weapon_attack_value", "def take_damage(self, dmg, dtype = 1):\n self.game.hit_sound.play()\n \n #DR% = 1 - (100 / x). \n damageMultiplier = 100.0 / float(self.defense)\n #Apply defense buffs/debuffs\n #calculate damage:\n dmg -= self.absorbtion\n dmg *= damageMultiplier\n #apply damage\n self.hp[0] -= dmg", "def do_damage(self) -> float:\n res = 0.05 + self.experience / 100\n self.experience = self.experience + 1\n return res", "def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between half of max_damage and max_damage\n \n weapon_attack_value = random.randint(self.max_damage//2, self.max_damage)\n return weapon_attack_value", "def enemyrawdmg(self):\n\n enemystr = globalvalues.ai.getstatus()[3]\n # rngfactor will ensure that regular mobs won't absolutely crush you\n rngfactor = float(float(random.randint(45, 65)) / 100)\n level = (\n globalvalues.p1.getlevel()\n - globalvalues.ai.getstatus()[0]\n )\n lvlfactor = float(1 - level * 0.05)\n\n return int((enemystr) * 102 * 0.12 * rngfactor * lvlfactor)", "def takeHit(self, amount, type, enemyShip):\n if type == 'energy':\n # go through shields in quadrant first\n if self.currentSP > 0:\n if self.currentSP >= amount:\n self.currentSP -= amount\n amount = 0\n else:\n amount -= self.currentSP\n self.currentSP = 0\n # go through armor next\n if self.currentAP > 0 and amount > 0:\n # set experience only if shot goes through shields\n if self.typeAP == 'energy':\n if self.currentAP >= (amount * globals.reflectiveArmorModifier):\n self.currentAP -= (amount * globals.reflectiveArmorModifier)\n amount = 0\n else:\n amount -= (self.currentAP/globals.reflectiveArmorModifier)\n self.currentAP = 0\n else:\n if self.currentAP >= amount:\n self.currentAP -= amount\n amount = 0\n else:\n amount -= self.currentAP\n self.currentAP = 0\n elif type == 'impact':\n # go through shields in quadrant first\n if self.currentSP > 0:\n if self.currentSP >= amount:\n self.currentSP -= amount/2\n amount = amount/2\n else:\n amount -= self.currentSP\n self.currentSP = 0\n \n # now goto armor\n if self.currentAP > 0 and amount > 0:\n if self.typeAP == 'impact':\n if self.currentAP >= (amount * globals.impactArmorModifier):\n self.currentAP -= (amount * globals.impactArmorModifier)\n amount = 0\n else:\n amount -= (self.currentAP/globals.impactArmorModifier)\n self.currentAP = 0\n else:\n if self.currentAP >= amount:\n self.currentAP -= amount\n amount = 0\n else:\n amount -= self.currentAP\n self.currentAP = 0\n \n # now that shields and armor are taken care of transfer remaining damage to internal components\n self.myParent.setExperience(amount, enemyShip)\n componentDamage = 0\n if amount > 0 and self.components != {}:\n while amount > 0:\n keyList = funcs.sortStringList(self.components.keys())\n componentDamage = 1\n for componentID in keyList:\n component = self.components[componentID]\n if component.currentHP > amount:\n component.currentHP -= amount\n amount = 0\n break\n elif component.currentHP > 0:\n # remove component\n amount -= component.currentHP\n del self.components[componentID]\n \n # check if all components destroyed, 
or damage absorbed\n if self.components == {} or amount == 0:\n break\n \n if componentDamage == 1:\n self.setMyStatus()\n self.myParent.setMyStatus()\n \n if amount > 0:\n if self.myParent.currentISP > amount:\n self.myParent.currentISP -= amount\n self.myParent.setMyStatus()\n amount = 0\n else:\n self.myParent.destroyMe()\n amount = 0\n \n self.myParent.updateAllGUIValues()", "def average_damage(self) -> float:\r\n number_of_dice = int(self.damage.split(\"d\")[0])\r\n damage_of_dice = int(self.damage.split(\"d\")[1])\r\n average_damage = (number_of_dice + number_of_dice * damage_of_dice) / 2\r\n return average_damage", "def get_weapon_stats(attack_level):\n if attack_level >= 60:\n # Dragon scimitar\n return (67, 66)\n elif attack_level >= 40:\n # Rune scimitar\n return (45, 44)\n elif attack_level >= 30:\n # Adamant scimitar\n return (29, 28)\n elif attack_level >= 20:\n # Mithril scimitar\n return (21, 20)\n elif attack_level >= 10:\n # Black scimitar\n return (19, 14)\n elif attack_level >= 5:\n # Steel scimitar\n return (15, 14)\n else:\n # Iron scimitar\n return (10, 9)", "def take_damage(self, damage):\n alive_units = self.get_alive_units()\n units_count = len(alive_units)\n if units_count == 0:\n return\n damage_for_unit = damage / units_count\n for unit in alive_units:\n unit.take_damage(damage_for_unit)", "def attack(self):\n\t if self.damage == 0:\n\t\treturn None\n\t elif self.name == \"die\":\n\t roll = random.randint(1,20)\n\t if roll == 1:\n\t return 0\n\t else:\n\t return 1\n\t elif self.damage == 1 or self.damage == 2:\n\t\treturn self.damage\n\t elif self.damage == 3:\n\t\treturn random.randint(3,5)\n\t elif self.damage == -4:\n\t return 4\n\t elif self.damage == 10:\n\t\trandomInt = random.randint(1,4)\n\t\tif randomInt == 1:\n\t\t return 10\n\t\telse:\n\t\t return 0\n\t else:\n\t return self.damage", "def attack(self):\n return random.randint(self.max_damage//2, self.max_damage)", "def take_damage(self, damage):\n attack = damage - (0.05 + self.__experience / 1000)\n self.set_health(self.get_health - attack)", "def attack(health_meter):\n hit_list = 4 * ['igrac'] + 6 * ['neprijatelj']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"NAPAD! \", end='')\n show_health(health_meter)", "def attack(health_meter):\n hit_list = 4 * ['player'] + 6 * ['enemy']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"ATTACK! 
\", end='')\n show_health(health_meter)", "def damage(self, damage):\n return self.damage", "def _attack(self,target):\r\n damage = self.get_strength() * self.get_lvl()\r\n target.receive_damage(damage)", "def attack(self):\n if random.random() < self.chance_critical:\n return self.strength * 2\n return self.strength", "def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)", "def do_damage(self) -> float:\n sum = 0\n for operator in self.__operators:\n if operator.is_alive:\n operator.experience += 1\n sum += operator.experience / 100\n return 0.1 + sum", "def ability_1(self,target):\r\n damage = (self.get_strength()+2)\r\n target.receive_damage(damage)", "def damage(self):\n out = (self.blurbs[self.state][\"damage\"])\n self.next_state(\"damage\")\n return out", "def shoot(self, a_fighter):\n if self.get_ammos()>0:\n lostPoints = int(self.get_damage() / a_fighter.get_agility())\n lostPoints = int(lostPoints * uniform(0.5,1)) # some random added\n a_fighter.__health_points = a_fighter.get_health_points() - lostPoints\n self.__ammos -= 1 # remove one ammo\n return a_fighter.get_health_points()", "def __attackDamage(self, attack, suit=0):\n if suit:\n for dmg in attack[SUIT_HP_COL]:\n if (dmg > 0):\n return dmg\n return 0\n else:\n for dmg in attack[TOON_HP_COL]:\n if (dmg > 0):\n return dmg\n return 0", "def take_damage(self, damage):\n list_operators_experience = [i.get_experience / 1000 for i in\n self.operators]\n damage -= 0.1 + sum(list_operators_experience)\n # 60% of damage will receive a vehicle\n self.set_health(self.get_health - damage * 0.6)\n # A random operator, who will receive 20% of damage.\n random_operator = random.randint(0, len(self.operators) - 1)\n j = 0\n while j < len(self.operators):\n if j == random_operator:\n self.operators[j].take_damage(damage * 0.2)\n else:\n self.operators[j].take_damage(damage * 0.1)\n j += 1", "def ability_2(self,target):\r\n damage1 = (self.get_lvl()+self.get_strength())\r\n target.receive_damage(damage1)", "def get_damage():\n\n return character['Damage']", "def heavy_attack(self, enemy):\n #Generate damage\n damage = random.randint(0, 50)\n \n #All pykemon will have a list moves = [light, heavy, restore, special]\n #All heavy attacks will appear at index 1 in the list moves\n #This attribute will be initialized in the child class\n print(\"Pykemon \" + self.name + \" used \" + self.moves[1] + \".\")\n\n #Dealt no damage\n if damage < 10:\n print(\"The attack missed!!!\")\n else:\n print(\"It dealt \" + str(damage) + \" damage.\")\n #Deal the damage to the enemy\n enemy.current_health -= damage", "def update_hp_for_higher_level(chosen_class,level):\n #Checks to see if your character is level 4,8,12,etc.\n def upgradedAbilityAt4(level):\n if level % 4 == 0:\n upgraded_ability = raw_input(\"Level \"+str(level)+\"!\\n Which two abilities would you like to upgrade? 
(Adds +1 to ability)\\n Please input two from str/dex/con/int/wis/cha with a space in between.\\n (ex: cha dex) \").split(' ')\n print\n #To write:\n #if either ability pushes ability score over 20, redo input\n\n \n for i in upgraded_ability:\n self.stealthUpdate(i,1)\n #class specific HP calculations\n if chosen_class == 'barbarian': \n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,12) + self.con + self.classMods[6]\n elif chosen_class == 'cleric':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'druid':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'fighter':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'monk':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'paladin':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'ranger':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'rogue':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]\n elif chosen_class == 'wizard':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]", "def getDamage(self, player, is_random=True):\n \n if \"restrained\" in self.debuffs:\n return 0, 0\n \n mitigation, num_cats = player.getCatBonus(player.defending_kittens,\n \"defending\")\n raw_dmg = random.randint(self._damage[0], self._damage[1])\n \n true_dmg = raw_dmg - mitigation\n if true_dmg < 0:\n true_dmg = 0\n \n return true_dmg, num_cats", "def reportDamage(self,damage):\n\t\tdefense = self.getDefense()\n\t\tif(type(damage) is int):\n\t\t\tif(damage >= 0):\n\t\t\t\tif(defense <= damage):\n\t\t\t\t\tself.__del__()\n\t\t\t\t\treturn 0\n\t\t\t\tif(defense > damage):\n\t\t\t\t\trecalculateddefense = defense - damage\n\t\t\t\t\tself.maneuverability = 1\n\t\t\t\t\tself.protection = recalculateddefense\n\t\t\t\t\treturn self.getDefense()", "def on_deal_dmg(self, target, friendly):\n if self.hurt:\n self.dfs -= target.atk\n if self.dfs <= 0 or target.poison:\n self.dead = True\n if target.hurt:\n target.dfs -= self.atk\n if target.dfs <= 0 or self.poison:\n target.dead = True\n\n # some special events may take place here\n # ... 
\n return self.atk", "def get_damage(self):\n return self.__damage", "def deal_damage(self, damage):\n # Another cool trick\n self.current_health = max(\n 0,\n self.current_health-damage\n )", "def getDamage(self):\n \n weapon_dmg = self.weapon.getDamage()\n cat_bonus, att_cats = self.getCatBonus(self.attacking_kittens,\n \"attacking\")\n true_dmg = weapon_dmg + cat_bonus + self.getBonusDamageFromInsanity()\n return true_dmg, att_cats", "def checkhealth(currentstrength, currenthunger):\n global HUNGER\n global STRENGTH\n flash = False\n grizzly_text = \"\"\n\n if currentstrength <= 0:\n if FIGHT:\n if GRIZZLY_BEAR:\n grizzly_text = \"grizzly \"\n printmessage(\"The %sbear has killed you.\" % grizzly_text, 7, MAGENTA, 2)\n else:\n printmessage(\"You have died from severe exhaustion.\", 5, RED, 2)\n die('tooweak')\n\n for i in range(0, 5): \n strengthrange = (79, 59, 39, 19, 0)\n if currentstrength in range(strengthrange[i], strengthrange[i] + 20):\n STRENGTH = STRENGTH_TEXT[i]\n if currentstrength > 99:\n STRENGTH = STRENGTH_TEXT[0]\n if currentstrength <= 19: \n flash = True\n update_strength(flash)\n flash = False # Make sure flash isnt incorrectly set for hunger too\n\n if currenthunger <= 0:\n printmessage(\"You have died from malnutrition.\", 5, RED, 2)\n die('starved')\n\n for i in range(0, 5): \n hungerrange = (79, 59, 39, 19, 0)\n if currenthunger in range(hungerrange[i], hungerrange[i] + 20): \n HUNGER = HUNGER_TEXT[i]\n if currenthunger > 99:\n HUNGER = HUNGER_TEXT[0]\n if currenthunger <= 19: \n flash = True\n update_hunger(flash)", "def take_damage(self, value, type_=None):\n if type_ in self.resistances:\n taken = math.floor(value / 2)\n # TODO (phillip): event log should show that damage was reduced\n elif type_ in self.vulnerabilities:\n taken = value * 2\n else:\n taken = value\n\n # Only used to return at the end\n actual_taken = min(self.hp, taken)\n\n self.hp -= taken\n if self.hp < -self.max_hp:\n # TODO (phillip): Implement creature death\n pass\n\n self.hp = max(0, self.hp)\n return actual_taken", "def get_damage(self, amount: float) -> None:\n self.health = self.health - amount * self.DMG_TO_VEHICLE\n rnd_operator = random.choice(self.__operators)\n rnd_operator.get_damage(amount * self.DMG_TO_ONE_OPER)\n for operator in self.__operators:\n if operator != rnd_operator:\n operator.get_damage(amount * self.DMG_TO_OPER)\n self.estimate_total_health()\n self.check_is_alive()", "def level_time_simulate(start_levels, attack_style, attack_bonus, strength_bonus):\n ticks_per_attack = 4 # Scimitar attack speed\n enemy_health = 60 # Sand crab health\n\n max_hit, accuracy = get_max_hit_and_accuracy(\n start_levels, attack_style, attack_bonus, strength_bonus)\n \n if attack_style == Attack_Style.ATTACK:\n start_exp = osrs.experience[start_levels.attack]\n end_exp = osrs.experience[start_levels.attack+1]\n elif attack_style == Attack_Style.STRENGTH:\n start_exp = osrs.experience[start_levels.strength]\n end_exp = osrs.experience[start_levels.strength+1]\n \n experience = end_exp - start_exp\n avg_ticks = combat_simulator.ticks_until_exp(max_hit, accuracy,\n ticks_per_attack, enemy_health, experience,\n osrs.BASE_EXP_PER_DAMAGE, ITERATIONS)\n return avg_ticks", "def get_damage(self, amount: float) -> None:\n self.health = self.health - amount", "def take_damage(self, damage):\n damage /= len(self.__units)\n for i in self.__units:\n i.take_damage(damage)", "def ability_4(self,target):\r\n damage = (self.get_strength()*3)\r\n target.receive_damage(damage)", "def damage(self):\n if 
not self.damage_mode and not self.attack_mode and not self.death_mode:\n self.damage_mode = True\n self.cut_frame_update = 0", "def take_damage(self, damage):\n if random.random() < self.chance_dodge:\n self.set_health(self.health - damage)\n return True\n return False", "def set_damage():\n\n global character\n character['Damage'] = randint(1, 6)", "def attack_success(self) -> float:\n return (0.5 * (1 + self.health / 100) *\n random.randint(50 + self.experience, 100) / 100)", "def deal_dmg(self):\n return self.damage", "def SADamageFunction(\n skill: AdventurerSkill | None,\n adventurer: \"Adventurer\",\n enemy: \"Enemy\",\n memboost: dict[str, int | float],\n combo: int,\n saRng: float,\n) -> int:\n if skill is None:\n return 0\n\n # lowercase everything\n target = skill.target.lower()\n tempBoostName = skill.tempBoost.lower()\n powerCoefficientName = skill.powerCoefficient.lower()\n powerCoefficient = 1.0\n\n if tempBoostName == \"none\":\n tempBoost = 1.0\n elif \"normal\" in tempBoostName:\n tempBoost = 1.4\n else:\n tempBoost = 1.7\n\n if skill.target == \"foe\":\n match powerCoefficientName:\n case \"low\" | \"lo\":\n powerCoefficient = 1.5\n case \"mid\" | \"medium\":\n powerCoefficient = 1.7\n case \"high\":\n powerCoefficient = 1.9\n case \"super\":\n powerCoefficient = 2.1\n case \"ultra\":\n powerCoefficient = 4.0\n else:\n match powerCoefficientName:\n case \"low\" | \"lo\":\n powerCoefficient = 1.1\n case \"mid\" | \"medium\":\n powerCoefficient = 1.15\n case \"high\":\n powerCoefficient = 1.2\n case \"super\":\n powerCoefficient = 1.4\n case \"ultra\":\n powerCoefficient = 3.6\n\n if \"physical\" in skill.type:\n stat_key = \"strength\"\n resist_key = \"physical\"\n else:\n stat_key = \"magic\"\n resist_key = \"magic\"\n\n tempPower = adventurer.stats[stat_key]\n tempPowerBoostAdv = adventurer.statsBoostAdv[stat_key]\n tempPowerBoostAst = adventurer.statsBoostAst[stat_key]\n tempMemBoost = memboost[stat_key]\n\n tempTypeResistDownBase = enemy.typeResistDownBase[resist_key]\n tempTypeResistDownAdv = enemy.typeResistDownAdv[resist_key]\n tempTypeResistDownAst = enemy.typeResistDownAst[resist_key]\n # check enemy buffs p/m resist\n tempTypeResistBuff = enemy.get_buff_mod(f\"{resist_key}_resist\")\n\n # get strength/magic debuff\n powerDebuff = adventurer.get_boostCheckAdv(False, stat_key)\n tempPowerBoostDebuff = 0.0\n if powerDebuff is not None:\n tempPowerBoostDebuff = abs(powerDebuff.modifier)\n else:\n tempPowerBoostDebuff = 0\n\n if len(skill.index_to) != 0:\n tempPower = 0\n tempPowerBoostAdv = 0.0\n tempPowerBoostAst = 0.0\n tempMemBoost = 0\n powerCoefficient = powerCoefficient * 1.96\n for index_to_attributes in skill.index_to:\n tempPower += adventurer.stats[index_to_attributes]\n tempPowerBoostAdv += adventurer.statsBoostAdv[index_to_attributes]\n tempPowerBoostAst += adventurer.statsBoostAst[index_to_attributes]\n tempMemBoost += memboost[index_to_attributes]\n tempElementBoostDebuff = 0.0\n if skill.element != \"\" and skill.noType != 1:\n # elementResistDownBase\n tempElementResistDownBase = enemy.elementResistDownBase[skill.element]\n # elementResistDownAdv\n tempElementResistDownAdv = enemy.elementResistDownAdv[skill.element]\n # elementResistDownAst\n tempElementResistDownAst = enemy.elementResistDownAst[skill.element]\n # elementDamageBoostAdv[location]\n\n tempElementDamageBoostAdv = adventurer.elementDamageBoostAdv[skill.element]\n if memboost.get(f\"{skill.element}_attack\") is not None:\n tempElementDamageBoostAdv += 
memboost[f\"{skill.element}_attack\"]\n # elemental damage boost from weapon\n if adventurer.stats.get(skill.element) is not None:\n tempElementDamageBoostAdv += cast(float, adventurer.stats[skill.element])\n # elementDamageBoostAst[location]\n tempElementDamageBoostAst = adventurer.elementDamageBoostAst[skill.element]\n # element debuff\n tempEleDebuff = adventurer.get_boostCheckAdv(False, f\"{skill.element}_attack\")\n if tempEleDebuff is not None:\n tempElementBoostDebuff = abs(tempEleDebuff.modifier)\n else:\n tempElementResistDownBase = 0.0\n tempElementResistDownAdv = 0.0\n tempElementResistDownAst = 0.0\n tempElementDamageBoostAdv = 0.0\n tempElementDamageBoostAst = 0.0\n\n if target == \"foe\":\n temptargetResistDownAdv = enemy.targetResistDownAdv[\"st\"]\n temptargetResistDownAst = enemy.targetResistDownAst[\"st\"]\n # foes\n else:\n temptargetResistDownAdv = enemy.targetResistDownAdv[\"aoe\"]\n temptargetResistDownAst = enemy.targetResistDownAst[\"aoe\"]\n\n temp_enemy_end = enemy.stats\n\n tempDamage = (\n (\n max(\n 2\n * tempPower\n * tempBoost\n * (\n 1\n + tempPowerBoostAdv\n + tempPowerBoostAst\n + tempMemBoost\n - tempPowerBoostDebuff\n )\n - temp_enemy_end[\"endurance\"],\n 0,\n )\n )\n * (\n 1\n - tempElementResistDownBase\n - tempElementResistDownAdv\n - tempElementResistDownAst\n - tempTypeResistDownBase\n - tempTypeResistDownAdv\n - tempTypeResistDownAst\n - tempTypeResistBuff\n )\n * (\n 1\n + tempElementDamageBoostAdv\n + tempElementDamageBoostAst\n - tempElementBoostDebuff\n )\n * (1 + adventurer.critPenBoost + 0.06)\n * (1 - temptargetResistDownAdv - temptargetResistDownAst)\n * powerCoefficient\n * 1.5\n * (skill.extraBoost)\n * (0.8 + combo * 0.2)\n * saRng\n )\n return int(tempDamage)", "def ship_took_damage(self, damage: Damage):\n pass", "def ability_3(self,target):\r\n damage = (self.get_dexterity()+self.get_strength())\r\n target.receive_damage(damage)", "def attack(self, enemy):\r\n best_weapon = None\r\n max_damage = 0\r\n # Searches your inventory for your highest damaging weapon\r\n for i in self._inventory:\r\n if isinstance(i, items.Weapon):\r\n if i._damage > max_damage:\r\n best_weapon = i\r\n max_damage = i._damage\r\n\r\n print(\"You use {} against {}!\".format(best_weapon._name, enemy._name))\r\n enemy._health_points -= best_weapon._damage\r\n if not enemy.is_alive():\r\n print(\"You've killed {}!\".format(enemy._name))\r\n\r\n else:\r\n print(\"The {} isn't dead yet. It has {} health remaining. 
Keep fighting!\".format(enemy._name, enemy._health_points))", "def __init__(self, attacker, target, damage,\n ticker='Unknown', weapon='Unknown', enemy_ships=[]):\n self._attacker = attacker\n self._target = target\n self._damage = list(damage)\n self._ticker = ticker\n self._weapon = weapon or 'Unknown'\n self._enemy_ships = ', '.join(enemy_ships) or 'Unknown'\n self._total_damage = sum(d[1] for d in self._damage)\n if self._damage:\n self._start_time = self._damage[0][0]\n self._end_time = self._damage[-1][0]\n else:\n self._start_time = None\n self._end_time = None", "def get_damage_roll(self):\n\t\tif self.difficulty == 1:\n\t\t\treturn 4\n\t\tif self.difficulty == 2:\n\t\t\treturn 6\n\t\tif self.difficulty == 3:\n\t\t\treturn 8\n\t\tif self.difficulty > 3:\n\t\t\treturn 10", "def attack(self):\n \n half_max_damage = int(self.max_damage) // 2\n random_value = randint(half_max_damage, int(self.max_damage))\n\n return random_value", "def attack1(self, command):\n\n if random.randint(1,3) == 1 or random.randint(1,3) == 3:\n p.health -= self.weapon[0].damage\n print(\"You've been hit! \\nHealth at \" + str(p.health))\n else:\n print('Enemy tried to attack, missed!')", "def take_damage(self):\n if self.cur_hp > 0:\n self.cur_hp -= 1\n self.hp_rect.width = (self.cur_hp / self.max_hp) * self.hp_bar_width", "def damageSubtractor(self, damage, target, caller):\n # Build the target av objects\n target_shield_value = target.db.shield_value # Applied conditionally\n target_armor = target.db.armor\n target_tough = target.db.tough\n target_armor_specialist = target.db.armor_specialist\n\n # Apply damage in order\n if target_shield_value:\n # Get value of shield damage to check if it's under 0. Need to pass\n # this on to armor\n shield_damage = target_shield_value - damage\n if shield_damage < 0:\n # Check if damage would make shield go below 0\n damage = abs(shield_damage)\n # Set shield_value to 0\n target.db.shield_value = 0\n # Recalc and set av with new shield value\n else:\n target.db.shield_value = shield_damage\n damage = 0\n\n if target_armor_specialist and damage:\n # Get value of damage\n armor_specialist_damage = target_armor_specialist - damage\n if armor_specialist_damage < 0:\n damage = abs(armor_specialist_damage)\n target.db.armor_specialist = 0\n else:\n target.db.armor_specialist = armor_specialist_damage\n damage = 0\n\n if target_armor and damage:\n # Get value of damage\n armor_damage = target_armor - damage\n if armor_damage < 0:\n damage = abs(armor_damage)\n target.db.armor = 0\n else:\n target.db.armor = armor_damage\n damage = 0\n\n if target_tough and damage:\n tough_damage = target_tough - damage\n if tough_damage < 0:\n damage = abs(tough_damage)\n target.db.tough = 0\n else:\n target.db.tough = tough_damage\n damage = 0\n else:\n self.deathSubtractor(damage, target, caller)\n\n new_av = self.updateArmorValue(target.db.shield_value, target.db.armor, target.db.tough, target.db.armor_specialist)\n\n return new_av", "def __attackDamageForTgt(self, attack, tgtPos, suit=0):\n if suit:\n return attack[SUIT_HP_COL][tgtPos]\n else:\n return attack[TOON_HP_COL][tgtPos]", "def take_damage(self, damage):\n if self.hp - damage <= 0:\n self.hp = 0\n self.die()\n else:\n self.hp -= damage", "def attackProcess(attack: \"Attack\", attacker: \"PlayerCharacter or Monster\", enemies: list, targetID: int):\n hits, target, ail = 1, [targetID], None\n damageRange, critChance, critMultiplier = (95, 105), 0.1, 2\n if (attack.special):\n hits = attack.special[\"HITS\"] if \"HITS\" in 
attack.special.keys() else 1\n target = attack.special[\"TARGET\"] if \"TARGET\" in attack.special.keys() else target\n ail = attack.special[\"INFLICT\"] if \"INFLICT\" in attack.special.keys() else None\n damageRange = attack.special[\"DAMAGE RANGE\"] if \"DAMAGE RANGE\" in attack.special.keys() else damageRange\n critChance = attack.special[\"CRIT CHANCE\"] if \"CRIT CHANCE\" in attack.special.keys() else critChance\n critMultiplier = attack.special[\"CRIT MULTIPLIER\"] if \"CRIT MULTIPLIER\" in attack.special.keys() else critMultiplier\n target = targeting(len(enemies), target, hits)\n if attack.category == \"MAGICAL\":\n attackerPower = attacker.stats[\"MAGIC\"]\n attackerPower *= 0.6 if \"MUDDLE\" in attacker.conditions.keys() else 1\n else:\n attackerPower = attacker.stats[\"STRENGTH\"]\n attackerPower *= 0.6 if \"BURN\" in attacker.conditions.keys() else 1\n attackerPower *= 0.8 if \"LETHARGY\" in attacker.conditions.keys() else 1\n power = attack.power * attackerPower\n for i in target:\n roll = random.random()\n targetSpeed = 1 if \"STUN\" in enemies[i].conditions.keys() else enemies[i].stats[\"SPEED\"]\n hitChance = ((attacker.stats[\"SPEED\"] + attackerPower/10) / targetSpeed)\n hitChance *= 0.6 if \"BLIND\" in attacker.conditions.keys() else 1\n hitCheck = roll < hitChance\n if hitCheck:\n critCheck = roll < critChance\n resist = enemies[i].resist[attack.element] if attack.element in enemies[i].resist.keys() else 1\n damage = power * resist * (random.randint(damageRange[0], damageRange[1])/100)\n if critCheck:\n damage *= critMultiplier\n print(\"Critical hit!\")\n damage /= 2 if enemies[i].defend else 1\n damage //= enemies[i].stats[\"DEFENSE\"] if attack.category == \"PHYSICAL\" else enemies[i].stats[\"RESISTANCE\"]\n enemies[i].hp -= damage\n if enemies[i].hp < 0:\n enemies[i].ko = True\n enemies[i].hp = 0\n print(f\"\\n{attacker.name}'s {attack.name} dealt {damage} damage to {enemies[i].name}!\")\n print(f\"{enemies[i].name} {enemies[i].hp}/{enemies[i].stats['MAXHP']}\\n\")\n if ail and not enemies[i].ko:\n inflict(ail, enemies[i])\n else:\n print(f\"\\n{attacker.name} missed!\")\n attacker.wait = attack.wait * (100 - (1 if \"STUN\" in attacker.conditions.keys() else attacker.stats[\"SPEED\"])) // 1000", "def Troll(self):\n self.type = \"Troll\"\n self.image = pygame.image.load(\"Troll.gif\")\n self.cost = 4\n self.health = 60\n self.max_health = self.health\n self.base_damage = 6 \n self.damagedice = (3,2)\n self.base_defense = 2\n self.defensedice = (3,1)\n self.color = TEAL\n self.activate()", "def defend(self, damage_amt):\n total_kills = 0\n teams_total_defense = 0\n for hero in self.heroes:\n teams_total_defense += hero.defend()\n excess_total = damage_amt - teams_total_defense\n self.deal_damage(excess_total)\n\n for hero in self.heroes:\n if hero.health <= 0:\n total_kills += 1\n return total_kills", "def playerrawdmg(self):\n playerstr = globalvalues.p1.getstrength()\n # see combatvaluetable.xlsx to see some possible values of\n # playerrawdamage. 
Base formula is below:\n #\n rawdmg = int((playerstr - 4) * 102 * 0.32)\n\n # Things that will deviate the amount of damage done.\n level = globalvalues.p1.getlevel() - globalvalues.ai.getstatus()[0]\n modvalue = float(1 + level * 0.05)\n rngfactor = float(1 + float(random.randint(85, 105)) / 100)\n\n return int(rawdmg * modvalue * rngfactor)", "def attack(self, target, friendly):\n self.on_attack(target, friendly)\n dmg = self.on_deal_dmg(target, friendly)\n self.death_remove(friendly)\n # May remove other minions in special cases\n # ... \n\n return dmg", "def dealdamage(self, pokemon, movedata):\n mod = getmodifier(movedata['type'].lower(), pokemon.types)\n if movedata['category'].lower() == 'physical':\n damage = (((2*self.level/5+2)*movedata['power']*self.currentStats['ATK']/pokemon.currentStats['DEF'])/50+2)*mod\n if movedata['category'].lower() == 'special':\n damage = (((2*self.level/5+2)*movedata['power']*self.currentStats['SPATK']/pokemon.currentStats['SPDEF'])/50+2)*mod\n print(f\"HP: {pokemon.currentStats['HP']}, Damage: {damage}\")\n pokemon.takedamage(damage)\n if mod == 0:\n return [0, damage]\n if mod == 0.25:\n return [1, damage]\n if mod == 0.5:\n return [2, damage]\n if mod == 1:\n return [3, damage]\n if mod == 2:\n return [4, damage]\n if mod == 4:\n return [5, damage]", "def deal_damage(self, target):\n if hasattr(target, \"hp\"):\n dmg = random.randrange(self.atk + 1)\n target.take_damage(dmg)\n return dmg", "def fight(you, boss):\n you_attack = you['damage'] - boss['armor']\n if you_attack < 1:\n you_attack = 1\n boss_attack = boss['damage'] - you['armor']\n if boss_attack < 1:\n boss_attack = 1\n boss_turns = np.ceil(you['hit']/boss_attack)\n you_turns = np.ceil(boss['hit']/you_attack)\n return you_turns <= boss_turns", "def take_damage(self, damage):\n if damage <= 0: return\n self.hit_points[0] -= damage\n if self.hit_points[0] <= 0: self.die()", "def take_damage(self, damage: int, attacker: str) -> tuple:\n if self.hp < damage:\n self.hp = damage\n new_hp = self.hp - damage\n self.hp = new_hp\n if self.hp == 0:\n return True, f'{self.name} has been killed by {attacker}!'\n return False, f'{self.name} was hit for {damage} HP by {attacker} and now has {self.hp} HP left!'", "def Hit(self, damage):\n self.health -= damage", "def attackSpeedModifier(self):\n return 0", "def update(self, g):\n \n self.game = g\n \n #if the player is dead, KILL THEM\n if self.hp[0] <= 0 and self.dead == False:\n self.dead = True\n self.deadt = 0\n #clear debuffs\n\n if self.dead == True:\n self.deadt += g.deltaT / 1000.0\n if self.deadt > self.reviveTime: #recussitate after 30 seconds\n self.dead = False\n self.hp[0] = self.hp[1]\n return #if dead, ignore input and all other updates\n \n elif self.dead == False:\n self.hp[0] += self.regen * g.deltaT / 1000.0\n if self.hp[0] > self.hp[1]:\n self.hp[0] = self.hp[1]\n self.mana[0] += self.manaRegen * g.deltaT / 1000.0\n if self.mana[0] > self.mana[1]:\n self.mana[0] = self.mana[1]\n self.attackTimer += self.attackSpeedMultiplier * g.deltaT / 1000.0\n #check debuffs\n self.checkBurning()\n self.checkChilled()\n self.checkShocked()\n self.checkParalyzed()\n \n \n #AURA\n for skill in self.skill:\n if skill.skillKey == 0 and skill.active == True: #aura is on\n #take mana\n self.mana[0] -= float(skill.skillCost) * g.deltaT / 1000.0\n #damage all creeps in AoE\n r = 4 * 24 #the radius of the AoE, in pixels at zoom = 1.\n for creep in g.creeps:\n if ( (creep.rect.centerx - self.rect.centerx) ** 2 + (creep.rect.centery - self.rect.centery) ** 2 
) ** 0.5 < r:\n creep.take_damage( self.attack * 0.1 * g.deltaT / 1000.0, 2 ) #THIS SHOULD IGNORE ABSORBTION\n #apply debuffs, based on type\n if skill.skillAttr == 0: #fire\n creep.applyBurning()\n elif skill.skillAttr == 1: #frost\n creep.applyChilled()\n elif skill.skillAttr == 2: #lightning\n creep.applyShocked()\n \n #buff all players in AoE\n\n #AI\n if self.active == False and self.attackTimer >= self.attackDelay:\n self.do_ai()\n \n #collision detection\n self.collision = [False, False]\n #Needs to be floats to ensure the player doesn't get stuck in a wall (rounding errors cause this)\n self.futurex = self.x + self.speed * self.direction[0] * g.deltaT / 1000.0\n self.futurey = self.y + self.speed * self.direction[1] * g.deltaT / 1000.0\n \n #can't move outside the bounds of game area\n if self.futurex < 0 or self.futurex + self.rect.width > g.mapSize[0] * 24:\n #cannot move in x\n self.collision[0] = True\n if self.futurey < 0 or self.futurey + self.rect.height > g.mapSize[1] * 24:\n #cannot move in y\n self.collision[1] = True\n \n #tile collision\n for x in range( int(self.x / 24) - 1, int(self.x / 24) + 2):\n for y in range( int( (self.y + 8) / 24) - 1, int( (self.y + 8) / 24) + 2):\n if x > -1 and x < g.mapSize[0] and y > -1 and y < g.mapSize[1]:\n if g.tiles[x][y].blocking == True:\n #test if you would be in them (24 x 24 area, cut off head top)\n if self.futurex >= x * 24 and self.futurex <= x * 24 + 24 or \\\n self.futurex + 24 >= x * 24 and self.futurex + 24 <= x * 24 + 24:\n if self.futurey + 8 >= y * 24 and self.futurey + 8 <= y * 24 + 24 or \\\n self.futurey + 24 + 8 >= y * 24 and self.futurey + 24 + 8 <= y * 24 + 24:\n self.collision[0] = True\n self.collision[1] = True\n \n \n #move (or don't)\n if self.collision[0] == False:\n self.x += self.speed * self.direction[0] * g.deltaT / 1000.0\n self.rect.move_ip( (int)(self.x - self.rect.x), 0)\n if self.collision[1] == False:\n self.y += self.speed * self.direction[1] * g.deltaT / 1000.0\n self.rect.move_ip( 0, (int)(self.y - self.rect.y) )\n \n #parse direction\n if self.direction[0] == 1:\n self.frameDirection = 1\n elif self.direction[0] == -1:\n self.frameDirection = 3\n if self.direction[1] == 1:\n self.frameDirection = 0\n elif self.direction[1] == -1:\n self.frameDirection = 2\n \n #animate\n if self.direction != [0, 0]: #player is moving\n self.frameTimer += g.deltaT\n if self.frameTimer > self.frameDelay:\n self.frameTimer = 0\n self.frame += 1\n if self.frame > self.frameMax:\n self.frame = 0\n else: #player is idle\n self.frame = 0", "def fight(who_fight=None):\r\n global monsters_defeated\r\n \r\n if isinstance(who_fight,helpful.Being):\r\n ###specific monster\r\n enemy = who_fight\r\n\r\n elif isinstance(who_fight,list):\r\n ###list of categories\r\n enemy = items_lists.random_monster(random.choice(who_fight))\r\n\r\n else:\r\n ###else picks a monster at random, not boss though\r\n enemy = items_lists.random_monster()\r\n \r\n\r\n\r\n # print 'fighting:\\n' + enemy.advanced_str()\r\n encountered = words.being_adj().capitalize() + ' ' + str(enemy)\r\n raw_input(str(player) + ' encounters a ' + encountered + '!\\n')\r\n choice = helpful.pick_item(['yes','no','inventory'],'Fight?','inventory')\r\n\r\n while choice == 'inventory':\r\n inspect_inventory()\r\n choice = helpful.pick_item(['yes','no','inventory'],'Fight?','inventory')\r\n\r\n if choice == 'yes':\r\n\r\n while enemy.get_health() > 0 and player.get_health() > 0:\r\n #player attacks\r\n item = helpful.pick_item(player.get_inventory(), 'What to use?')\r\n 
player.use(item)\r\n attack = item.get_damage()\r\n defend = item.get_health()\r\n\r\n if attack > 0:\r\n enemy.hit(item)\r\n raw_input('You dealt ' +str(attack) + ' damage!')\r\n elif defend > 0:\r\n raw_input('You gained ' + str(defend) + ' HP!')\r\n else:\r\n raw_input('That was pretty dumb.\\n')\r\n \r\n if enemy.get_health() > 0: #if the enemy is still alive\r\n\r\n ###enemy attacks, using random item in enemy's inventory\r\n enemy_choice = random.choice(enemy.get_inventory())\r\n player.hit(enemy_choice)\r\n raw_input(str(enemy).capitalize() + ' used ' + str(enemy_choice) + '!\\n')\r\n raw_input('You lost ' + str(enemy_choice.get_damage()) + ' health!\\n')\r\n \r\n player.set_health(max(0,player.get_health())) #make health nonnegative\r\n enemy.set_health(max(0,enemy.get_health()))\r\n\r\n print('Player Health: ' + str(player.get_health()) + '\\n')\r\n raw_input(str(enemy) + ' Health: ' + str(enemy.get_health()) + '\\n')\r\n \r\n if enemy.get_health() == 0:\r\n winner = str(player)\r\n raw_input('You looted the following items:\\n' + enemy.get_inv_string())\r\n player.grab_items(enemy.get_inventory())\r\n result = 'win'\r\n monsters_defeated += 1\r\n\r\n if player.get_health() == 0:\r\n winner = str(enemy)\r\n result = 'death'\r\n\r\n print(winner + ' wins!\\n')\r\n\r\n elif choice == 'no':\r\n\r\n ouch = random.randrange(0,2)\r\n if enter_two == config.confus(config.config2):\r\n ouch = 0\r\n global cheated\r\n cheated = True\r\n print '<yolo>'\r\n if ouch:\r\n enemy_choice = random.choice(enemy.get_inventory())\r\n player.hit(enemy_choice)\r\n print 'You got away, but were hit by the ' + \\\r\n str(enemy) +\"'s \" + str(enemy_choice) +'!' + '\\n'\r\n raw_input('You sustained ' + str(enemy_choice.get_damage()) +' damage.\\n')\r\n if player.get_health() <= 0:\r\n return 'death'\r\n else:\r\n raw_input('You got away safely!\\n\\nThat was close!\\n')\r\n result = 'lose'\r\n\r\n return result", "def take_damage(self, damage_amt):\n\n self.health -= damage_amt\n if self.health <= 0:\n self.deaths += 1", "def _attack(self, weapon: int, defender):\n\n # Calculate damage\n round_damage = self.calculate_luck(\n self.weapons[weapon].damage\n )\n\n print(\n f\"{str(defender)}'s current health is {defender.current_health}\"\n )\n\n print(\n f\"damage for this round is {round_damage}\"\n )\n\n # Damage the defender\n defender.deal_damage(round_damage)\n\n # Return True if the attack killed the defender, false if it did not\n return defender.current_health <= 0", "def attack(self):\n total_amnt_attack = 0\n for new_attack in self.abilities:\n total_amnt_attack += new_attack.attack()\n return total_amnt_attack", "def bless_advanced(unit):\n return {DAMAGE: unit.maximum_damage + 1}", "def action_normal(self):\n obs = self.observation\n shoot = False\n eb = self.__class__.enemy_base\n \n ammopacks = filter(lambda x: x[2] == \"Ammo\", obs.objects)\n if ammopacks:\n self.updateAllAmmoSpots(ammopacks)\n # Walk to ammo\n if obs.ammo < SUFFICIENT_AMMO:\n self.goal = self.getClosestLocation(ammopacks)\n self.motivation = MOTIVATION_AMMO\n self.debugMsg(\"*> Recharge (%d,%d)\" % (self.goal[0],self.goal[1]))\n \n '''if (obs.ammo > 0 and obs.foes):\n self.goal = self.getClosestLocation(obs.foes)\n self.debugMsg(\"*> Go to enemy (%d,%d)\" % self.goal)\n # If the enemy is within range, shoot.\n if(point_dist(self.goal, obs.loc) < self.settings.max_range\n and not line_intersects_grid(obs.loc, self.goal, self.grid, self.settings.tilesize)):\n self.debugMsg(\"*> Shoot (%d,%d)\" % self.goal)\n #if self.goal not 
in obs.friends:\n self.motivation = MOTIVATION_SHOOT_TARGET\n shoot = True'''\n \n # Attack strategy 1\n #########################\n # 1) Shoot live enemies #\n #########################\n # Aim at the closest enemy outside the enemy base\n if obs.ammo > 0 and obs.foes:\n living = filter(lambda x: point_dist(x[0:2], eb) > ENEMY_BASE_RANGE, obs.foes)\n self.debugMsg(\"Living: %s\" % (living,))\n if living:\n self.debugMsg(1)\n self.goal = min(living, key=lambda x: point_dist(obs.loc, x[0:2]))[0:2]\n self.motivation = MOTIVATION_SHOOT_TARGET\n self.debugMsg(2)\n # Check if enemy in fire range\n if (\n point_dist(self.goal, obs.loc) < self.settings.max_range and\n not line_intersects_grid(\n obs.loc, \n self.goal, \n self.grid, \n self.settings.tilesize\n )\n ):\n self.debugMsg(3)\n self.debugMsg(\"*> Shoot (%d,%d)\" % self.goal)\n #return self.getActionTriple(True,None,0) ###?? SHOULD WE STOP MOVING WHEN WE SHOOT?\n return self.getActionTriple(True)\n else:\n self.debugMsg(4)\n return self.getActionTriple()\n self.debugMsg(5)\n \n # Walk to an enemy CP\n if self.goal is None and len(self.friendlyCPs) < 2:\n self.goal = self.getClosestLocation(self.getQuietEnemyCPs())\n if self.goal:\n self.debugMsg(\"Crowded location: %d\" % self.getCrowdedValue(self.goal))\n self.motivation = MOTIVATION_CAPTURE_CP\n self.debugMsg(\"*> Capture (%d,%d)\" % (self.goal[0],self.goal[1]))\n \n '''# If you can't think of anything to do\n # at least walk to a friendly control point\n if self.goal is None:\n self.goal = self.getClosestLocation(self.getQuietRestlessFriendlyCPs())\n if self.goal:\n self.motivation = MOTIVATION_GUARD_CP\n self.debugMsg(\"*> Guard (%d,%d)\" % (self.goal[0],self.goal[1]))'''\n \n if self.goal is None:\n self.goal = max(\n self.__class__.ammoSpots,\n key=lambda x: point_dist(x, obs.loc),\n )\n self.debugMsg(\"Going to ammospot far away (%d, %d)\" % (self.goal[0],self.goal[1]))\n self.motivation = MOTIVATION_STAY_PUT\n \n\n if self.goal:\n return self.getActionTriple(shoot)\n else:\n return self.getActionTriple(shoot)", "def damage(self, dmg_value):\n if self.can_take_damage():\n # here we'll add if it's affected by negative buffs\n self.health -= dmg_value", "def use_skill(self, g, i, x, y):\n # @ param g a reference to the game engine\n # @ param i the index of the skill (basically what skill)\n # @ param x the x target coordinate in game pixels\n # @ param y the y target coordinate in game pixels\n if self.attackTimer < self.attackDelay:\n print(\"attack on CD\")\n return\n \n if self.skill[i].skillAttr == 0:\n g.fire_skill_sound.play()\n elif self.skill[i].skillAttr == 1:\n g.ice_skill_sound.play()\n elif self.skill[i].skillAttr == 2:\n g.lightning_skill_sound.play()\n elif self.skill[i].skillAttr == 3:\n g.poison_skill_sound.play()\n \n \n if self.skill[i].skillKey == 0: #Aura\n #turn the aura on/off\n if self.skill[i].active == False:\n #print(\"aura on\")\n self.skill[i].active = True\n else:\n self.skill[i].active = False\n #print(\"aura off\")\n \n elif self.skill[i].skillKey == 1: #Missile\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n target = Target(x, y)\n center_x = self.rect.x + (self.rect.width / 2)\n center_y = self.rect.y + (self.rect.height / 2)\n #bullet types: fire 5, ice 6, lightning 7\n #skill types: fire 0, ice 1, lightning 2\n g.bullets.append(self.bulletFactory.createBullet(g, self.skill[i].skillAttr + 5, 0, self.attack, 1024, target, center_x, center_y))\n #print(\"missile\")\n\n elif 
self.skill[i].skillKey == 2: #Breath\n #for each creep in the AoE cone, do damage.\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n #get low and high angle (-45 degrees and +45 degrees from player -> point angle)\n lowAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) - 3.1415 / 2.0\n highAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) + 3.1415 / 2.0\n for creep in g.creeps:\n #get angle to creep\n creepAngle = math.atan2(creep.rect.centery - self.rect.centery, creep.rect.centerx - self.rect.centerx)\n \n #if angle to the creep is between the two angles\n if creepAngle > lowAngle and creepAngle < highAngle:\n #and the distance to the creep is below the skill's range\n if ( (creep.rect.centerx - self.rect.centerx) ** 2 + (creep.rect.centery - self.rect.centery) ** 2 ) ** 0.5 < 4 * 24:\n creep.take_damage( self.attack )\n #print(\"breath\")\n #apply debuffs, based on type\n if self.skill[i].skillAttr == 0: #fire\n creep.applyBurning()\n elif self.skill[i].skillAttr == 1: #frost\n creep.applyChilled()\n elif self.skill[i].skillAttr == 2: #lightning\n creep.applyShocked()", "def normal_defense(self):\n if self.game.get_my_mana() > DEFENSE_MANA_CAP:\n self.portals.dumb_castle_defense(DEFENSE_MANA_CAP)\n self.portals.dumb_portal_defense(PORTAL_SELF_DEFENSE_MANA_CAP)", "def deal_damage(self, damage):\n total_damage_amount = damage // len(self.heroes)\n total_of_deaths = 0\n for hero in self.heroes:\n dead_hero = hero.take_damage(total_damage_amount)\n\n if dead_hero == 0:\n total_of_deaths += 1\n\n return total_of_deaths", "def strike(attacker, defender):\n #print(\"----------------------------------\")\n #print(\"{} attacks {}\".format(attacker.name, defender.name))\n #print(\"----------------------------------\") \n if attacker.ai:\n attack = random.choice(attacker.attacks)\n else:\n print(show_menu(attacker.attacks))\n playeraction = integer_input(prompt=\"please select attack for {}\".format(attacker.name),\n min_value = 0, max_value = len(attacker.attacks))\n if playeraction == 0:\n return None # user cancel\n attack = attacker.attacks[playeraction-1]\n if defender.ai:\n defense = random.choice(defender.defenses)\n else:\n print(show_menu(defender.defenses))\n playeraction = integer_input(prompt=\"please select defense for {}\".format(defender.name),\n min_value = 0, max_value = len(defender.defenses))\n if playeraction == 0:\n return None # user cancel\n defense = defender.defenses[playeraction - 1]\n # damage calculation\n text = [\"---------\"] # list of text lines\n if attacker.stunned > 0:\n text.append(\"{} is still stunned and can not attack!\".format(attacker.name))\n damage = 0\n else:\n text.append(\"{} attack with {}, {} defends by {}\".format(attacker.name,\n attack, defender.name, defense))\n success = random.random() # between 0.0 and 1.0\n raw_damage = random.randint(attacker.mindamage, attacker.maxdamage)\n # damage modifier matrix\n mod = Monster.dm[attack][defense]\n damage = raw_damage * mod\n text.append(\"raw damage: {} * modifier {:.1f} = damage: {:.2f}\".format(\n raw_damage, mod, damage))\n if defender.stunned > 0:\n text.append(\"{} is still stunned and can not defend itself.\".format(defender.name))\n text.append(\"Automatic double damage, ignoring defense !\".format(defender.name))\n damage = raw_damage * 2 \n elif success < attacker.criticalfail:\n text.append(\"critical fail! 
({:.2f} < {:.2f}) No damage\".format(\n success, attacker.criticalfail))\n damage = 0\n elif mod >0:\n #new sucess chance for critical hit !\n success = random.random() \n if success < attacker.criticalhit:\n text.append(\"critical hit! ({:.2f} < {:.2f}) Triple damage !\".format(\n success, attacker.criticalhit))\n damage *= 3\n text.append(\"{} causes {:.2f} points of damage\".format(attacker.name,\n damage))\n defender.hitpoints -= damage \n text.append(\"{} has {:.1f} hitpoints left\".format(defender.name, \n defender.hitpoints))\n # -------------- special actions -------------\n # potion ?\n if defense == \"drink potion\":\n text.extend(defender.drink_potion()) # extend by more than one line\n # stun ?\n if attack == \"stun\":\n text.extend(attacker.stun(defender))\n if defender.hitpoints < 0:\n text.append(\"\\nvictory for {}!\".format(attacker.name))\n return text", "def drawHealth(self, maxHealth, currentHealth):\n percentageHealth = currentHealth/maxHealth\n if not currentHealth == 0 and not percentageHealth == 1:\n if(percentageHealth <= 1/3):\n pg.draw.rect(self.screen, pg.Color(\"snow 4\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70, self.tileSize * .15))\n pg.draw.rect(self.screen, pg.Color(\"red\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70 * percentageHealth, self.tileSize * .15))\n elif(percentageHealth <= 2/3):\n pg.draw.rect(self.screen, pg.Color(\"snow 4\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70, self.tileSize * .15))\n pg.draw.rect(self.screen, pg.Color(\"yellow\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70 * percentageHealth, self.tileSize * .15))\n elif(percentageHealth < 1):\n pg.draw.rect(self.screen, pg.Color(\"snow 4\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70, self.tileSize * .15))\n pg.draw.rect(self.screen, pg.Color(\"green\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70 * percentageHealth, self.tileSize * .15))\n pg.draw.rect(self.screen, pg.Color(\"black\"), (self.position[0] + self.tileSize * .15, self.position[1] + self.tileSize * .75, self.tileSize * .70, self.tileSize * .15), 2)", "def calculate_damage(\n self, amount:int, *, \n scan_dict:Optional[Dict]=None, \n precision:int=1, \n calculate_crew:bool=True, \n calculate_systems:bool=True, \n damage_type:DamageType,\n use_effective_values:bool=True\n ):\n #assume damage is 64, current shields are 80, max shields are 200\n #armor is 75, max armor is 100\n #80 * 2 / 200 = 160 / 200 = 0.8\n #0.8 * 64 = 51.2 = the amount of damage that hits the shields\n #64 - 51.2 = 12.8 = the amount of damage that hits the armor and hull\n #1 - (75 / 100) = 1 - 0.25 = 0.75\n #12.8 * 0.75 = 9.6 = the amount of damage that hits the armor\n #12.8 - 9.6 = 3.2 = the amount of damage that hits the hull\n \n random_varation = damage_type.damage_variation\n \n if random_varation > 0.0:\n amount = round(amount * uniform(1.0 - random_varation, 1.0))\n \n old_scan = scan_dict if scan_dict else self.scan_this_ship(\n precision, scan_for_crew=calculate_crew, \n scan_for_systems=calculate_systems, \n use_effective_values=use_effective_values\n )\n try:\n current_shields:int = old_scan[\"shields\"]\n except KeyError:\n current_shields = 0\n try:\n polarization:int = 
old_scan[\"polarization\"]\n \n if calculate_systems:\n \n polarization = round(polarization * (\n ajust_system_integrity(\n old_scan[\"sys_polarize\"]\n ) if use_effective_values else old_scan[\"sys_polarize\"]\n ))\n except KeyError:\n polarization = 0\n current_hull:int = old_scan[\"hull\"]\n \n old_status = self.ship_status\n \n is_hulk = current_hull < 0\n \n try:\n is_derlict = old_scan[\"able_crew\"] + old_scan[\"injured_crew\"] <= 0\n except KeyError:\n is_derlict = False\n try:\n shield_effectiveness = ajust_system_integrity(old_scan[\"sys_shield\"]) if use_effective_values else old_scan[\"sys_shield\"]\n except KeyError:\n shield_effectiveness = 1\n \n shields_are_already_down = shield_effectiveness <= 0 or current_shields <= 0 or not old_status.do_shields_work or not self.shield_generator.shields_up\n \n shields_dam = 0\n armorDam = amount\n hull_dam = amount\n \n shield_dam_multi = damage_type.damage_vs_shields_multiplier\n\n armorHullDamMulti = (\n damage_type.damage_vs_no_shield_multiplier \n if shields_are_already_down else damage_type.damage_vs_hull_multiplier\n ) \n try:\n shields_percentage = current_shields / self.ship_class.max_shields\n except ZeroDivisionError:\n shields_percentage = 0\n shields_are_already_down = True\n \n bleedthru_factor = min(shields_percentage + 0.5, 1.0)\n \n if shields_are_already_down:\n \n hull_dam = amount * armorHullDamMulti\n else:\n to_add = 0\n shields_dam = amount * bleedthru_factor * shield_dam_multi\n if shields_dam > current_shields:\n to_add = shields_dam - current_shields\n \n shields_dam = current_shields\n amount *= (1 - bleedthru_factor)\n amount += to_add\n hull_dam = amount * armorHullDamMulti\n \n hull_dam = round(calculate_polarization(hull_dam, polarization))\n \n new_shields = scan_assistant(current_shields - shields_dam, precision) if shields_dam > 0 else current_shields\n new_hull = scan_assistant(current_hull - hull_dam, precision) if hull_dam > 0 else current_hull\n \n hull_damage_as_a_percent = hull_dam / self.ship_class.max_hull\n try:\n new_shields_as_a_percent = new_shields / self.ship_class.max_shields\n except ZeroDivisionError:\n new_shields_as_a_percent = 0\n new_hull_as_a_percent = new_hull / self.ship_class.max_hull\n \n killed_outright = 0\n killed_in_sickbay = 0\n wounded = 0\n \n if calculate_crew and not is_derlict and not is_hulk:\n \n crew_killed = hull_dam > 0 and new_hull_as_a_percent < random() and not self.ship_class.is_automated\n \n if crew_killed:\n able_crew = old_scan[\"able_crew\"]\n injured_crew = old_scan[\"injured_crew\"]\n \n percentage_of_crew_killed = hull_damage_as_a_percent * random()\n \n total_crew = able_crew + injured_crew\n \n wounded_fac = uniform(0.25, 0.75)\n \n _able_crew_percentage = able_crew / total_crew\n \n percentage_of_able_crew_killed = _able_crew_percentage * (percentage_of_crew_killed * (1 - wounded_fac))\n percentage_of_able_crew_wounded = _able_crew_percentage * (percentage_of_crew_killed * (wounded_fac))\n percentage_of_injured_crew_killed = (injured_crew / total_crew) * percentage_of_crew_killed\n \n killed_outright = round(self.life_support.able_crew * percentage_of_able_crew_killed)\n killed_in_sickbay = round(0.5 * self.life_support.able_crew * percentage_of_injured_crew_killed)\n wounded = round(self.life_support.able_crew * percentage_of_able_crew_wounded)\n \n shield_sys_damage = 0\n energy_weapons_sys_damage = 0\n cannon_sys_damage = 0\n impulse_sys_damage = 0\n warp_drive_sys_damage = 0\n sensors_sys_damage = 0\n torpedo_sys_damage = 0\n 
warp_core_sys_damage = 0\n cloak_sys_damage = 0\n transporter_sys_damage = 0\n polarized_hull_damage = 0\n scanners_damage = 0\n \n if calculate_systems and not is_hulk:\n chance_to_damage_system = damage_type.chance_to_damage_system\n \n systems_damaged = hull_dam > 0 and new_hull_as_a_percent < uniform(\n hull_damage_as_a_percent, 1.25 + hull_damage_as_a_percent)\n \n if systems_damaged:\n system_damage_chance = damage_type.damage_chance_vs_systems_multiplier\n \n def chance_of_system_damage():\n # this is cumbersome. A better way may be random() * chance_to_damage_system > (old_hull_as_a_percent + new_hull_as_a_percent) * 0.5\n return uniform(\n hull_damage_as_a_percent, chance_to_damage_system + hull_damage_as_a_percent\n ) > new_hull_as_a_percent\n \n def random_system_damage():\n return uniform(0.0, system_damage_chance * hull_damage_as_a_percent)\n \n if self.ship_class.max_shields and chance_of_system_damage():\n shield_sys_damage = random_system_damage()\n \n if self.ship_class.max_beam_energy and chance_of_system_damage():\n energy_weapons_sys_damage = random_system_damage()\n \n if self.ship_class.max_cannon_energy and chance_of_system_damage():\n cannon_sys_damage = random_system_damage()\n \n if self.ship_class.evasion and chance_of_system_damage():\n impulse_sys_damage = random_system_damage()\n \n if self.ship_class.max_warp and chance_of_system_damage():\n warp_drive_sys_damage = random_system_damage()\n \n if chance_of_system_damage():\n sensors_sys_damage = random_system_damage()\n \n if self.ship_class.max_torpedos and chance_of_system_damage():\n torpedo_sys_damage = random_system_damage()\n \n if chance_of_system_damage():\n warp_core_sys_damage = random_system_damage()\n \n if self.ship_class.cloak_strength and chance_of_system_damage():\n cloak_sys_damage = random_system_damage()\n \n if self.ship_class.max_crew and chance_of_system_damage():\n transporter_sys_damage = random_system_damage()\n \n if self.ship_class.polarized_hull and chance_of_system_damage():\n polarized_hull_damage = random_system_damage()\n \n if chance_of_system_damage():\n scanners_damage = random_system_damage()\n \n return (\n new_shields, new_hull, shields_dam, hull_dam, new_shields_as_a_percent, \n new_hull_as_a_percent, killed_outright, killed_in_sickbay, wounded, shield_sys_damage, \n impulse_sys_damage, warp_drive_sys_damage, sensors_sys_damage, \n warp_core_sys_damage, \n energy_weapons_sys_damage, cannon_sys_damage, \n torpedo_sys_damage, cloak_sys_damage, transporter_sys_damage, polarized_hull_damage, scanners_damage\n )", "def test_get_damage_out_of_limit(self):\n self.sold.health = 0.2\n self.sold.get_damage(0.32)\n self.assertEqual(self.sold.health, 0)", "def attack(attacker, defender):\n if randint(1, 100) < attacker['rage']:\n defender['health'] -= randint(attacker['damage high'],\n attacker['damage low'])\n attacker['rage'] = 0\n else:\n defender['health'] -= randint(attacker['damage high'],\n attacker['damage low'])\n attacker['rage'] += 15", "def _calc_hp(self, average=False):\n dice = self.hd + self.constitution\n if average:\n return round((dice * self.level).average)\n\n return max(sum((dice * self.level).roll()), 1)", "def __init__(self, name, loot, strength):\n self.name = name\n self.x = 0\n self.y = 0\n self.health = 10\n self.strength = strength\n self.loot = loot\n self.is_alive = True\n self.MAX_HEALTH = 15\n self.magic_key = False\n logging.debug(\"{0} created with health of {1} and strength of {2}\"\n .format(self.name, self.health, self.strength))\n \"\"\" Test 
Results Part A:\n When increasing MAX_HEATH to 100, rounds tended to go on.\n When decreasing MAX_HEATH to 0.05, rounds end very quickly.\n This is expected because the Sprites will be easier or harder \n to defeat depending on how high their health can get. It will \n take more attacks to defeat a Sprite with more health and less\n attacks to defeat a Sprite with less health. \n \n Test Results Part B:\n Test: change strength of Enemy to 20 (higher than Avatar)\n Prediction: the Enemy should win most/all of the time because the player \n with more strength has a harder attack.\n Results: The Enemy won during all trials. If the roles were switched, the \n same could be said about Avatar.\n \n Test: set health of Avatar to 5\n Prediction: the Avatar will die more often than the Enemy because it can \n receive less attacks\n Results: The Avatar died during most trials. \n \n Test: set MAX_HEALTH for Enemy to 5\n Prediction: Enemy will be able to have less health, so it will be defeated\n more often than the Avatar\n Results: The enemy died in almost all trials\n \"\"\"", "def absorb(self, damage):\n\n dealt_damage = damage\n\n if self.health > 0:\n delta = min(self.health, damage) * self.absorb_coeff\n self.health = int(self.health - delta)\n dealt_damage = (1.0 - self.absorb_coeff) * damage\n\n return dealt_damage", "def test_get_damage(self):\n self.sold.health = 0.8\n self.sold.get_damage(0.32)\n self.assertEqual(self.sold.health, 0.48)", "def deal_damage(self, modifier: int, critical_hit: bool) -> int:\r\n if critical_hit:\r\n damage_dice = self.critical_hit()\r\n else:\r\n damage_dice = self.damage\r\n damage = dice.roll(damage_dice)[0] + modifier\r\n return damage", "def attack(self):\n\n lowest_attack = int(self.attack_strength)// 2\n attack_strength = random.randint(lowest_attack, int(self.attack_strength))\n return attack_strength", "def ground_dps(self) -> Union[int, float]:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value]), None\n )\n if weapon:\n return (weapon.damage * weapon.attacks) / weapon.speed\n return 0", "def attack(self):\n return random.randint(0, self.attack_strength)", "def fight(self):\r\n\t\tif self.death():\r\n\t\t\treturn 0\r\n\t\tif self.ctime < 1:\r\n\t\t\tself.ctime += 0.05\r\n\t\telse:\r\n\t\t\tself.ctime = 0\r\n\t\t\tself.hit()", "def true_damage(damage, weapon_size, target_size, source_debuff, target_debuff):\n\n # source_debuffs: tracking disruption\n tracking_disrupt = 1 + source_debuff.get('active', {}).get(\n 'tracking_disruption', 0)\n # target_debuffs: target painter, web\n target_painter = 1 + target_debuff.get('active', {}).get(\n 'target_painter', 0)\n web = 1 - target_debuff.get('active', {}).get(\n 'web', 0)\n\n # painters gives > 1 multiplier to the target_size against target\n # reason - painters expand the target to make it easier to hit.\n\n # webbers give < 1 multiplier to the weapon_size against target\n # reason - weapons can focus more damage on a webbed target\n\n if web == 0 or weapon_size / web * tracking_disrupt <= \\\n target_size * target_painter:\n return damage\n\n true_weapon_size = (weapon_size / web) * tracking_disrupt\n true_target_size = target_size * target_painter\n damage_factor = size_damage_factor(true_weapon_size, true_target_size)\n return int(math.ceil(damage_factor * damage))", "def ground_dps(self) -> Union[int, float]:\n if hasattr(self.type_data.proto, 
\"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in {TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value}), None\n )\n if weapon:\n return (weapon.damage * weapon.attacks) / weapon.speed\n return 0", "def stealthUpdate(self,ability,amount):\n \n if ability == 'str':\n self.str += amount\n elif ability == 'dex':\n self.dex += amount \n elif ability == 'con':\n self.con += amount\n elif ability == 'int':\n self.int += amount\n elif ability == 'wis':\n self.wis += amount\n elif ability == 'cha':\n self.cha += amount\n elif ability == 'hp':\n self.hp += amount", "def defense(self):\n #return self.stats.dexterity + (self.stats.reiatsu * self.stats.density)\n return self.stats.defense", "def calc_damage(attacker: Character, defender: Character, min_damage: int = 1) -> int:\n return max(min_damage, attacker.damage - defender.armor)" ]
[ "0.6950379", "0.6850593", "0.68326175", "0.6792282", "0.6762298", "0.6743478", "0.6738781", "0.6719024", "0.6649889", "0.66238135", "0.6622487", "0.6583088", "0.65515465", "0.6544407", "0.65372705", "0.6528842", "0.65219796", "0.6520335", "0.6516249", "0.64586735", "0.6457608", "0.6446979", "0.64268696", "0.64217895", "0.64174503", "0.6403618", "0.6396451", "0.63930064", "0.63720644", "0.6360181", "0.6350848", "0.633305", "0.63327324", "0.63311505", "0.6328798", "0.63191825", "0.6312309", "0.6296601", "0.6295198", "0.62884796", "0.6282774", "0.6273778", "0.6265097", "0.6243358", "0.6240637", "0.62393576", "0.623168", "0.6215226", "0.6210223", "0.6205012", "0.61988914", "0.61962676", "0.61928225", "0.618776", "0.61871445", "0.6170711", "0.61611354", "0.615323", "0.6134221", "0.61169064", "0.61123836", "0.609451", "0.6093161", "0.60891026", "0.6084074", "0.6070703", "0.6066699", "0.60662484", "0.6065722", "0.6060577", "0.6057936", "0.6053683", "0.60450214", "0.60449874", "0.60407406", "0.60222834", "0.6021156", "0.59865916", "0.5976696", "0.5976114", "0.597537", "0.5973789", "0.5973608", "0.5969915", "0.5969116", "0.5958343", "0.5957015", "0.59502923", "0.594333", "0.59380716", "0.5935856", "0.5927723", "0.59202987", "0.59196514", "0.59177625", "0.59165496", "0.59101063", "0.5908747", "0.5908592", "0.5899278" ]
0.6319719
35
Returns a tuple of the form (max_hit, accuracy) for the given levels after factoring in the weapons available and the selected attack style. Assumes the enemy has level 1 defence and 0 defence bonus.
def get_max_hit_and_accuracy( levels, attack_style, attack_bonus, strength_bonus): weapon_attack, weapon_strength = get_weapon_stats(levels.attack) attack_bonus += weapon_attack strength_bonus += weapon_strength if attack_style == Attack_Style.ATTACK: effective_attack = osrs.effective_level(levels.attack, 1, 3, 1) effective_strength = osrs.effective_level(levels.strength, 1, 0, 1) elif attack_style == Attack_Style.STRENGTH: effective_attack = osrs.effective_level(levels.attack, 1, 0, 1) effective_strength = osrs.effective_level(levels.strength, 1, 3, 1) enemy_effective_defence = osrs.effective_level(1, 1, 0, 1) max_hit = osrs.max_hit(effective_strength, strength_bonus) accuracy = osrs.accuracy(effective_attack, attack_bonus, enemy_effective_defence, 0) return (max_hit, accuracy)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_weapon_stats(attack_level):\n if attack_level >= 60:\n # Dragon scimitar\n return (67, 66)\n elif attack_level >= 40:\n # Rune scimitar\n return (45, 44)\n elif attack_level >= 30:\n # Adamant scimitar\n return (29, 28)\n elif attack_level >= 20:\n # Mithril scimitar\n return (21, 20)\n elif attack_level >= 10:\n # Black scimitar\n return (19, 14)\n elif attack_level >= 5:\n # Steel scimitar\n return (15, 14)\n else:\n # Iron scimitar\n return (10, 9)", "def difficulty_for_level(level):\n return 0 if level==\"easy\" else (1 if level==\"medium\" else 2)", "def update_hp_for_higher_level(chosen_class,level):\n #Checks to see if your character is level 4,8,12,etc.\n def upgradedAbilityAt4(level):\n if level % 4 == 0:\n upgraded_ability = raw_input(\"Level \"+str(level)+\"!\\n Which two abilities would you like to upgrade? (Adds +1 to ability)\\n Please input two from str/dex/con/int/wis/cha with a space in between.\\n (ex: cha dex) \").split(' ')\n print\n #To write:\n #if either ability pushes ability score over 20, redo input\n\n \n for i in upgraded_ability:\n self.stealthUpdate(i,1)\n #class specific HP calculations\n if chosen_class == 'barbarian': \n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,12) + self.con + self.classMods[6]\n elif chosen_class == 'cleric':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'druid':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'fighter':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'monk':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'paladin':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'ranger':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'rogue':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]\n elif chosen_class == 'wizard':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]", "def weaponValue(self, level):\n if level == 1:\n bonus = 2\n elif level == 2:\n bonus = 4\n elif level == 3:\n bonus = 6\n elif level == 4:\n bonus = 8\n else:\n bonus = 0\n\n return bonus", "def set_game_level(user_level_input):\n if user_level_input == \"easy\":\n return sample_1, answer_sample_1\n elif user_level_input == \"medium\":\n return sample_2, answer_sample_2\n elif user_level_input == \"medium\":\n return sample_3, answer_sample_3\n else:\n return \"That level does not exist\"", "def level_time_average(start_levels, attack_style, attack_bonus, strength_bonus):\n ticks_per_attack = 4 # Scimitar attack speed\n max_hit, accuracy = get_max_hit_and_accuracy(\n start_levels, attack_style, attack_bonus, strength_bonus)\n \n if attack_style == Attack_Style.ATTACK:\n start_exp = osrs.experience[start_levels.attack]\n end_exp = osrs.experience[start_levels.attack+1]\n elif attack_style == Attack_Style.STRENGTH:\n start_exp = osrs.experience[start_levels.strength]\n end_exp = osrs.experience[start_levels.strength+1]\n \n experience = 
end_exp - start_exp\n avg_hit = accuracy * max_hit / 2\n exp_per_hit = avg_hit * osrs.BASE_EXP_PER_DAMAGE\n ticks = experience / exp_per_hit * ticks_per_attack\n return ticks", "def get_max_hit_increases(\n start_strength_level, end_strength_level,\n strength_bonus, stance_adder):\n greatest_max_hit = 0\n max_hit_increases = []\n cur_strength_level = start_strength_level\n while cur_strength_level < end_strength_level:\n effective_strength = osrs.effective_level(\n cur_strength_level, 1, stance_adder, 1)\n max_hit = osrs.max_hit(effective_strength, strength_bonus)\n\n if max_hit > greatest_max_hit:\n greatest_max_hit = max_hit\n max_hit_increases.append((cur_strength_level, max_hit))\n\n cur_strength_level += 1", "def find_ability(abilities: list, character_class: str, attack_type: str) -> Dict:\n # Find the ability to use\n ability_to_use = {\"effects\": [], \"enhancements\": []}\n for ability in abilities:\n if (ability[\"class\"] == character_class) and (ability[\"type\"] == attack_type):\n ability_to_use = ability\n break\n\n return ability_to_use", "def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)", "def _determine_level(levels, points):\n import operator\n level = None\n sorted_levels = sorted(levels.iteritems(), key=operator.itemgetter(1))\n for el in sorted_levels:\n if points <= el[1]:\n level = el[0]\n break\n\n max_level = max(levels.iterkeys(), key=lambda threshold: levels[threshold])\n if points >= levels[max_level]:\n level = max_level\n return level", "def getAbilityScores(self):\n mods = [(self.str -10)/2,\n (self.dex-10)/2,\n (self.con-10)/2,\n (self.int-10)/2,\n (self.wis-10)/2,\n (self.cha-10)/2]\n print \"STR: {0} ({1}) \\nDEX: {2} ({3})\\nCON: {4} ({5})\".format(self.str,\n mods[0],\n self.dex,\n mods[1],\n self.con,\n mods[2])\n print \"INT: {0} ({1})\\nWIS: {2} ({3})\\nCHA: {4} ({5})\".format(self.int,\n mods[3],\n self.wis,\n mods[4],\n self.cha,\n mods[5])", "def attack_bonus_on_level(self, level):\n raise NotImplementedError", "def calculate_score(friendly_tiles, enemy_tiles):\n friendly_permanent_tiles = [tile for tile in friendly_tiles if tile.is_permanently_owned()]\n enemy_permanent_tiles = [tile for tile in enemy_tiles if tile.is_permanently_owned()]\n num_friendly_permanent_tiles = len(friendly_permanent_tiles)\n num_enemy_permanent_tiles = len(enemy_permanent_tiles)\n num_friendly_non_permanent_tiles = len(friendly_tiles) - num_friendly_permanent_tiles\n num_enemy_non_permanent_tiles = len(enemy_tiles) - num_enemy_permanent_tiles\n return (num_friendly_non_permanent_tiles + 2 * num_friendly_permanent_tiles, num_enemy_non_permanent_tiles + 2 * num_enemy_permanent_tiles)", "def attack(health_meter):\n hit_list = 4 * ['player'] + 6 * ['enemy']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"ATTACK! 
\", end='')\n show_health(health_meter)", "def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between\n # half of max_damage and max_damage\n print(\"max damage of \" + self.name + \" is \")\n print(str(self.attack_strength))\n min_damage = self.attack_strength // 2\n weapon_attack_value = random.randint(min_damage, self.attack_strength)\n return weapon_attack_value", "def Value(self,enemies):\n if self.type == \"Goblin\":\n if \"Bard\" in enemies.inteam and not \"Fighter\" in enemies.inteam:\n return 2\n else:\n return 1\n\n if self.type == \"Ork\":\n if \"Archer\" in enemies.inteam or \"Fighter\" in enemies.inteam:\n return 3\n else:\n return 2\n if self.type == \"Skeleton\":\n if \"Mage\" in enemies.inteam or \"Archer\" in enemies.inteam:\n return 5\n else:\n return 3\n \n if self.type == \"Troll\":\n if \"Fighter\" in enemies.inteam and not \"Mage\" in enemies.inteam:\n return 7\n else:\n return 4", "def get_leveling_args(cards, card_attrs):\n if (len(card_attrs['evolve']) < len(card_attrs['level']) and\n len(cards) > 15):\n cards_to_consume = set()\n candidates = set(card_attrs['level'].keys())\n cards_by_xp = list(set(swizzle(cards, 'xp01')) & candidates)\n cards_by_rarity = list(set(swizzle(cards, 'type')) & candidates)\n cards_by_xp, cards_by_rarity, top_third = remove_rarest_third(\n cards_by_xp, cards_by_rarity)\n\n if cards_by_xp and top_third:\n # Number of cards to consume into our destination card will be between\n # min and max values (defined in config).\n num_to_consume = randint(\n cfg['level']['min_cards'],\n min(cfg['level']['max_cards'], len(top_third)))\n\n # Get the bottom n number of cards by xp to consume into a rare card\n lesser = min(num_to_consume, len(cards_by_xp))\n for i in range(lesser): # pylint: disable=unused-variable\n cur_card = cards_by_xp.pop(0)\n if cur_card in cards_by_rarity:\n cards_by_rarity.remove(cur_card)\n if cur_card not in cards_to_consume:\n cards_to_consume.add(cur_card)\n\n logger.debug(\"Cards to consume:\")\n logger.debug(cards_to_consume)\n\n # Choose one of the more rare cards as the target to level.\n # TODO: prefer rare cards with more xp pylint: disable=fixme\n dest_id = choice(top_third)\n\n return (dest_id, cards_to_consume)\n\n return False", "def max_diaphragmatic_level(levels):\n return [max(x) for x in levels]", "def get_highest_accuracy(self) -> Tuple[float, Dict[str, Any]]:\n highest_val = float('-inf')\n data = None\n for instance in self.stats:\n if self.stats[instance][\"Accuracy\"] > highest_val:\n highest_val = self.stats[instance][\"Accuracy\"]\n data = self.stats[instance]\n return highest_val, data", "def level_time_simulate(start_levels, attack_style, attack_bonus, strength_bonus):\n ticks_per_attack = 4 # Scimitar attack speed\n enemy_health = 60 # Sand crab health\n\n max_hit, accuracy = get_max_hit_and_accuracy(\n start_levels, attack_style, attack_bonus, strength_bonus)\n \n if attack_style == Attack_Style.ATTACK:\n start_exp = osrs.experience[start_levels.attack]\n end_exp = osrs.experience[start_levels.attack+1]\n elif attack_style == Attack_Style.STRENGTH:\n start_exp = osrs.experience[start_levels.strength]\n end_exp = osrs.experience[start_levels.strength+1]\n \n experience = end_exp - start_exp\n avg_ticks = combat_simulator.ticks_until_exp(max_hit, accuracy,\n ticks_per_attack, enemy_health, experience,\n osrs.BASE_EXP_PER_DAMAGE, ITERATIONS)\n return avg_ticks", "def attack(health_meter):\n hit_list = 4 * ['igrac'] + 6 * 
['neprijatelj']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"NAPAD! \", end='')\n show_health(health_meter)", "def most_powerful_weapon(self):\n # sets inital damge to 0\n max_damage = 0\n # sets the best weapon to nothing\n best_weapon = None\n # Loop for each item in inventory\n for item in self.inventory:\n # Code adapted from Make Your own Python Text Based Adventure\n # tries to see if the item damage is greator than the current max\n # damage and then replaces the best weapon in inventory\n try:\n if item.damage > max_damage:\n best_weapon = item\n max_damage = item.damage\n except AttributeError:\n pass\n # sends the best weapon to function\n return best_weapon", "def calculate_hit(self, armor_list, inventory):\n armor_power = 0\n for armor in armor_list:\n armor_power += inventory[armor]['power']\n max_strength = max(1, (self.level * 5) - armor_power)\n min_strength = 0\n return random.randint(min_strength, max_strength)", "def abilityScores(self):\n mods = [(self.str -10)/2,\n (self.dex-10)/2,\n (self.con-10)/2,\n (self.int-10)/2,\n (self.wis-10)/2,\n (self.cha-10)/2]\n return \"STR: {0} ({1}) \\nDEX: {2} ({3})\\nCON: {4} ({5})\".format(self.str,\n mods[0],\n self.dex,\n mods[1],\n self.con,\n mods[2])+\"\\n\" \\\n \"INT: {0} ({1})\\nWIS: {2} ({3})\\nCHA: {4} ({5})\".format(self.int,\n mods[3],\n self.wis,\n mods[4],\n self.cha,\n mods[5])", "def _get_max_hits(build: Build, decimals:int) -> str:\n stats = ['Physical', 'Fire', 'Cold', 'Lightning', 'Chaos']\n emojis = [':drop_of_blood:', ':fire:', ':snowflake:', ':zap:', ':skull:']\n lines = []\n\n show = False\n for i, stat in enumerate(stats):\n max_hit_key = stat + 'MaximumHitTaken'\n max_hit_val = shorten_number_string(build.get_player_stat(max_hit_key, 0, 0), decimals)\n res_key = stat + 'DamageReduction' if stat == 'Physical' else stat + 'Resist'\n res_val = build.get_player_stat(res_key)\n if res_val:\n lines.append(f\"{emojis[i]} {max_hit_val} ({res_val:.0f}%)\")\n show = True\n\n output = '\\n'.join(lines)\n output += \"\\n\"\n return output if show else \"\"", "def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(allies) > 0:\n self.self_id = allies[0].unit_type\n self_weapon_range = weapon_ranges[self.self_id]\n self_radius = unit_sizes[self.self_id] / float(2)\n self_unit_type = unit_type[self.self_id]\n self_speed = unit_speed[self.self_id]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n if 
self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n break\n else:\n in_enemy_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, self.self_id,\n self.enemy_id]", "def calc_tohit(attr, level):\n return level + calc_attr_mod(attr)", "def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n if len(allies) > 0:\n self_weapon_range = weapon_ranges[allies[0].unit_type]\n self_radius = unit_sizes[allies[0].unit_type] / float(2)\n self_unit_type = unit_type[allies[0].unit_type]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n if len(enemies) > 0:\n enemy_weapon_range = weapon_ranges[enemies[0].unit_type]\n enemy_radius = unit_sizes[enemies[0].unit_type] / float(2)\n enemy_unit_type = unit_type[enemies[0].unit_type]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n else:\n in_enemy_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type]", "def level_and_fall_freq(\n complete_lines: float,\n base_speed: float=8.0,\n speed_limit: float=0.1,\n) -> tuple:\n # get the level that the player is on\n level 
= int(complete_lines / 10) + 1\n # get the frequency with which to move pieces down\n fall_freq = base_speed / level\n # reset the fall_frequency if it's below the speed limit\n if fall_freq < speed_limit:\n fall_freq = speed_limit\n\n return level, fall_freq", "def retrieve_handcrafted_inputs(self, obs):\n self.detect_self_unit_types(obs)\n\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n selected_allies = [unit for unit in allies if unit.unit_type == self.current_group_id]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n hitpoints = 0\n for unit in selected_allies:\n hitpoints += unit.health\n\n if self.current_group_id in unit_health.keys():\n init_hp = 0\n init_hp = unit_health[self.current_group_id] * self.init_unit_counts[self.current_group_id]\n else:\n init_hp = self.initial_self_hit_points\n current_hp = hitpoints / init_hp\n\n weapon_cooldown = 0\n for ally in selected_allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(selected_allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(selected_allies) > 0:\n self_weapon_range = weapon_ranges[self.current_group_id]\n self_radius = unit_sizes[self.current_group_id] / float(2)\n self_unit_type = unit_type[self.current_group_id]\n self_speed = unit_speed[self.current_group_id]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n # TODO can be inaccurate if using melee units\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in selected_allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n break\n else:\n in_enemy_range = 0\n if in_enemy_range:\n break\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs, for_subgroup=True)\n\n if self.previous_commands[self.current_group_id] == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_commands[self.current_group_id] == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs,\n for_subgroup=True)\n\n distance_to_enemy = self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs))\n distance_to_enemy = distance_to_enemy / float((32 ** 2 + 20 ** 2) ** 0.5)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, distance_to_enemy]", "def level(score):\n user_level = \"\"\n if score < 20:\n user_level = \"elementary\"\n 
elif score < 30:\n user_level = \"intermediate\"\n elif score < 35:\n user_level = \"upper intermediate\"\n else:\n user_level = \"advanced\"\n return user_level", "def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(allies) > 0:\n self_weapon_range = weapon_ranges[allies[0].unit_type]\n self_radius = unit_sizes[allies[0].unit_type] / float(2)\n self_unit_type = unit_type[allies[0].unit_type]\n self_speed = unit_speed[allies[0].unit_type]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n enemy_weapon_range = weapon_ranges[enemies[0].unit_type]\n enemy_radius = unit_sizes[enemies[0].unit_type] / float(2)\n enemy_unit_type = unit_type[enemies[0].unit_type]\n enemy_speed = unit_speed[enemies[0].unit_type]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, prev_cmd, north_bound, south_bound, west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed]", "def max_stat(self, stat: Stat, level: Optional[int]=None):\n if level is None:\n level = self.level\n\n if stat.name.upper() == \"HP\":\n value = (self.individual_values[stat] + self.species.base_stats[stat] + math.sqrt(self.effort_values[stat]) / 8 + 50) * level / 50 + 10\n else:\n value = (self.individual_values[stat] + self.species.base_stats[stat] + math.sqrt(self.effort_values[stat]) / 8) * level / 50 + 5\n\n return int(value)", "def extract_levels(enemy_behavior: List[Any]):\n levels = set()\n levels.add(1)\n for b in enemy_behavior:\n if type(b) == ESBranchLevel:\n levels.add(b.branch_value)\n elif hasattr(b, 'level'):\n levels.add(b.level)\n return levels", "def fetch_levels(self):\n rootLogger.info(\"[*] Fetching water levels...\")\n\n headers = {\"User-Agent\": \"Edwards Aquifer Bot - Follow on Twitter: @edwardsaquabot\"}\n\n response = requests.get(self.url, headers=headers, verify=True, timeout=60)\n if response.status_code != 200:\n rootLogger.error(\n \"HTTP status code: {} -- unsuccessfully retrieved: {}\".format(response.status_code, self.url)\n )\n return\n\n # Use beautiful soup to grab the levels...works, maybe not the best though.\n soup = BeautifulSoup(response.text, \"html.parser\")\n table = 
soup.find_all(\"table\")[1]\n\n # Today's Reading.\n column = table.find_all(\"td\")[0]\n today_water_level = column.find(\"span\").contents[0].strip()\n\n # Yesterday's Reading.\n column = table.find_all(\"td\")[2]\n yesterday_water_level = column.find(\"span\").contents[0].strip()\n\n # 10 Day Average Reading.\n column = table.find_all(\"td\")[4]\n ten_day_average = column.find(\"span\").contents[0].strip()\n\n return today_water_level, yesterday_water_level, ten_day_average", "def bless_advanced(unit):\n return {DAMAGE: unit.maximum_damage + 1}", "def getDefense(self):\n\t\treturn(self.maneuverability * self.protection)", "def maximum_level(self, question_type):\n\t\treturn 2", "def attackProcess(attack: \"Attack\", attacker: \"PlayerCharacter or Monster\", enemies: list, targetID: int):\n hits, target, ail = 1, [targetID], None\n damageRange, critChance, critMultiplier = (95, 105), 0.1, 2\n if (attack.special):\n hits = attack.special[\"HITS\"] if \"HITS\" in attack.special.keys() else 1\n target = attack.special[\"TARGET\"] if \"TARGET\" in attack.special.keys() else target\n ail = attack.special[\"INFLICT\"] if \"INFLICT\" in attack.special.keys() else None\n damageRange = attack.special[\"DAMAGE RANGE\"] if \"DAMAGE RANGE\" in attack.special.keys() else damageRange\n critChance = attack.special[\"CRIT CHANCE\"] if \"CRIT CHANCE\" in attack.special.keys() else critChance\n critMultiplier = attack.special[\"CRIT MULTIPLIER\"] if \"CRIT MULTIPLIER\" in attack.special.keys() else critMultiplier\n target = targeting(len(enemies), target, hits)\n if attack.category == \"MAGICAL\":\n attackerPower = attacker.stats[\"MAGIC\"]\n attackerPower *= 0.6 if \"MUDDLE\" in attacker.conditions.keys() else 1\n else:\n attackerPower = attacker.stats[\"STRENGTH\"]\n attackerPower *= 0.6 if \"BURN\" in attacker.conditions.keys() else 1\n attackerPower *= 0.8 if \"LETHARGY\" in attacker.conditions.keys() else 1\n power = attack.power * attackerPower\n for i in target:\n roll = random.random()\n targetSpeed = 1 if \"STUN\" in enemies[i].conditions.keys() else enemies[i].stats[\"SPEED\"]\n hitChance = ((attacker.stats[\"SPEED\"] + attackerPower/10) / targetSpeed)\n hitChance *= 0.6 if \"BLIND\" in attacker.conditions.keys() else 1\n hitCheck = roll < hitChance\n if hitCheck:\n critCheck = roll < critChance\n resist = enemies[i].resist[attack.element] if attack.element in enemies[i].resist.keys() else 1\n damage = power * resist * (random.randint(damageRange[0], damageRange[1])/100)\n if critCheck:\n damage *= critMultiplier\n print(\"Critical hit!\")\n damage /= 2 if enemies[i].defend else 1\n damage //= enemies[i].stats[\"DEFENSE\"] if attack.category == \"PHYSICAL\" else enemies[i].stats[\"RESISTANCE\"]\n enemies[i].hp -= damage\n if enemies[i].hp < 0:\n enemies[i].ko = True\n enemies[i].hp = 0\n print(f\"\\n{attacker.name}'s {attack.name} dealt {damage} damage to {enemies[i].name}!\")\n print(f\"{enemies[i].name} {enemies[i].hp}/{enemies[i].stats['MAXHP']}\\n\")\n if ail and not enemies[i].ko:\n inflict(ail, enemies[i])\n else:\n print(f\"\\n{attacker.name} missed!\")\n attacker.wait = attack.wait * (100 - (1 if \"STUN\" in attacker.conditions.keys() else attacker.stats[\"SPEED\"])) // 1000", "def _level_info(entity):\n if entity.is_max_level():\n return 'Maxed'\n if entity.max_level is not None:\n return '{entity.level}/{entity.max_level}'.format(entity=entity)\n return entity.level", "def offense(self):\n #return self.stats.strength + self.stats.level\n return self.stats.offense", "def attack(self):\n 
# TODO: Use integer division to find half of the max_damage value\n # then return a random integer between half of max_damage and max_damage\n \n weapon_attack_value = random.randint(self.max_damage//2, self.max_damage)\n return weapon_attack_value", "def max_health():\r\n max_health = max(Titan.health_titans())\r\n return max_health", "def ability_bonus_on_level(self, level):\n raise NotImplementedError", "def get_number_of_atmos_levels(description):\n if description == \"none\":\n return \"0\", \"0\"\n elif re.search(\"levels\", description):\n match = re.search(\"(?P<nl>\\d+)\\s?(levels|vertical levels)\", description)\n nlevs = match.groupdict()[\"nl\"]\n if int(nlevs) > 49: nlas = \"20\"\n else: nlas = \"10\"\n return nlevs, nlas\n else:\n return \"40\", \"20\"", "def interpretSkillAdventurerAttack(\n skillEffectsWithName: tuple[str, list], adventurer: \"Adventurer\", enemy: \"Enemy\"\n) -> AdventurerSkill | None:\n # for index_to maybe list {\"modifier\": \"End. & Mag.\", \"target\": \"skill\", \"attribute\": \"indexed_to\",\"speed\": \"None\" }\n\n # test if skill effects empty\n if skillEffectsWithName:\n _, skillEffects = skillEffectsWithName\n else:\n skillEffects = []\n\n damage_skills = [\n x\n for x in skillEffects\n if x.attribute.lower().strip() == \"damage\"\n or (\n (x.element is not None and x.element != \"\")\n and (x.type == \"physical_attack\" or x.type == \"magic_attack\")\n )\n ]\n if len(damage_skills) > 0:\n damage_skill = damage_skills[0]\n # do the damage first if attribute == element and modifier== high/medium etc, type = attack\n index_to_effects = [\n x for x in skillEffects if x.attribute.lower().strip() == \"indexed_to\"\n ]\n index_to_modifier = set()\n # modifier is the index_to target\n for index_to_effect in index_to_effects:\n # \"attribute\" index_to\n index_to_modifier.add(index_to_effect.modifier)\n \"\"\"\n For temp boosts\n {\n \"modifier\": \"normal2_str\",\n \"target\": \"skill\",\n \"attribute\": \"temp_boost\",\n }\n \"\"\"\n temp_boost_effects = [\n x for x in skillEffects if x.attribute.lower().strip() == \"temp_boost\"\n ]\n if len(temp_boost_effects) > 0:\n temp_boost_mod = temp_boost_effects[0].modifier\n else:\n temp_boost_mod = \"none\"\n\n # loop through the variables to check if attribute exists\n extra_boosts_effects = [\n x for x in skillEffects if \"per_each\" in x.attribute.lower().strip()\n ]\n extra_boosts_value = 1.0\n # for example str/mag debuff\n if len(extra_boosts_effects) > 0:\n for extra_boosts in extra_boosts_effects:\n temp_extra_boosts = interpretExtraBoostWrapper(\n extra_boosts, adventurer, enemy\n )\n extra_boosts_value = extra_boosts_value + temp_extra_boosts\n # SELECT ase.AdventurerSkillEffectsid, ase.AdventurerSkillid, ase.duration, e.name AS element, m.value AS modifier, ty.name AS type, ta.name AS target, a.name AS attribute, s.name AS speed, ad.stars, ad.title, ad.alias, ad.limited, c.name\n ret = AdventurerSkill(\n damage_skill.target,\n temp_boost_mod,\n damage_skill.modifier,\n extra_boosts_value,\n 0,\n damage_skill.type,\n damage_skill.element,\n index_to_modifier,\n )\n return ret\n else:\n return None", "def attack(self, enemy):\r\n best_weapon = None\r\n max_damage = 0\r\n # Searches your inventory for your highest damaging weapon\r\n for i in self._inventory:\r\n if isinstance(i, items.Weapon):\r\n if i._damage > max_damage:\r\n best_weapon = i\r\n max_damage = i._damage\r\n\r\n print(\"You use {} against {}!\".format(best_weapon._name, enemy._name))\r\n enemy._health_points -= 
best_weapon._damage\r\n if not enemy.is_alive():\r\n print(\"You've killed {}!\".format(enemy._name))\r\n\r\n else:\r\n print(\"The {} isn't dead yet. It has {} health remaining. Keep fighting!\".format(enemy._name, enemy._health_points))", "def calc_level(xp, dominion):\n if xp < 3:\n xp_potential = 1\n if xp >= 3 and xp < 6:\n xp_potential = 2\n if xp >= 6 and xp < 12:\n xp_potential = 3\n if xp >= 12 and xp < 24:\n xp_potential = 4\n if xp >= 24 and xp < 48:\n xp_potential = 5\n if xp >= 48 and xp < 72:\n xp_potential = 6\n if xp >= 72 and xp < 96:\n xp_potential = 7\n if xp >= 96 and xp < 130:\n xp_potential = 8\n if xp >= 130 and xp < 170:\n xp_potential = 9\n if xp >= 170:\n xp_potential = 10\n if dominion < 2:\n dom_potential = 1\n if dominion >= 2 and dominion < 4:\n dom_potential = 2\n if dominion >= 4 and dominion < 10:\n dom_potential = 3\n if dominion >= 10 and dominion < 22:\n dom_potential = 4\n if dominion >= 22 and dominion < 38:\n dom_potential = 5\n if dominion >= 38 and dominion < 57:\n dom_potential = 6\n if dominion >= 57 and dominion < 76:\n dom_potential = 7\n if dominion >= 76 and dominion < 95:\n dom_potential = 8\n if dominion >= 95 and dominion < 124:\n dom_potential = 9\n if dominion >= 124:\n dom_potential = 10\n return min(xp_potential, dom_potential)", "def BestLevel(self,wavelet=None,maxLevel=None):\n\n if wavelet is None:\n wavelet = self.wavelet\n if maxLevel is None:\n maxLevel = self.maxLevel\n\n previouslevelmaxE = self.ShannonEntropy(self.data)\n self.wp = pywt.WaveletPacket(data=self.data, wavelet=wavelet, mode='symmetric', maxlevel=maxLevel)\n level = 1\n currentlevelmaxE = np.max([self.ShannonEntropy(n.data) for n in self.wp.get_level(level, \"freq\")])\n while currentlevelmaxE < previouslevelmaxE and level<maxLevel:\n previouslevelmaxE = currentlevelmaxE\n level += 1\n currentlevelmaxE = np.max([self.ShannonEntropy(n.data) for n in self.wp.get_level(level, \"freq\")])\n return level", "def __calcToonAtkHit(self, attackIndex, atkTargets):\n if len(atkTargets) == 0:\n return 0, 0\n\n # If this is a tutorial, the toon always hits. I'm returning\n # a 95% accuracy just as a dummy number, because I don't think\n # it gets used. Just in case it does, 95% is a safe value. 
Greater\n # than that may cause something to crash somewhere.\n if self.tutorialFlag:\n return 1, 95\n\n if self.toonsAlways5050:\n roll = random.randint(0, 99)\n if roll < 50:\n return 1, 95\n else:\n return 0, 0\n\n if self.toonsAlwaysHit:\n return 1, 95\n elif self.toonsAlwaysMiss:\n return 0, 0\n\n debug = self.notify.getDebug()\n attack = self.battle.toonAttacks[attackIndex]\n\n atkTrack, atkLevel = self.__getActualTrackLevel(attack)\n\n # According to Justin's comment it might be risky to return 100%\n # accuracy for any attack other than trap\n # If the track comes back as NPCSOS, it's not a normal toon attack,\n # so it really doesn't matter what we return anyway\n if (atkTrack == NPCSOS):\n return (1, 95)\n \n if (atkTrack == FIRE):\n return (1, 95)\n\n # certain attack tracks (trap) always hit\n if (atkTrack == TRAP):\n if debug:\n self.notify.debug(\"Attack is a trap, so it hits regardless\")\n attack[TOON_ACCBONUS_COL] = 0\n return (1, 100)\n elif (atkTrack == DROP and attack[TOON_TRACK_COL] == NPCSOS):\n # NPC drop can hit multiple targets\n unluredSuits = 0\n for tgt in atkTargets:\n if (not self.__suitIsLured(tgt.getDoId())):\n unluredSuits = 1\n if (unluredSuits == 0):\n attack[TOON_ACCBONUS_COL] = 1\n return (0, 0)\n elif (atkTrack == DROP):\n # there can be only one target for a drop, and if the target is\n # lured, the drop misses\n # uber drop has multiple targets\n # RAU TODO, make uber drop be able hit only some of the suits and miss lured suits\n allLured = True\n for i in range(len(atkTargets)):\n if (self.__suitIsLured(atkTargets[i].getDoId())):\n assert(self.notify.debug(\"Drop on lured suit \" +\n str(atkTargets[i].getDoId()) + \" missed\"))\n else:\n allLured = False\n \n if allLured:\n attack[TOON_ACCBONUS_COL] = 1\n return (0, 0)\n elif (atkTrack == PETSOS):\n return self.__calculatePetTrickSuccess(attack)\n\n # in the case of suit targets, go through each target and pick out\n # defense of the highest level suit\n tgtDef = 0\n numLured = 0\n if (atkTrack != HEAL):\n for currTarget in atkTargets:\n thisSuitDef = self.__targetDefense(currTarget, atkTrack)\n if debug:\n self.notify.debug(\"Examining suit def for toon attack: \" +\n str(thisSuitDef))\n tgtDef = min(thisSuitDef, tgtDef)\n if (self.__suitIsLured(currTarget.getDoId())):\n numLured += 1\n\n # for combos (multiple toon attacks of the same track with the same\n # target), use the track exp acc bonus of the highest level toon\n # attacking in this combo\n trackExp = self.__toonTrackExp(attack[TOON_ID_COL], atkTrack)\n for currOtherAtk in self.toonAtkOrder:\n if currOtherAtk != attack[TOON_ID_COL]:\n nextAttack = self.battle.toonAttacks[currOtherAtk]\n nextAtkTrack = self.__getActualTrack(nextAttack)\n if atkTrack == nextAtkTrack and \\\n attack[TOON_TGT_COL] == nextAttack[TOON_TGT_COL]:\n currTrackExp = self.__toonTrackExp(\n nextAttack[TOON_ID_COL], atkTrack)\n if debug:\n self.notify.debug(\"Examining toon track exp bonus: \" +\n str(currTrackExp))\n trackExp = max(currTrackExp, trackExp)\n\n if debug:\n if atkTrack == HEAL:\n self.notify.debug(\"Toon attack is a heal, no target def used\")\n else:\n self.notify.debug(\"Suit defense used for toon attack: \" +\n str(tgtDef))\n self.notify.debug(\"Toon track exp bonus used for toon attack: \" +\n str(trackExp))\n\n # now determine the accuracy of the attack the toon is using\n #\n # NPCs always hit\n if (attack[TOON_TRACK_COL] == NPCSOS): \n randChoice = 0\n else:\n randChoice = random.randint(0, 99)\n propAcc = AvPropAccuracy[atkTrack][atkLevel]\n\n # 
use an adjusted LURE accuracy level if they have a fruiting gag-tree\n if atkTrack == LURE:\n treebonus = self.__toonCheckGagBonus(attack[TOON_ID_COL], atkTrack, atkLevel)\n propBonus = self.__checkPropBonus(atkTrack)\n if self.propAndOrganicBonusStack:\n propAcc = 0\n if treebonus :\n self.notify.debug( \"using organic bonus lure accuracy\")\n propAcc+= AvLureBonusAccuracy[atkLevel]\n if propBonus:\n self.notify.debug( \"using prop bonus lure accuracy\")\n propAcc+= AvLureBonusAccuracy[atkLevel]\n else:\n if treebonus or propBonus:\n self.notify.debug( \"using oragnic OR prop bonus lure accuracy\")\n propAcc = AvLureBonusAccuracy[atkLevel]\n \n attackAcc = propAcc + trackExp + tgtDef\n\n # see if the previous attack was the same track as the current\n # track, if so the hit or miss status of this attack is the same\n # as the that of the prevous attack\n currAtk = self.toonAtkOrder.index(attackIndex)\n # Heals shouldn't be affected by the previous attack\n if (currAtk > 0 and atkTrack != HEAL):\n prevAtkId = self.toonAtkOrder[currAtk - 1]\n prevAttack = self.battle.toonAttacks[prevAtkId]\n prevAtkTrack = self.__getActualTrack(prevAttack)\n\n # if the track of this attack is the same as the previous attack\n # and the target(s) of this and the previous attacks are the same\n # and the previous attack was side-stepped, then this attack\n # is also side-stepped since attacks of the same track occurr at\n # once NOTE: the target column for the attack is the id of the\n # target, if the attack is a group attack, this id is 0 since\n # there is no single target, in either case the real id or id of\n # 0 must be the same for this and the previous attack\n # we need to special handle a lure attack, since lures can be\n # either group or single targets and when one hits in a single\n # round all others during that same round also hit\n lure = atkTrack == LURE and \\\n ((not attackAffectsGroup(atkTrack, atkLevel, \n attack[TOON_TRACK_COL]) and \\\n self.successfulLures.has_key(attack[TOON_TGT_COL])) or \\\n attackAffectsGroup(atkTrack, atkLevel, attack[TOON_TRACK_COL]))\n if atkTrack == prevAtkTrack and \\\n (attack[TOON_TGT_COL] == prevAttack[TOON_TGT_COL] or \\\n lure):\n\n if prevAttack[TOON_ACCBONUS_COL] == 1:\n if debug:\n self.notify.debug(\"DODGE: Toon attack track dodged\")\n elif prevAttack[TOON_ACCBONUS_COL] == 0:\n if debug:\n self.notify.debug(\"HIT: Toon attack track hit\")\n else:\n assert 0, \"Unknown value for sidestep flag\"\n attack[TOON_ACCBONUS_COL] = prevAttack[TOON_ACCBONUS_COL]\n return (not attack[TOON_ACCBONUS_COL],\n# (not attack[TOON_ACCBONUS_COL]) * 100)\n attackAcc)\n\n atkAccResult = attackAcc\n if debug:\n self.notify.debug(\"setting atkAccResult to %d\" % atkAccResult)\n acc = attackAcc + self.__calcToonAccBonus(attackIndex)\n\n if (atkTrack != LURE and atkTrack != HEAL):\n # non-lure attacks are affected by how many targets are lured,\n # the more targets that are lured, the better chance the attack\n # has to hit, if all targets are lured, the attack will hit\n if atkTrack != DROP:\n if numLured == len(atkTargets):\n # all suits are lured so the attack hits regardless\n if debug:\n self.notify.debug(\"all targets are lured, attack hits\")\n attack[TOON_ACCBONUS_COL] = 0\n return (1, 100)\n else:\n # at least one target is not lured, but lets reduce the\n # defense of all the targets based on how many suits are lured\n luredRatio = float(numLured) / float(len(atkTargets))\n accAdjust = 100 * luredRatio\n if accAdjust > 0 and debug:\n self.notify.debug(str(numLured) 
+ \" out of \" +\n str(len(atkTargets)) +\n \" targets are lured, so adding \" +\n str(accAdjust) +\n \" to attack accuracy\")\n acc += accAdjust\n else:\n #lets reverse the logic if it's a drop\n if numLured == len(atkTargets):\n # all suits are lured so the attack misses\n if debug:\n self.notify.debug(\"all targets are lured, attack misses\")\n attack[TOON_ACCBONUS_COL] = 0\n return (0, 0)\n else:\n #RAU this feels too overpowered\n pass\n # at least one target is not lured, but lets reduce the\n # defense of all the targets based on how many suits are lured\n #luredRatio = float(numLured) / float(len(atkTargets))\n #luredRatio = 1.0 - luredRatio\n #accAdjust = 100 * luredRatio\n #if accAdjust > 0 and debug:\n # self.notify.debug(str(numLured) + \" out of \" +\n # str(len(atkTargets)) +\n # \" targets are lured, so adding \" +\n # str(accAdjust) +\n # \" to attack accuracy\")\n #acc += accAdjust\n \n \n \n\n\n # NOTE: We are imposing an upper limit on accuracy, so there is\n # always some chance of missing (for toons anyway)\n if acc > MaxToonAcc:\n acc = MaxToonAcc\n\n if randChoice < acc:\n if debug:\n self.notify.debug(\"HIT: Toon attack rolled\" +\n str(randChoice) +\n \"to hit with an accuracy of\" + str(acc))\n attack[TOON_ACCBONUS_COL] = 0\n else:\n if debug:\n self.notify.debug(\"MISS: Toon attack rolled\" +\n str(randChoice) +\n \"to hit with an accuracy of\" + str(acc))\n attack[TOON_ACCBONUS_COL] = 1\n return (not attack[TOON_ACCBONUS_COL], atkAccResult)", "def findMinMaxFeatures(pl_accuracy, pw_accuracy, sl_accuracy, sw_accuracy):\r\n \r\n All_Accuracies = {pl_accuracy: 'Petal length', \r\n pw_accuracy: 'Petal width', \r\n sl_accuracy: 'Sepal length', \r\n sw_accuracy: 'Sepal width'}\r\n \r\n worst_accuracy = All_Accuracies[min(All_Accuracies.keys())]\r\n \r\n best_accuracy = All_Accuracies[max(All_Accuracies.keys())]\r\n\r\n return worst_accuracy, best_accuracy", "def normal_defense(self):\n if self.game.get_my_mana() > DEFENSE_MANA_CAP:\n self.portals.dumb_castle_defense(DEFENSE_MANA_CAP)\n self.portals.dumb_portal_defense(PORTAL_SELF_DEFENSE_MANA_CAP)", "def attack(self):\n total_amnt_attack = 0\n for new_attack in self.abilities:\n total_amnt_attack += new_attack.attack()\n return total_amnt_attack", "def weapon_strength(weapon):\n weapon_strength_int = WEAPON_STRENGTHS[weapon]\n #print weapon_strength_int\n return weapon_strength_int", "def chargen (level0number, level0die, level1number, level1die, level2number, level2die, level3number, level3die, level4number, level4die): #input dice numbers and types for all four potential character levels\r\n\tfilename=stellagama.savefile(\"txt\")\r\n\twith open (filename, \"w\") as output:\r\n\t\tfor i in range (0, stellagama.dice(level0number, level0die)):\r\n\t\t\tcharacter1=character(0)\r\n\t\t\toutput.write (\"%s, level %s %s %s %s, %s hit points\" %(character1.name, character1.level, character1.sex, character1.race, character1.cclass, character1.hp)+'\\r\\n')\r\n\t\t\toutput.write (\"STR: %s DEX: %s CON: %s INT: %s WIS: %s CHA: %s\" %(character1.strength, character1.dexterity, character1.constitution, character1.intelligence, character1.wisdom, character1.charisma)+'\\r\\n')\r\n\t\t\tproflist=\", \".join(character1.proficiencies)\r\n\t\t\toutput.write (\"Proficiencies: %s\" %(proflist)+'\\r\\n')\r\n\t\t\toutput.write (character1.quirk+'\\r\\n')\r\n\t\t\toutput.write (\"%s, %s, %s\" %(character1.weapon, character1.armor, character1.trinket)+'\\r\\n')\r\n\t\t\toutput.write('\\r\\n')\r\n\t\tfor i in range (0, 
stellagama.dice(level1number, level1die)):\r\n\t\t\tcharacter1=character(1)\r\n\t\t\toutput.write (\"%s, level %s %s %s %s, %s hit points\" %(character1.name, character1.level, character1.sex, character1.race, character1.cclass, character1.hp)+'\\r\\n')\r\n\t\t\toutput.write (\"STR: %s DEX: %s CON: %s INT: %s WIS: %s CHA: %s\" %(character1.strength, character1.dexterity, character1.constitution, character1.intelligence, character1.wisdom, character1.charisma)+'\\r\\n')\r\n\t\t\tproflist=\", \".join(character1.proficiencies)\r\n\t\t\toutput.write (\"Proficiencies: %s\" %(proflist)+'\\r\\n')\r\n\t\t\toutput.write (character1.quirk+'\\r\\n')\r\n\t\t\toutput.write (\"%s, %s, %s\" %(character1.weapon, character1.armor, character1.trinket)+'\\r\\n')\r\n\t\t\toutput.write('\\r\\n')\r\n\t\tfor i in range (0, stellagama.dice(level2number, level2die)):\r\n\t\t\tcharacter1=character(2)\r\n\t\t\toutput.write (\"%s, level %s %s %s %s, %s hit points\" %(character1.name, character1.level, character1.sex, character1.race, character1.cclass, character1.hp)+'\\r\\n')\r\n\t\t\toutput.write (\"STR: %s DEX: %s CON: %s INT: %s WIS: %s CHA: %s\" %(character1.strength, character1.dexterity, character1.constitution, character1.intelligence, character1.wisdom, character1.charisma)+'\\r\\n')\r\n\t\t\tproflist=\", \".join(character1.proficiencies)\r\n\t\t\toutput.write (\"Proficiencies: %s\" %(proflist)+'\\r\\n')\r\n\t\t\toutput.write (character1.quirk+'\\r\\n')\r\n\t\t\toutput.write (\"%s, %s, %s\" %(character1.weapon, character1.armor, character1.trinket)+'\\r\\n')\r\n\t\t\toutput.write('\\r\\n')\r\n\t\tfor i in range (0, stellagama.dice(level3number, level3die)):\r\n\t\t\tcharacter1=character(3)\r\n\t\t\toutput.write (\"%s, level %s %s %s %s, %s hit points\" %(character1.name, character1.level, character1.sex, character1.race, character1.cclass, character1.hp)+'\\r\\n')\r\n\t\t\toutput.write (\"STR: %s DEX: %s CON: %s INT: %s WIS: %s CHA: %s\" %(character1.strength, character1.dexterity, character1.constitution, character1.intelligence, character1.wisdom, character1.charisma)+'\\r\\n')\r\n\t\t\tproflist=\", \".join(character1.proficiencies)\r\n\t\t\toutput.write (\"Proficiencies: %s\" %(proflist)+'\\r\\n')\r\n\t\t\toutput.write (character1.quirk+'\\r\\n')\r\n\t\t\toutput.write (\"%s, %s, %s\" %(character1.weapon, character1.armor, character1.trinket)+'\\r\\n')\r\n\t\t\toutput.write('\\r\\n')\r\n\t\tfor i in range (0, stellagama.dice(level4number, level4die)):\r\n\t\t\tcharacter1=character(4)\r\n\t\t\toutput.write (\"%s, level %s %s %s %s, %s hit points\" %(character1.name, character1.level, character1.sex, character1.race, character1.cclass, character1.hp)+'\\r\\n')\r\n\t\t\toutput.write (\"STR: %s DEX: %s CON: %s INT: %s WIS: %s CHA: %s\" %(character1.strength, character1.dexterity, character1.constitution, character1.intelligence, character1.wisdom, character1.charisma)+'\\r\\n')\r\n\t\t\tproflist=\", \".join(character1.proficiencies)\r\n\t\t\toutput.write (\"Proficiencies: %s\" %(proflist)+'\\r\\n')\r\n\t\t\toutput.write (character1.quirk+'\\r\\n')\r\n\t\t\toutput.write (\"%s, %s, %s\" %(character1.weapon, character1.armor, character1.trinket)+'\\r\\n')\r\n\t\t\toutput.write('\\r\\n')", "def max_gain(self):\n if self.val1:\n val1_gain_tuple, val0_gain_tuple = self.val1.max_gain(), self.val0.max_gain()\n if val1_gain_tuple.gain > val0_gain_tuple.gain:\n return val1_gain_tuple\n else:\n return val0_gain_tuple\n elif self.attributes:\n filtered_data = filter_data(self.data,self.ancestors)\n max_attribute, max_gain 
= max([(attribute,\n self.heuristic(self,attribute)) for attribute in self.attributes],\n key = lambda x: x[1])\n return gain_tuple(self, max_attribute, max_gain)\n return gain_tuple(None, '', 0)", "async def tolevel(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if args[0].isdigit():\n level = int(args[0])\n skill = ' '.join(args[1:])\n else:\n level = None\n skill = ' '.join(args)\n out = users.calc_xp_to_level(ctx.user_object, skill, level)\n await ctx.send(out)", "def get_max_gains(self):\n return tuple([lib.is_SetHWGainFactor(self.hcam,0x800c+i,100)/100 for i in range(4)])", "def getLevels():", "def retrieve_scores(num_letters, language, folder, min_score, max_score):\n if (num_letters == 1):\n return retrieve_letter_scores(1, language, folder, min_score, max_score)\n elif (num_letters == 2):\n return retrieve_syllable_scores(num_letters, 1, language, folder, min_score, max_score)\n elif (num_letters == 3):\n return retrieve_syllable_scores(num_letters, 3, language, folder, min_score, max_score)\n else:\n print(\"Error: incorrect number of letters. Value ranges from 1 to 3.\\n\")", "def showBestStatLevelReached(self) :\n bestLevel = 0\n for level in self.level_history :\n bestLevel = level.level if bestLevel < level.level else bestLevel\n Scenario.messageBestStatLevelReached(bestLevel)", "def alphabeta_maximize_play(self, game, legal_moves, depth, alpha, beta):\n highest_score, selected_move = (float('-inf'), (-1, -1))\n for move in legal_moves:\n score, _ = self.alphabeta(game.forecast_move(move), depth - 1, alpha, beta, False)\n if score > alpha:\n alpha = score\n highest_score, selected_move = score, move\n if alpha >= beta:\n break\n return (highest_score, selected_move)", "def _updateLevel(self, level, lastUpdate, time, timeout):\n\t\ttimeoutsPassed = (time - lastUpdate) / timeout\n\t\treturn max(0, level - timeoutsPassed)", "def get_mad_lib_and_answers(difficulty):\n if difficulty == \"easy\":\n return (EASY_MAD_LIB, EASY_ANSWERS)\n elif difficulty == \"medium\":\n return (MEDIUM_MAD_LIB, MEDIUM_ANSWERS)\n elif difficulty == \"hard\":\n return (HARD_MAD_LIB, HARD_ANSWERS)\n else:\n print(\"Error! 
That isn't a difficulty!\")", "def calc_accuracy(batch_assignments, indices_gt, src_key_num_gt):\n\n values, indices = torch.max(\n batch_assignments, 2\n ) # get matches for source key points\n indices += (\n 1\n ) # remember that indices start counting from 1 for 0 is used to store empty key points\n\n accuracy = (indices_gt == indices).sum(dim=1).float()\n accuracy = torch.div(accuracy, src_key_num_gt)\n\n return accuracy", "def gainLevelUp(self, statsOnly=True):\n # Gain stats\n roundUpStrength = sumRollsOver(self._baseStrength, self.levelupStrength)\n self._baseStrength += self.levelupStrength\n displayStrengthGain = int(math.floor(self.levelupStrength))\n if roundUpStrength:\n displayStrengthGain += 1\n\n roundUpDexterity = sumRollsOver(self._baseDexterity, self.levelupDexterity)\n self._baseDexterity += self.levelupDexterity\n displayDexterityGain = int(math.floor(self.levelupDexterity))\n if roundUpDexterity:\n displayDexterityGain += 1\n\n roundUpCunning = sumRollsOver(self._baseCunning, self.levelupCunning)\n self._baseCunning += self.levelupCunning\n displayCunningGain = int(math.floor(self.levelupCunning))\n if roundUpCunning:\n displayCunningGain += 1\n\n roundUpSorcery = sumRollsOver(self._baseSorcery, self.levelupSorcery)\n self._baseSorcery += self.levelupSorcery\n displaySorceryGain = int(math.floor(self.levelupSorcery))\n if roundUpSorcery:\n displaySorceryGain += 1\n\n roundUpPiety = sumRollsOver(self._basePiety, self.levelupPiety)\n self._basePiety += self.levelupPiety\n displayPietyGain = int(math.floor(self.levelupPiety))\n if roundUpPiety:\n displayPietyGain += 1\n\n roundUpConstitution = sumRollsOver(self._baseConstitution, self.levelupConstitution)\n self._baseConstitution += self.levelupConstitution\n displayConstitutionGain = int(math.floor(self.levelupConstitution))\n if roundUpConstitution:\n displayConstitutionGain += 1\n\n self._baseHP += self.levelupHP\n self._baseMP += self.levelupMP\n\n self.HP = self.totalHP\n self.MP = self.totalMP", "def process(self):\n value, kw = self.check()\n \n level = logging.OK\n for i, j in self.thresholds:\n if value > i: level = j\n \n return level, kw", "def get_accuracy(self, predictions, targets, **kwargs):\n\n if self.metric_type == None:\n return -1\n\n else:\n return self.metric_object.get_accuracy(predictions, targets, **kwargs)", "async def get_xp(level, command):\n if command == \"profile\":\n return 250 * level\n return int((2 * 350) * (2 ** (level - 2))) # 350 is base value (level 1)", "def get_levels(self):\n return self.levels[self.game]", "def strategy(hand, num_die_sides):\n all_holds = list(gen_all_holds(hand))\n expect=[]\n for held_dice in all_holds:\n expect.append(expected_value(held_dice, num_die_sides, len(hand)-len(held_dice)))\n max_expect_index = expect.index(max(expect))\n return (max(expect), (all_holds[max_expect_index]))", "def get_modifiers(roll, critical_hit):\n modifiers = {\"Advantage\": False, \"Disadvantage\": False, \"Lucky\": False, \"Great Weapon\": False, \"Brutal\": 0,\n \"Critical\": critical_hit, \"Minimum Roll\": 1}\n if ADVANTAGE_CODE in roll:\n modifiers[\"Advantage\"] = True\n if DISADVANTAGE_CODE in roll:\n modifiers[\"Disadvantage\"] = True\n if LUCKY_CODE in roll:\n modifiers[\"Lucky\"] = True\n if GREAT_WEAPON_CODE in roll:\n modifiers[\"Great Weapon\"] = True\n modifiers[\"Brutal\"] += roll.count(\"B\")\n if MINIMUM_10_CODE in roll:\n modifiers[\"Minimum Roll\"] = 10\n return modifiers", "def DetermineAttackOrder(self):\n\n if self.fighter1.speed > self.fighter2.speed:\n 
self.attacker = self.fighter1\n self.defender = self.fighter2\n elif self.fighter2.speed > self.fighter1.speed:\n self.attacker = self.fighter2\n self.defender = self.fighter1\n else:\n if self.fighter1.luck > self.fighter2.luck:\n self.attacker = self.fighter1\n self.defender = self.fighter2\n elif self.fighter2.luck > self.fighter1.luck:\n self.attacker = self.fighter2\n self.defender = self.fighter1\n else:\n if random.random() <= 0.5:\n self.attacker = self.fighter1\n self.defender = self.fighter2\n else:\n self.attacker = self.fighter2\n self.defender = self.fighter1", "def enemyrawdmg(self):\n\n enemystr = globalvalues.ai.getstatus()[3]\n # rngfactor will ensure that regular mobs won't absolutely crush you\n rngfactor = float(float(random.randint(45, 65)) / 100)\n level = (\n globalvalues.p1.getlevel()\n - globalvalues.ai.getstatus()[0]\n )\n lvlfactor = float(1 - level * 0.05)\n\n return int((enemystr) * 102 * 0.12 * rngfactor * lvlfactor)", "def SADamageFunction(\n skill: AdventurerSkill | None,\n adventurer: \"Adventurer\",\n enemy: \"Enemy\",\n memboost: dict[str, int | float],\n combo: int,\n saRng: float,\n) -> int:\n if skill is None:\n return 0\n\n # lowercase everything\n target = skill.target.lower()\n tempBoostName = skill.tempBoost.lower()\n powerCoefficientName = skill.powerCoefficient.lower()\n powerCoefficient = 1.0\n\n if tempBoostName == \"none\":\n tempBoost = 1.0\n elif \"normal\" in tempBoostName:\n tempBoost = 1.4\n else:\n tempBoost = 1.7\n\n if skill.target == \"foe\":\n match powerCoefficientName:\n case \"low\" | \"lo\":\n powerCoefficient = 1.5\n case \"mid\" | \"medium\":\n powerCoefficient = 1.7\n case \"high\":\n powerCoefficient = 1.9\n case \"super\":\n powerCoefficient = 2.1\n case \"ultra\":\n powerCoefficient = 4.0\n else:\n match powerCoefficientName:\n case \"low\" | \"lo\":\n powerCoefficient = 1.1\n case \"mid\" | \"medium\":\n powerCoefficient = 1.15\n case \"high\":\n powerCoefficient = 1.2\n case \"super\":\n powerCoefficient = 1.4\n case \"ultra\":\n powerCoefficient = 3.6\n\n if \"physical\" in skill.type:\n stat_key = \"strength\"\n resist_key = \"physical\"\n else:\n stat_key = \"magic\"\n resist_key = \"magic\"\n\n tempPower = adventurer.stats[stat_key]\n tempPowerBoostAdv = adventurer.statsBoostAdv[stat_key]\n tempPowerBoostAst = adventurer.statsBoostAst[stat_key]\n tempMemBoost = memboost[stat_key]\n\n tempTypeResistDownBase = enemy.typeResistDownBase[resist_key]\n tempTypeResistDownAdv = enemy.typeResistDownAdv[resist_key]\n tempTypeResistDownAst = enemy.typeResistDownAst[resist_key]\n # check enemy buffs p/m resist\n tempTypeResistBuff = enemy.get_buff_mod(f\"{resist_key}_resist\")\n\n # get strength/magic debuff\n powerDebuff = adventurer.get_boostCheckAdv(False, stat_key)\n tempPowerBoostDebuff = 0.0\n if powerDebuff is not None:\n tempPowerBoostDebuff = abs(powerDebuff.modifier)\n else:\n tempPowerBoostDebuff = 0\n\n if len(skill.index_to) != 0:\n tempPower = 0\n tempPowerBoostAdv = 0.0\n tempPowerBoostAst = 0.0\n tempMemBoost = 0\n powerCoefficient = powerCoefficient * 1.96\n for index_to_attributes in skill.index_to:\n tempPower += adventurer.stats[index_to_attributes]\n tempPowerBoostAdv += adventurer.statsBoostAdv[index_to_attributes]\n tempPowerBoostAst += adventurer.statsBoostAst[index_to_attributes]\n tempMemBoost += memboost[index_to_attributes]\n tempElementBoostDebuff = 0.0\n if skill.element != \"\" and skill.noType != 1:\n # elementResistDownBase\n tempElementResistDownBase = enemy.elementResistDownBase[skill.element]\n 
# elementResistDownAdv\n tempElementResistDownAdv = enemy.elementResistDownAdv[skill.element]\n # elementResistDownAst\n tempElementResistDownAst = enemy.elementResistDownAst[skill.element]\n # elementDamageBoostAdv[location]\n\n tempElementDamageBoostAdv = adventurer.elementDamageBoostAdv[skill.element]\n if memboost.get(f\"{skill.element}_attack\") is not None:\n tempElementDamageBoostAdv += memboost[f\"{skill.element}_attack\"]\n # elemental damage boost from weapon\n if adventurer.stats.get(skill.element) is not None:\n tempElementDamageBoostAdv += cast(float, adventurer.stats[skill.element])\n # elementDamageBoostAst[location]\n tempElementDamageBoostAst = adventurer.elementDamageBoostAst[skill.element]\n # element debuff\n tempEleDebuff = adventurer.get_boostCheckAdv(False, f\"{skill.element}_attack\")\n if tempEleDebuff is not None:\n tempElementBoostDebuff = abs(tempEleDebuff.modifier)\n else:\n tempElementResistDownBase = 0.0\n tempElementResistDownAdv = 0.0\n tempElementResistDownAst = 0.0\n tempElementDamageBoostAdv = 0.0\n tempElementDamageBoostAst = 0.0\n\n if target == \"foe\":\n temptargetResistDownAdv = enemy.targetResistDownAdv[\"st\"]\n temptargetResistDownAst = enemy.targetResistDownAst[\"st\"]\n # foes\n else:\n temptargetResistDownAdv = enemy.targetResistDownAdv[\"aoe\"]\n temptargetResistDownAst = enemy.targetResistDownAst[\"aoe\"]\n\n temp_enemy_end = enemy.stats\n\n tempDamage = (\n (\n max(\n 2\n * tempPower\n * tempBoost\n * (\n 1\n + tempPowerBoostAdv\n + tempPowerBoostAst\n + tempMemBoost\n - tempPowerBoostDebuff\n )\n - temp_enemy_end[\"endurance\"],\n 0,\n )\n )\n * (\n 1\n - tempElementResistDownBase\n - tempElementResistDownAdv\n - tempElementResistDownAst\n - tempTypeResistDownBase\n - tempTypeResistDownAdv\n - tempTypeResistDownAst\n - tempTypeResistBuff\n )\n * (\n 1\n + tempElementDamageBoostAdv\n + tempElementDamageBoostAst\n - tempElementBoostDebuff\n )\n * (1 + adventurer.critPenBoost + 0.06)\n * (1 - temptargetResistDownAdv - temptargetResistDownAst)\n * powerCoefficient\n * 1.5\n * (skill.extraBoost)\n * (0.8 + combo * 0.2)\n * saRng\n )\n return int(tempDamage)", "def evaluate_power(soldier_list: List[Soldier]):\n inf_count = 0\n inf_avg_weapon = 0.0\n inf_avg_armor = 0.0\n arc_count = 0\n arc_avg_weapon = 0.0\n arc_avg_armor = 0.0\n cvl_count = 0\n cvl_avg_weapon = 0.0\n cvl_avg_armor = 0.0\n \n for soldier in soldier_list:\n ################################# YOUR CODE HERE #################################\n if soldier.typecode == \"ARC\":\n arc_count += 1\n arc_avg_armor += soldier.armor\n arc_avg_weapon += soldier.weapon\n elif soldier.typecode == \"INF\":\n inf_count += 1\n inf_avg_armor += soldier.armor\n inf_avg_weapon += soldier.weapon\n elif soldier.typecode == \"CVL\":\n cvl_count += 1\n cvl_avg_armor += soldier.armor\n cvl_avg_weapon += soldier.weapon\n if arc_count != 0:\n arc_avg_armor /= arc_count\n arc_avg_weapon /= arc_count\n\n if cvl_count != 0:\n cvl_avg_armor /= cvl_count\n cvl_avg_weapon /= cvl_count\n\n if inf_count != 0:\n inf_avg_armor /= inf_count\n inf_avg_weapon /= inf_count\n ##################################################################################\n return (inf_count, inf_avg_weapon, inf_avg_armor), (arc_count, arc_avg_weapon, arc_avg_armor), (cvl_count, cvl_avg_weapon, cvl_avg_armor)", "def enemy_bullet_hits(ai, screen, ship, enemies, slayers, lasers, shields, hub):\r\n\tif ship.shield:\r\n\t\tpygame.sprite.groupcollide(slayers, shields, True, False)\r\n\r\n\tfor slay in slayers.copy():\r\n\t\tif 
pygame.sprite.collide_rect(ship, slay):\r\n\t\t\tif not hub.death:\r\n\t\t\t\thub.h_bar = hub.h_bar - slay.damage\r\n\t\t\t\t#ship.damage()\r\n\t\t\t\tslayers.remove(slay)\r\n\t\r\n\tfor laser in lasers.copy():\r\n\t\tif laser.hurt:\r\n\t\t\tif pygame.sprite.collide_rect(ship, laser):\r\n\t\t\t\tif not hub.death:\r\n\t\t\t\t\thub.h_bar = hub.h_bar - laser.damage", "def tu_levels(active, levels):\n active = list(active)\n gene2level = dict(zip(active, levels))\n tu2level = defaultdict(list)\n for gene in active:\n # genes without associated TU are their own TU\n if len(gene.transcription_units) == 0:\n tu2level[gene].append(gene2level[gene])\n else:\n for tu in gene.transcription_units:\n tu2level[tu].append(gene2level[gene])\n t_units = tu2level.keys()\n levels = [np.mean(tu2level[tu]) for tu in t_units]\n return (t_units, levels)", "def addLevel(self, amount):\r\n debug.write(\"[SourceRPG] Handling addLevel\", 1)\r\n self.player['level'] += amount\r\n \r\n \"\"\" If turbo mode is on multipliy the credits received \"\"\"\r\n if currentTurboMode:\r\n self.player['credits'] += int( amount * int(creditsReceived) * float(turboCreditMultiplier))\r\n else:\r\n self.player['credits'] += amount * int(creditsReceived)\r\n \r\n \"\"\" Check if the level has reached the limit \"\"\"\r\n if int(maxLevel) and self.player['level'] > int(maxLevel):\r\n debug.write(\"Maximum level reached, ensure that resetSkills\", 1)\r\n \"\"\" If we want to reset the skills, reset them \"\"\"\r\n if int(maxLevelReset):\r\n self.resetSkills()\r\n tell(self.userid, 'maximum level reached')\r\n debug.write(\"Levels Reset\", 1)\r\n else:\r\n \"\"\" Othewise assign the level and XP to the maximum possible \"\"\"\r\n self.player['level'] = int(maxLevel)\r\n self.player['xp'] = (self.player['level'] - 1) * int(xpIncrement) + int(startXp) - 1\r\n debug.write(\"Assigned XP to maximum value\", 1)\r\n else: \r\n \"\"\" The level is okay, check for bots and play the message etc \"\"\"\r\n if not self.player.isbot:\r\n debug.write(\"Player is not a bot\", 2)\r\n \"\"\" Only do the following for humans \"\"\"\r\n if not int(levelUp):\r\n tokens = {}\r\n tokens['level'] = self.player['level']\r\n tokens['xp'] = self.player['xp']\r\n tokens['nextxp'] = (self.player['level'] - 1) * int(xpIncrement) + int(startXp) - self.player['xp']\r\n tell( self.userid, 'level gained private', tokens )\r\n \r\n if self.player['popup']:\r\n debug.write(\"Building skill menu\", 1)\r\n buildSkillMenu(self.userid)\r\n \r\n else:\r\n \"\"\" Player is a bot, check for the maximum possible level for a bot \"\"\"\r\n debug.write(\"Bot leveled up, choose a random skill\", 2)\r\n if int(botMaxLevel) and self.player['level'] > int(botMaxLevel):\r\n debug.write(\"Reset bot's skills, maximum level achieved\", 2)\r\n self.resetSkills()\r\n else:\r\n \"\"\" Upgrade a random skill if possible \"\"\"\r\n while True:\r\n \"\"\" Loop until we manually break \"\"\"\r\n possibleChoices = []\r\n credits = self.player['credits']\r\n for skill in skills:\r\n \"\"\" \r\n Iterate through all loaded skills and if the bot\r\n can afford the skill, append it to the possible choices\r\n \"\"\"\r\n if credits >= self.player[skill.name] * skill.creditIncrement + skill.startCredit:\r\n if self.player[skill.name] < skill.maxLevel:\r\n possibleChoices.append(skill.name)\r\n if not possibleChoices:\r\n \"\"\" \r\n The bot cannot afford any skills or has maxed out\r\n the skills, the manually break\r\n \"\"\"\r\n break\r\n \r\n \"\"\" \r\n Finally call the checkSkillForUpgrading 
function passing\r\n the arguments manually rather than letting a popup do it\r\n \"\"\"\r\n debug.write(\"Checking to update a skill\", 2)\r\n checkSkillForUpgrading(self.userid, random.choice(possibleChoices), None, False )\r\n \r\n if int(levelUp):\r\n tokens = {}\r\n tokens['name'] = self.player.name\r\n tokens['level'] = self.player['level']\r\n tokens['xp'] = self.player['xp']\r\n tokens['nextxp'] = (self.player['level'] - 1) * int(xpIncrement) + int(startXp)\r\n \r\n for userid in filter( lambda x: not es.isbot(x), es.getUseridList()):\r\n tell(userid, 'level gained global', tokens)\r\n \r\n if str(levelupSound):\r\n es.emitsound('player', self.userid, str(levelupSound), 0.7, 0.5 )\r\n \r\n \"\"\" Create and fire the levelup event \"\"\"\r\n values = {}\r\n values[\"userid\"] = (\"setint\", self.userid)\r\n values[\"newlevel\"] = (\"setint\", self.player['level'])\r\n values[\"oldlevel\"] = (\"setint\", self.player['level'] - amount)\r\n values[\"amount\"] = (\"setint\", amount)\r\n values[\"xp\"] = (\"setint\", self.player['xp'])\r\n values[\"xpneeded\"] = (\"setint\", (self.player['level'] - 1) * int(xpIncrement) + int(startXp))\r\n gamethread.delayed(0, fireEvent, (\"sourcerpg_levelup\", values))\r\n debug.write(\"[SourceRPG] Handled addLevel\", 1)", "def get_best_accuracy_metrics(self,\n verbose: bool = True) -> Tuple[int, int]:\n accuracy_scores = list()\n for i in self.search_space:\n classes = self.convert_classes(threshold=i)\n accuracy_scores.append(accuracy_score(classes, self.y_true))\n best_accuracy_score, best_accuracy_threshold = self._get_best_metrics(\n metric_type='accuracy_score',\n scores=accuracy_scores,\n greater_is_better=True,\n verbose=verbose\n )\n return best_accuracy_score, best_accuracy_threshold", "def accuracy(outputs, targets):\n\n batch_size = targets.size(0)\n\n _, pred = torch.max(outputs.data, 1)\n correct = (pred == targets).sum().item()\n\n res = 100 * correct / batch_size\n return res", "def input_level():\n\tlevel_instructions = \"\\n\" + \"Please select a game difficulty by typing it in!\" + \"\\n\"\n\tlevel_options = [\"easy\", \"medium\", \"hard\"]\n\terror_level = \"\\n\" + \"That's not an option! \" + \"\\n\"\n\tlevel = \"\" \n\tprint level_instructions\n\tlevel = raw_input('Possible choices include: easy, medium, or hard: ')\n\tlevel = level.lower()\n\twhile check_level(level, level_options) == None:\n\t\tprint \"\\n\" + error_level + \"\\n\"\n\t\tprint \"\\n\" + level_instructions + \"\\n\"\n\t\tlevel = raw_input('Possible choices include: easy, medium, or hard: ')\n\t\tcheck_level(level, level_options)\n\tprint \"\\n\" + \"You've chosen %s!\" % level\n\tlevel_index = level_options.index(level)\n\treturn level_index", "def assign_level(self, minibatch_reference_proboxes):\n with tf.name_scope('assign_levels'):\n ymin, xmin, ymax, xmax = tf.unstack(minibatch_reference_proboxes, axis=2)\n\n w = tf.maximum(xmax - xmin, 0.) # avoid w is negative\n h = tf.maximum(ymax - ymin, 0.) # avoid h is negative\n\n levels = tf.round(4. 
+ tf.log(tf.sqrt(w*h + 1e-8)/224.0) / tf.log(2.)) # 4 + log_2(***)\n\n levels = tf.maximum(levels, tf.ones_like(levels) * (np.float32(self.min_level))) # level minimum is 2\n levels = tf.minimum(levels, tf.ones_like(levels) * (np.float32(self.max_level))) # level maximum is 5\n\n return tf.cast(levels, tf.int32)", "def GetNormalEnemy(self, diffmode, mapname, careAboutLimit, maxSize, desiredDifficulty, diffStrictness, originalEnemyID):\n newC = -1\n if (not careAboutLimit or len(self.uniqueIndices) < self.MAX_UNIQUE):\n if (diffmode == 1 or diffmode == 4):\n diffList = self.getDifficultyList(desiredDifficulty, diffStrictness, False, maxSize)\n if (len(diffList) > 0):\n newC = self.GetEnemyFromListWithRetry(diffList, originalEnemyID)\n else:\n newC = -6\n else:\n newC = self.GetEnemyFromListWithRetry(self.validSizeNormal[maxSize], originalEnemyID)\n else:\n newC = self.GetEnemyFromListWithRetry(self.uniqueNormals[maxSize], originalEnemyID)\n\n if (diffmode >= 3 and mapname == \"m18_01_00_00\" and originalEnemyID in self.EASYASYLUM_TARGETS):\n newC = self.getRandomFromList(self.HARDCODED_ASYLUM_NORMAL)\n\n return newC", "def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, \n\t\taliens, bullets, long_bullets):\n\t#Remove any bullets and aliens that have collided\n\tcollisions = pygame.sprite.groupcollide(bullets, aliens, True, True)\n\tcollisions1 = pygame.sprite.groupcollide(long_bullets, aliens, False, True)\n\tif collisions or collisions1:\n\t\tfor aliens in collisions.values():\n\t\t\tstats.score += ai_settings.alien_points * len(aliens)\n\t\t\tsb.prep_score()\n\t\t\tstats.alien_kills += len(aliens)\n\t\tfor aliens in collisions1.values():\n\t\t\tstats.score += ai_settings.alien_points * len(aliens)\n\t\t\tsb.prep_score()\t\n\t\t\tstats.alien_kills += len(aliens)\t\t\n\t\tcheck_high_score(stats, sb)\n\t\n\tif len(aliens) == 0:\n\t\tstart_new_level(ai_settings, screen, stats, sb, ship, aliens, bullets)", "def ai_w_level(gstate: TicTacToe, game_tree, level=3):\n assert isinstance(level, int), \"`level` must be `int`\"\n assert 0 <= level <= 4, \"level values must be from 0 to 4\"\n\n seed = random.random()\n logging.debug(f\"seed value: {seed:.3f}\")\n\n if level == 0:\n ai_func = ai_derp\n elif level == 1:\n ai_func = ai_derp if seed <= 0.3 else ai_strategy1\n elif level == 2:\n ai_func = ai_derp if seed <= 0.2 else ai_strategy2\n elif level == 3:\n ai_func = ai_derp if seed <= 0.1 else ai_strategy3\n elif level == 4:\n ai_func = ai_strategy3\n\n return ai_func(gstate, game_tree)", "def max_levels(self):\n\n return self._max_levels", "def __addAttackExp(self, attack, track=-1, level=-1, attackerId=-1):\n trk = -1\n lvl = -1\n id = -1\n if track != -1 and level != -1 and attackerId != -1:\n trk = track\n lvl = level\n id = attackerId\n elif self.__attackHasHit(attack):\n if self.notify.getDebug():\n self.notify.debug(\"Attack \" + repr(attack) + \" has hit\")\n # now be sure to update the skill points gained for the\n # attacking toon, but only if the attack actually 'hit'\n # something, meaning it was a 'successful' attack\n trk = attack[TOON_TRACK_COL]\n lvl = attack[TOON_LVL_COL]\n id = attack[TOON_ID_COL]\n\n if trk != -1 and trk != NPCSOS and trk != PETSOS and \\\n lvl != -1 and id != -1:\n expList = self.toonSkillPtsGained.get(id, None)\n if expList == None:\n expList = [0, 0, 0, 0, 0, 0, 0]\n self.toonSkillPtsGained[id] = expList\n \n expList[trk] = min(ExperienceCap,\n expList[trk] + (lvl + 1) * self.__skillCreditMultiplier)", "def generateEnemyStats(healthRange, 
powerRange, smartsRating):\n\n stats = {\n 'healthRating': healthRange,\n 'powerRating': powerRange,\n 'smartsRating': smartsRating\n }\n return stats", "def takeHit(self, amount, type, enemyShip):\n if type == 'energy':\n # go through shields in quadrant first\n if self.currentSP > 0:\n if self.currentSP >= amount:\n self.currentSP -= amount\n amount = 0\n else:\n amount -= self.currentSP\n self.currentSP = 0\n # go through armor next\n if self.currentAP > 0 and amount > 0:\n # set experience only if shot goes through shields\n if self.typeAP == 'energy':\n if self.currentAP >= (amount * globals.reflectiveArmorModifier):\n self.currentAP -= (amount * globals.reflectiveArmorModifier)\n amount = 0\n else:\n amount -= (self.currentAP/globals.reflectiveArmorModifier)\n self.currentAP = 0\n else:\n if self.currentAP >= amount:\n self.currentAP -= amount\n amount = 0\n else:\n amount -= self.currentAP\n self.currentAP = 0\n elif type == 'impact':\n # go through shields in quadrant first\n if self.currentSP > 0:\n if self.currentSP >= amount:\n self.currentSP -= amount/2\n amount = amount/2\n else:\n amount -= self.currentSP\n self.currentSP = 0\n \n # now goto armor\n if self.currentAP > 0 and amount > 0:\n if self.typeAP == 'impact':\n if self.currentAP >= (amount * globals.impactArmorModifier):\n self.currentAP -= (amount * globals.impactArmorModifier)\n amount = 0\n else:\n amount -= (self.currentAP/globals.impactArmorModifier)\n self.currentAP = 0\n else:\n if self.currentAP >= amount:\n self.currentAP -= amount\n amount = 0\n else:\n amount -= self.currentAP\n self.currentAP = 0\n \n # now that shields and armor are taken care of transfer remaining damage to internal components\n self.myParent.setExperience(amount, enemyShip)\n componentDamage = 0\n if amount > 0 and self.components != {}:\n while amount > 0:\n keyList = funcs.sortStringList(self.components.keys())\n componentDamage = 1\n for componentID in keyList:\n component = self.components[componentID]\n if component.currentHP > amount:\n component.currentHP -= amount\n amount = 0\n break\n elif component.currentHP > 0:\n # remove component\n amount -= component.currentHP\n del self.components[componentID]\n \n # check if all components destroyed, or damage absorbed\n if self.components == {} or amount == 0:\n break\n \n if componentDamage == 1:\n self.setMyStatus()\n self.myParent.setMyStatus()\n \n if amount > 0:\n if self.myParent.currentISP > amount:\n self.myParent.currentISP -= amount\n self.myParent.setMyStatus()\n amount = 0\n else:\n self.myParent.destroyMe()\n amount = 0\n \n self.myParent.updateAllGUIValues()", "def _cal_score(self, primary_category_label, secondary_category_label, actual_category_label, max_level):\n primary_categories = self._get_categories(primary_category_label)\n if not secondary_category_label:\n secondary_categories = None\n else:\n secondary_categories = self._get_categories(secondary_category_label)\n actual_categories = self._get_categories(actual_category_label)\n total_score = 0\n ignored_primary = False\n ignored_secondary = False\n for i in range(0, max_level + 1):\n if primary_categories and i < len(primary_categories):\n pv = primary_categories[i]\n else:\n pv = None\n if actual_categories and i < len(actual_categories):\n av = actual_categories[i]\n else:\n av = None\n if secondary_categories and i < len(secondary_categories):\n sv = secondary_categories[i]\n else:\n sv = None\n primary_equal = pv == av\n secondary_equal = (secondary_categories is not None) and sv == av\n if not 
primary_equal:\n ignored_primary = True\n if not secondary_equal:\n ignored_secondary = True\n\n if not ignored_primary:\n total_score += self.level_scores[i]\n\n if ignored_primary and (not ignored_secondary):\n total_score += 0.5 * self.level_scores[i]\n\n return total_score", "def bless_basic(unit):\n return {DAMAGE: unit.maximum_damage}", "def _handlespecials(self, secondary, typeset, abilitiesset, \n secondaryweapon):\n\n # Guided weapons must be distance weapons.\n GUIDED = WeaponAbility('Guided')\n if GUIDED in self.abilities and not self.isdistance():\n if typeset and abilitiesset:\n raise ArtifactError(\"Cannot comply with both type/style\" +\n \" and ability requirements.\")\n elif not typeset:\n newstyle = random.choice(('Missile Weapon', 'Thrown Weapon'))\n self._setweapontype(newstyle)\n else: # not abilitiesset\n self.abilities.remove(GUIDED)\n replacement = WeaponAbility()\n while (replacement == GUIDED or\n replacement in self.abilities):\n replacement = WeaponAbility()\n self.abilities.append(replacement)\n _MultiAbilityArtifact._setname(self)\n\n # Changlings are two weapons in one, with two separate ability sets\n CHANGLING = WeaponAbility('Changling')\n self.changling = False\n if not secondary and CHANGLING in self.abilities:\n\n # What we have so far becomes the primary\n self.primaryweapon = Weapon(self.style, self.type,\n abilities=list(self.abilities), \n secondary=True)\n\n # And we roll a new set for the secondary...\n if secondaryweapon is not None:\n self.secondaryweapon = secondaryweapon\n if not isinstance(self.secondaryweapon, Weapon):\n raise ArtifactError('%s is not a weapon!' % \n self.secondaryweapon)\n else:\n if 'Bow' in self.style:\n newstyle = random.choice(('Sword', 'Ax/Mace/Hammer',\n 'Pole Weapon', 'Unusual Weapon'))\n else:\n newstyle = random.choice(('Drawn Bow', 'Cross Bow'))\n self.secondaryweapon = Weapon(style=newstyle, secondary=True)\n\n # ... 
which must also include Changling (so max four)\n if CHANGLING in self.secondaryweapon.abilities:\n self.secondaryweapon.abilities.remove(CHANGLING)\n if len(self.secondaryweapon.abilities) == 5:\n self.secondaryweapon.abilities = \\\n self.secondaryweapon.abilities[:4]\n\n # Update this weapon to show ALL abilities, types, etc\n self.abilities += self.secondaryweapon.abilities\n self.type = \"%s / %s\" % (self.primaryweapon.type,\n self.secondaryweapon.type)\n self.itemtype = \"Changling %s\" % self.type\n self.value = (5000 + max(self.primaryweapon.value,\n self.secondaryweapon.value) +\n 2 * min(self.primaryweapon.value,\n self.secondaryweapon.value))\n\n # Finally, remove 'Changling' from the primary's list\n # to clean up the display, and update.\n self.primaryweapon.abilities.remove(CHANGLING)\n _MultiAbilityArtifact._setname(self.primaryweapon)\n self.changling = True\n _MultiAbilityArtifact._setname(self)", "def lvl_algo(next_level):\n total_xp_needed = (next_level * next_level)\n return total_xp_needed", "def map_level(level):\n return {\"critical\": 50, \"error\": 40, \"warning\": 30, \"info\": 20, \"debug\": 10}.get(\n level, 10\n )", "def map_level(level):\n return {\"critical\": 50, \"error\": 40, \"warning\": 30, \"info\": 20, \"debug\": 10}.get(\n level, 10\n )", "def __init__(self, name, loot, strength):\n self.name = name\n self.x = 0\n self.y = 0\n self.health = 10\n self.strength = strength\n self.loot = loot\n self.is_alive = True\n self.MAX_HEALTH = 15\n self.magic_key = False\n logging.debug(\"{0} created with health of {1} and strength of {2}\"\n .format(self.name, self.health, self.strength))\n \"\"\" Test Results Part A:\n When increasing MAX_HEATH to 100, rounds tended to go on.\n When decreasing MAX_HEATH to 0.05, rounds end very quickly.\n This is expected because the Sprites will be easier or harder \n to defeat depending on how high their health can get. It will \n take more attacks to defeat a Sprite with more health and less\n attacks to defeat a Sprite with less health. \n \n Test Results Part B:\n Test: change strength of Enemy to 20 (higher than Avatar)\n Prediction: the Enemy should win most/all of the time because the player \n with more strength has a harder attack.\n Results: The Enemy won during all trials. If the roles were switched, the \n same could be said about Avatar.\n \n Test: set health of Avatar to 5\n Prediction: the Avatar will die more often than the Enemy because it can \n receive less attacks\n Results: The Avatar died during most trials. \n \n Test: set MAX_HEALTH for Enemy to 5\n Prediction: Enemy will be able to have less health, so it will be defeated\n more often than the Avatar\n Results: The enemy died in almost all trials\n \"\"\"", "def getDamage(self):\n \n weapon_dmg = self.weapon.getDamage()\n cat_bonus, att_cats = self.getCatBonus(self.attacking_kittens,\n \"attacking\")\n true_dmg = weapon_dmg + cat_bonus + self.getBonusDamageFromInsanity()\n return true_dmg, att_cats", "def get_max(im, class_name, dets, thresh=0.5):\n inds = np.where(dets[:, -1] >= thresh)[0]\n max_inds = 0\n max_score = 0.0\n if len(inds) == 0:\n # print('Warning: no target detected!')\n return\n elif len(inds) > 1:\n # print('Warning: ' + str(len(inds)) + ' targets detected! 
Choose the highest one')\n for i in inds:\n if(dets[i, -1] > max_score):\n max_inds = i\n max_score = dets[i, -1]\n bbox = dets[max_inds, :4]\n score = dets[max_inds, -1]\n return [max_inds,score]", "def _calc_hp(self, average=False):\n dice = self.hd + self.constitution\n if average:\n return round((dice * self.level).average)\n\n return max(sum((dice * self.level).roll()), 1)" ]
[ "0.6369547", "0.6126176", "0.5764121", "0.5721204", "0.56999665", "0.56620437", "0.5656497", "0.5552577", "0.5546967", "0.554252", "0.5487898", "0.54512966", "0.5447283", "0.5400377", "0.53890437", "0.5385318", "0.5371194", "0.53344166", "0.532357", "0.5270881", "0.5253192", "0.5244573", "0.52291995", "0.5176173", "0.51564693", "0.5151702", "0.51353246", "0.5125361", "0.5108481", "0.5092772", "0.50633276", "0.5050805", "0.5046204", "0.5042835", "0.5008651", "0.50068974", "0.50030106", "0.5002263", "0.49718755", "0.49677044", "0.495551", "0.49461436", "0.4942204", "0.49262965", "0.492454", "0.490921", "0.49070692", "0.49068847", "0.49049634", "0.49012238", "0.4892983", "0.48928317", "0.48910254", "0.48888052", "0.4879064", "0.4864303", "0.48595694", "0.48553655", "0.48401842", "0.48399618", "0.48299786", "0.48155624", "0.48023564", "0.4793754", "0.47795743", "0.47705418", "0.47695026", "0.47592166", "0.47568494", "0.4753759", "0.4746599", "0.4746311", "0.4742348", "0.4742002", "0.47400215", "0.47371882", "0.47323987", "0.47300202", "0.47250938", "0.47146147", "0.4706034", "0.4702324", "0.47003168", "0.46998814", "0.46992597", "0.46941707", "0.4681949", "0.46776706", "0.46672156", "0.46659562", "0.46655497", "0.46637756", "0.46625474", "0.4659219", "0.4657076", "0.4657076", "0.46559587", "0.46555123", "0.46519685", "0.46411213" ]
0.8360239
0
Returns a tuple of the form (attack_bonus, strength_bonus) for the best scimitar (weapon) at a given attack level. Scimitars are almost always the most efficient weapon.
def get_weapon_stats(attack_level):
    if attack_level >= 60:
        # Dragon scimitar
        return (67, 66)
    elif attack_level >= 40:
        # Rune scimitar
        return (45, 44)
    elif attack_level >= 30:
        # Adamant scimitar
        return (29, 28)
    elif attack_level >= 20:
        # Mithril scimitar
        return (21, 20)
    elif attack_level >= 10:
        # Black scimitar
        return (19, 14)
    elif attack_level >= 5:
        # Steel scimitar
        return (15, 14)
    else:
        # Iron scimitar
        return (10, 9)
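A minimal usage sketch for the function above (illustrative only, not a field of the dataset record); the attack level of 60 is an arbitrary assumption chosen so the dragon scimitar branch applies, and the expected values simply restate the constants in the function.

# Illustrative sketch: unpack the (attack_bonus, strength_bonus) tuple
# for a level-60 attacker, which falls in the dragon scimitar tier above.
attack_bonus, strength_bonus = get_weapon_stats(60)
assert (attack_bonus, strength_bonus) == (67, 66)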
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def most_powerful_weapon(self):\n # sets inital damge to 0\n max_damage = 0\n # sets the best weapon to nothing\n best_weapon = None\n # Loop for each item in inventory\n for item in self.inventory:\n # Code adapted from Make Your own Python Text Based Adventure\n # tries to see if the item damage is greator than the current max\n # damage and then replaces the best weapon in inventory\n try:\n if item.damage > max_damage:\n best_weapon = item\n max_damage = item.damage\n except AttributeError:\n pass\n # sends the best weapon to function\n return best_weapon", "def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)", "def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between\n # half of max_damage and max_damage\n print(\"max damage of \" + self.name + \" is \")\n print(str(self.attack_strength))\n min_damage = self.attack_strength // 2\n weapon_attack_value = random.randint(min_damage, self.attack_strength)\n return weapon_attack_value", "def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between half of max_damage and max_damage\n \n weapon_attack_value = random.randint(self.max_damage//2, self.max_damage)\n return weapon_attack_value", "def SADamageFunction(\n skill: AdventurerSkill | None,\n adventurer: \"Adventurer\",\n enemy: \"Enemy\",\n memboost: dict[str, int | float],\n combo: int,\n saRng: float,\n) -> int:\n if skill is None:\n return 0\n\n # lowercase everything\n target = skill.target.lower()\n tempBoostName = skill.tempBoost.lower()\n powerCoefficientName = skill.powerCoefficient.lower()\n powerCoefficient = 1.0\n\n if tempBoostName == \"none\":\n tempBoost = 1.0\n elif \"normal\" in tempBoostName:\n tempBoost = 1.4\n else:\n tempBoost = 1.7\n\n if skill.target == \"foe\":\n match powerCoefficientName:\n case \"low\" | \"lo\":\n powerCoefficient = 1.5\n case \"mid\" | \"medium\":\n powerCoefficient = 1.7\n case \"high\":\n powerCoefficient = 1.9\n case \"super\":\n powerCoefficient = 2.1\n case \"ultra\":\n powerCoefficient = 4.0\n else:\n match powerCoefficientName:\n case \"low\" | \"lo\":\n powerCoefficient = 1.1\n case \"mid\" | \"medium\":\n powerCoefficient = 1.15\n case \"high\":\n powerCoefficient = 1.2\n case \"super\":\n powerCoefficient = 1.4\n case \"ultra\":\n powerCoefficient = 3.6\n\n if \"physical\" in skill.type:\n stat_key = \"strength\"\n resist_key = \"physical\"\n else:\n stat_key = \"magic\"\n resist_key = \"magic\"\n\n tempPower = adventurer.stats[stat_key]\n tempPowerBoostAdv = adventurer.statsBoostAdv[stat_key]\n tempPowerBoostAst = adventurer.statsBoostAst[stat_key]\n tempMemBoost = memboost[stat_key]\n\n tempTypeResistDownBase = enemy.typeResistDownBase[resist_key]\n tempTypeResistDownAdv = enemy.typeResistDownAdv[resist_key]\n tempTypeResistDownAst = enemy.typeResistDownAst[resist_key]\n # check enemy buffs p/m resist\n tempTypeResistBuff = enemy.get_buff_mod(f\"{resist_key}_resist\")\n\n # get strength/magic debuff\n powerDebuff = adventurer.get_boostCheckAdv(False, stat_key)\n tempPowerBoostDebuff = 0.0\n if powerDebuff is not None:\n tempPowerBoostDebuff = abs(powerDebuff.modifier)\n else:\n tempPowerBoostDebuff = 0\n\n if len(skill.index_to) != 0:\n tempPower = 0\n tempPowerBoostAdv = 
0.0\n tempPowerBoostAst = 0.0\n tempMemBoost = 0\n powerCoefficient = powerCoefficient * 1.96\n for index_to_attributes in skill.index_to:\n tempPower += adventurer.stats[index_to_attributes]\n tempPowerBoostAdv += adventurer.statsBoostAdv[index_to_attributes]\n tempPowerBoostAst += adventurer.statsBoostAst[index_to_attributes]\n tempMemBoost += memboost[index_to_attributes]\n tempElementBoostDebuff = 0.0\n if skill.element != \"\" and skill.noType != 1:\n # elementResistDownBase\n tempElementResistDownBase = enemy.elementResistDownBase[skill.element]\n # elementResistDownAdv\n tempElementResistDownAdv = enemy.elementResistDownAdv[skill.element]\n # elementResistDownAst\n tempElementResistDownAst = enemy.elementResistDownAst[skill.element]\n # elementDamageBoostAdv[location]\n\n tempElementDamageBoostAdv = adventurer.elementDamageBoostAdv[skill.element]\n if memboost.get(f\"{skill.element}_attack\") is not None:\n tempElementDamageBoostAdv += memboost[f\"{skill.element}_attack\"]\n # elemental damage boost from weapon\n if adventurer.stats.get(skill.element) is not None:\n tempElementDamageBoostAdv += cast(float, adventurer.stats[skill.element])\n # elementDamageBoostAst[location]\n tempElementDamageBoostAst = adventurer.elementDamageBoostAst[skill.element]\n # element debuff\n tempEleDebuff = adventurer.get_boostCheckAdv(False, f\"{skill.element}_attack\")\n if tempEleDebuff is not None:\n tempElementBoostDebuff = abs(tempEleDebuff.modifier)\n else:\n tempElementResistDownBase = 0.0\n tempElementResistDownAdv = 0.0\n tempElementResistDownAst = 0.0\n tempElementDamageBoostAdv = 0.0\n tempElementDamageBoostAst = 0.0\n\n if target == \"foe\":\n temptargetResistDownAdv = enemy.targetResistDownAdv[\"st\"]\n temptargetResistDownAst = enemy.targetResistDownAst[\"st\"]\n # foes\n else:\n temptargetResistDownAdv = enemy.targetResistDownAdv[\"aoe\"]\n temptargetResistDownAst = enemy.targetResistDownAst[\"aoe\"]\n\n temp_enemy_end = enemy.stats\n\n tempDamage = (\n (\n max(\n 2\n * tempPower\n * tempBoost\n * (\n 1\n + tempPowerBoostAdv\n + tempPowerBoostAst\n + tempMemBoost\n - tempPowerBoostDebuff\n )\n - temp_enemy_end[\"endurance\"],\n 0,\n )\n )\n * (\n 1\n - tempElementResistDownBase\n - tempElementResistDownAdv\n - tempElementResistDownAst\n - tempTypeResistDownBase\n - tempTypeResistDownAdv\n - tempTypeResistDownAst\n - tempTypeResistBuff\n )\n * (\n 1\n + tempElementDamageBoostAdv\n + tempElementDamageBoostAst\n - tempElementBoostDebuff\n )\n * (1 + adventurer.critPenBoost + 0.06)\n * (1 - temptargetResistDownAdv - temptargetResistDownAst)\n * powerCoefficient\n * 1.5\n * (skill.extraBoost)\n * (0.8 + combo * 0.2)\n * saRng\n )\n return int(tempDamage)", "def weaponValue(self, level):\n if level == 1:\n bonus = 2\n elif level == 2:\n bonus = 4\n elif level == 3:\n bonus = 6\n elif level == 4:\n bonus = 8\n else:\n bonus = 0\n\n return bonus", "def get_max_hit_and_accuracy(\n levels, attack_style, attack_bonus, strength_bonus):\n weapon_attack, weapon_strength = get_weapon_stats(levels.attack)\n attack_bonus += weapon_attack\n strength_bonus += weapon_strength\n\n if attack_style == Attack_Style.ATTACK:\n effective_attack = osrs.effective_level(levels.attack, 1, 3, 1)\n effective_strength = osrs.effective_level(levels.strength, 1, 0, 1)\n elif attack_style == Attack_Style.STRENGTH:\n effective_attack = osrs.effective_level(levels.attack, 1, 0, 1)\n effective_strength = osrs.effective_level(levels.strength, 1, 3, 1)\n\n enemy_effective_defence = osrs.effective_level(1, 1, 0, 1)\n\n max_hit = 
osrs.max_hit(effective_strength, strength_bonus)\n accuracy = osrs.accuracy(effective_attack, attack_bonus,\n enemy_effective_defence, 0)\n\n return (max_hit, accuracy)", "def Attack_Weapon(self, bonus=0):\n bonus = str(bonus);\n if (bonus == \"0\"):\n return \"\".join((\"[[1d20+\", self.Attribute_Power(\"attack\"), \"]] vs \", self.Attribute_Power(\"def\")));\n else:\n return \"\".join((\"[[1d20+\", self.Attribute_Power(\"attack\"), \"+\", bonus, \"]] vs \", self.Attribute_Power(\"def\")));", "def weapon_strength(weapon):\n weapon_strength_int = WEAPON_STRENGTHS[weapon]\n #print weapon_strength_int\n return weapon_strength_int", "def Attack_Skill(self, bonus=0):\n bonus = str(bonus);\n if (bonus == \"0\"):\n return \"\".join((\"[[1d20+\", Attribute(\"halflevel\"), \"[level/2]+\", self.Attribute_Power(\"mod\"), \"+\", self.Attribute_Power(\"attack-misc\"), \"]] vs \", self.Attribute_Power(\"def\")));\n else:\n return \"\".join((\"[[1d20+\", Attribute(\"halflevel\"), \"[level/2]+\", self.Attribute_Power(\"mod\"), \"+\", self.Attribute_Power(\"attack-misc\"), \"+\", bonus, \"]] vs \", self.Attribute_Power(\"def\")));", "def attack(self):\n\n lowest_attack = int(self.attack_strength)// 2\n attack_strength = random.randint(lowest_attack, int(self.attack_strength))\n return attack_strength", "def find_desired_outcome(\n base_player_stats: Tuple[int, int, int],\n base_boss_stats: Tuple[int, int, int],\n weapons,\n armors,\n rings,\n character: str = \"player\",\n key=min,\n) -> int:\n\n outcomes = []\n for item_combination in generate_item_combinations(weapons, armors, rings):\n player = Character(\"player\", *base_player_stats)\n boss = Character(\"boss\", *base_boss_stats)\n total_cost: int = 0\n for item in item_combination:\n total_cost += item.cost\n player.armor += item.armor\n player.damage += item.damage\n\n winner = fight_battle(player, boss)\n if winner.name == character:\n outcomes.append(total_cost)\n\n return key(outcomes)", "def bless_advanced(unit):\n return {DAMAGE: unit.maximum_damage + 1}", "def attack(self):\n if random.random() < self.chance_critical:\n return self.strength * 2\n return self.strength", "def analysis(self, game_info):\n available_cards_indices = []\n for card_index in range(len(game_info['cards'])):\n card = game_info['cards'][card_index]\n cost_color, cost_value = card.get_cost()\n if cost_color == 0:\n available_cards_indices.append(card_index)\n continue\n resource_name = getResourceName(cost_color)\n if game_info[resource_name] >= cost_value:\n available_cards_indices.append(card_index)\n\n self_coeff = -1\n enemy_coeff = 1\n if len(available_cards_indices) > 0:\n index = 0\n optimal_priority = 0\n for i in xrange(len(available_cards_indices)):\n priority = 0\n actions = game_info['cards'][available_cards_indices[i]].get_actions()\n for action in actions['player']:\n if action[0] == 0:\n priority += self_coeff * action[1]\n for action in actions['opponent']:\n if action[0] == 0:\n priority -= enemy_coeff * action[1]\n if i == 0:\n optimal_priority = priority\n elif priority > optimal_priority:\n index, optimal_priority = i, priority\n return game_info['cards'][available_cards_indices[index]], TO_PRESS\n else:\n index = 0\n for i in xrange(1, 5):\n if game_info['cards'][i].cost_value > game_info['cards'][index].cost_value:\n index = i\n return game_info['cards'][index], TO_DROP", "def calculate_hit(self, armor_list, inventory):\n armor_power = 0\n for armor in armor_list:\n armor_power += inventory[armor]['power']\n max_strength = max(1, (self.level * 5) - 
armor_power)\n min_strength = 0\n return random.randint(min_strength, max_strength)", "def get_weapon(self):\n\n return self.suggestion_set[1]", "def get_best_action(self, strategy, player):\n actions = self.game.get_actions(player)\n action = None\n if not actions:\n action = (player, None)\n elif strategy == \"q\":\n action = actions[np.argmax([self.weights @ extractor(self.game, a) for a in actions])]\n elif strategy == \"random\":\n action = actions[random.randint(0, len(actions) - 1)]\n feature = extractor(self.game.copy(), action)\n return feature, action", "def bless_basic(unit):\n return {DAMAGE: unit.maximum_damage}", "def calculate_overall_rating(player_dict):\r\n if player_dict[\"position\"].upper() == \"QB\":\r\n throw_power = int(max(min(int(player_dict[\"throw_power\"]), 99), 70))\r\n throw_accuracy = int(max(min(math.ceil(\r\n ((2 * (\r\n int(player_dict[\"throw_accuracy_short\"]) + \r\n int(player_dict[\"throw_accuracy_mid\"]) + \r\n int(player_dict[\"throw_accuracy_deep\"]) + \r\n int(player_dict[\"throw_on_the_run\"]) + \r\n int(player_dict[\"playaction\"])\r\n )) - (2 * min(\r\n int(player_dict[\"throw_accuracy_short\"]), \r\n int(player_dict[\"throw_accuracy_mid\"]), \r\n int(player_dict[\"throw_accuracy_deep\"]), \r\n int(player_dict[\"throw_on_the_run\"]), \r\n int(player_dict[\"playaction\"])\r\n ))\r\n ) / 8\r\n ), 99), 60))\r\n break_tackles = int(max(min(\r\n math.ceil(((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 7), \r\n 90), 20))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 98), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((throw_power - 50.0) / 10.0) * 4.9\r\n overall_rating += ((throw_accuracy - 50.0) / 10.0) * 5.8\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.0\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"HB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 70), 25))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 50))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 50))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 0.33\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 2.0\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.6\r\n overall_rating += ((speed - 50.0) / 10.0) * 
3.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 75), 40))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 55))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 95), 60))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.0\r\n overall_rating += ((run_block - 50.0) / 10.0) * 7.2\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 1.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.0\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 39), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"WR\":\r\n break_tackles = int(max(min(\r\n math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2), \r\n 80), 35))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 75))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 35))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 65))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.3\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 4.75\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"TE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n acceleration = 
int(max(min(int(player_dict[\"acceleration\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 95), 20))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 80), 35))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 35))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.65\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.65\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.65\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.25\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.25\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.4\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.2\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.2\r\n overall_rating += ((run_block - 50.0) / 10.0) * 5.4\r\n overall_rating = int(max(min((round(overall_rating) + 35), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LT\" or player_dict[\"position\"].upper() == \"RT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 0.8\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 4.75\r\n overall_rating += ((run_block - 50.0) / 10.0) * 3.75\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if (player_dict[\"position\"].upper() == \"LG\" or player_dict[\"position\"].upper() == \"RG\" or \r\n player_dict[\"position\"].upper() == \"C\"):\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n 
int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.7\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.25\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.25\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 3.25\r\n overall_rating += ((run_block - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LE\" or player_dict[\"position\"].upper() == \"RE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 45))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.75\r\n overall_rating += ((awareness - 50.0) / 10.0) * 1.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.75\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 3.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"DT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 5.5\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.55\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LOLB\" or player_dict[\"position\"].upper() == \"ROLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 70))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 90), 20))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.6\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.4\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.3\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 
29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"MLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 65))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 5.2\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.65\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.75\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"CB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 40))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 40))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 85), 30))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.85\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.55\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.35\r\n overall_rating += ((catching - 50.0) / 10.0) * 3\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.55\r\n overall_rating += ((tackle - 50.0) / 10.0) * 1.55\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.5\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.5\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.0\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.5\r\n overall_rating += ((tackle - 50.0) / 10.0) * 2.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"SS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n awareness = 
int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.2\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.7\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.7\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.2\r\n overall_rating += ((jumping - 50.0) / 10.0) * 0.9\r\n overall_rating += ((tackle - 50.0) / 10.0) * 3.2\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"K\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 35))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-177 + (0.218 * awareness) + (1.28 * kick_power) + (1.47 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"P\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 40))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-183 + (0.218 * awareness) + (1.5 * kick_power) + (1.33 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating", "def getWeightsAttack(self, gameState, action):\r\n return {'minDistToFood': -1,'getFood': 100}", "def getDamage(self, player, is_random=True):\n \n if \"restrained\" in self.debuffs:\n return 0, 0\n \n mitigation, num_cats = player.getCatBonus(player.defending_kittens,\n \"defending\")\n raw_dmg = random.randint(self._damage[0], self._damage[1])\n \n true_dmg = raw_dmg - mitigation\n if true_dmg < 0:\n true_dmg = 0\n \n return true_dmg, num_cats", "def getDamage(self):\n \n weapon_dmg = self.weapon.getDamage()\n cat_bonus, att_cats = self.getCatBonus(self.attacking_kittens,\n \"attacking\")\n true_dmg = weapon_dmg + cat_bonus + self.getBonusDamageFromInsanity()\n return true_dmg, att_cats", "def find_best(self):\n best_st = 0\n best_bt = 0\n best_perf = -1.1\n for bt in self.btl:\n for st in self.stl:\n if self.total[bt, st, \"perf\"] > best_perf:\n best_perf = self.total[bt, st, \"perf\"]\n best_st = st\n best_bt = bt\n return (best_perf, self.total[best_bt, best_st, \"count\"], best_bt, best_st)", "def attack(self):\n return random.randint(self.max_damage//2, self.max_damage)", "def get_max_hit_increases(\n start_strength_level, end_strength_level,\n strength_bonus, stance_adder):\n greatest_max_hit = 0\n max_hit_increases = []\n cur_strength_level = start_strength_level\n while cur_strength_level < end_strength_level:\n effective_strength = osrs.effective_level(\n cur_strength_level, 1, stance_adder, 1)\n max_hit = osrs.max_hit(effective_strength, strength_bonus)\n\n if max_hit > greatest_max_hit:\n greatest_max_hit = max_hit\n max_hit_increases.append((cur_strength_level, max_hit))\n\n cur_strength_level += 
1", "def interpretSkillAdventurerAttack(\n skillEffectsWithName: tuple[str, list], adventurer: \"Adventurer\", enemy: \"Enemy\"\n) -> AdventurerSkill | None:\n # for index_to maybe list {\"modifier\": \"End. & Mag.\", \"target\": \"skill\", \"attribute\": \"indexed_to\",\"speed\": \"None\" }\n\n # test if skill effects empty\n if skillEffectsWithName:\n _, skillEffects = skillEffectsWithName\n else:\n skillEffects = []\n\n damage_skills = [\n x\n for x in skillEffects\n if x.attribute.lower().strip() == \"damage\"\n or (\n (x.element is not None and x.element != \"\")\n and (x.type == \"physical_attack\" or x.type == \"magic_attack\")\n )\n ]\n if len(damage_skills) > 0:\n damage_skill = damage_skills[0]\n # do the damage first if attribute == element and modifier== high/medium etc, type = attack\n index_to_effects = [\n x for x in skillEffects if x.attribute.lower().strip() == \"indexed_to\"\n ]\n index_to_modifier = set()\n # modifier is the index_to target\n for index_to_effect in index_to_effects:\n # \"attribute\" index_to\n index_to_modifier.add(index_to_effect.modifier)\n \"\"\"\n For temp boosts\n {\n \"modifier\": \"normal2_str\",\n \"target\": \"skill\",\n \"attribute\": \"temp_boost\",\n }\n \"\"\"\n temp_boost_effects = [\n x for x in skillEffects if x.attribute.lower().strip() == \"temp_boost\"\n ]\n if len(temp_boost_effects) > 0:\n temp_boost_mod = temp_boost_effects[0].modifier\n else:\n temp_boost_mod = \"none\"\n\n # loop through the variables to check if attribute exists\n extra_boosts_effects = [\n x for x in skillEffects if \"per_each\" in x.attribute.lower().strip()\n ]\n extra_boosts_value = 1.0\n # for example str/mag debuff\n if len(extra_boosts_effects) > 0:\n for extra_boosts in extra_boosts_effects:\n temp_extra_boosts = interpretExtraBoostWrapper(\n extra_boosts, adventurer, enemy\n )\n extra_boosts_value = extra_boosts_value + temp_extra_boosts\n # SELECT ase.AdventurerSkillEffectsid, ase.AdventurerSkillid, ase.duration, e.name AS element, m.value AS modifier, ty.name AS type, ta.name AS target, a.name AS attribute, s.name AS speed, ad.stars, ad.title, ad.alias, ad.limited, c.name\n ret = AdventurerSkill(\n damage_skill.target,\n temp_boost_mod,\n damage_skill.modifier,\n extra_boosts_value,\n 0,\n damage_skill.type,\n damage_skill.element,\n index_to_modifier,\n )\n return ret\n else:\n return None", "def __calcSuitTarget(self, attackIndex):\n attack = self.battle.suitAttacks[attackIndex]\n suitId = attack[SUIT_ID_COL]\n if self.SuitAttackers.has_key(suitId) and \\\n random.randint(0, 99) < 75:\n # first calculate the total damage done to this suit by all\n # recorded attackers, this is so we can create a frequency\n # list of damage percentages that we can randomly pick from\n totalDamage = 0\n for currToon in self.SuitAttackers[suitId].keys():\n totalDamage += self.SuitAttackers[suitId][currToon]\n\n # create a list of damage percentages and pick one of the\n # weighted values, this tells us which toon attacker that\n # the suit should attack\n dmgs = []\n for currToon in self.SuitAttackers[suitId].keys():\n dmgs.append((self.SuitAttackers[suitId][currToon] /\n totalDamage) * 100)\n dmgIdx = SuitBattleGlobals.pickFromFreqList(dmgs)\n if (dmgIdx == None):\n toonId = self.__pickRandomToon(suitId) \n else:\n toonId = self.SuitAttackers[suitId].keys()[dmgIdx]\n if (toonId == -1 or toonId not in self.battle.activeToons):\n return -1\n self.notify.debug(\"Suit attacking back at toon \" + str(toonId))\n return self.battle.activeToons.index(toonId)\n else:\n 
#return random.randint(0, len(self.battle.activeToons) - 1)\n # make sure we only randomly choose from the active toons\n # that are still alive at this point in the round\n return self.__pickRandomToon(suitId)", "def difficulty_for_level(level):\n return 0 if level==\"easy\" else (1 if level==\"medium\" else 2)", "def optimal_battle(hard=False):\n winners = []\n\n def _run(spells, best_cost):\n for spell in all_spells.keys():\n spells.append(spell)\n\n # Calc cost to skip useless battles\n cost = sum([all_spells[s] for s in spells])\n if cost < best_cost:\n\n # Battle with this spells list\n hero = Player('Hero', hit=50, mana=500)\n boss = Player('Boss', hit=71, damage=10)\n outcome = battle(hero, boss, spells, hard=hard)\n # print outcome\n if outcome == 'win':\n if cost < best_cost:\n # Save this spells list\n print cost, ', '.join(map(lambda x : x.func_name, spells))\n winners.append(list(spells))\n best_cost = cost\n elif outcome == 'moar':\n # Add more spells\n best_cost = min(best_cost, _run(spells, best_cost))\n else:\n # Don't progress further\n pass\n\n spells.pop()\n\n return best_cost\n\n\n return _run([], float('inf'))", "def get_best_anticipation(self, perception: Perception) -> Perception:\n return self.effect.get_best_anticipation(perception)", "def W_Crit(self, multiplier=1):\n multiplier = str(multiplier);\n weapon_dice_count = self.Attribute_Power(\"weapon-num-dice\");\n weapon_dice = self.Attribute_Power(\"weapon-dice\");\n return \"\".join((\"(\", multiplier, \"*\", weapon_dice_count, \")*\", weapon_dice));", "def analysis(self, game_info):\n available_cards_indices = []\n for card_index in range(len(game_info['cards'])):\n card = game_info['cards'][card_index]\n cost_color, cost_value = card.get_cost()\n if cost_value == 99:\n return game_info['cards'][card_index], TO_DROP\n if cost_color == 0:\n available_cards_indices.append(card_index)\n continue\n resource_name = getResourceName(cost_color)\n if game_info[resource_name] >= cost_value:\n available_cards_indices.append(card_index)\n\n if len(available_cards_indices) > 0:\n random_index = available_cards_indices[randint(0, len(available_cards_indices)-1)]\n return game_info['cards'][random_index], TO_PRESS\n\n return game_info['cards'][randint(0, 5)], TO_DROP", "def attack(self):\n \n half_max_damage = int(self.max_damage) // 2\n random_value = randint(half_max_damage, int(self.max_damage))\n\n return random_value", "def attack(self):\n return random.randint(0, self.attack_strength)", "def weapon_mapping(self, weapon: int) -> str:\n if weapon == 0:\n return 'shortsword'\n elif weapon == 1:\n return 'longsword'\n elif weapon == 2:\n return 'greatsword'\n elif weapon == 3:\n return 'halberd'\n elif weapon == 4:\n return 'longbow'\n elif weapon == 5:\n return 'greataxe'\n elif weapon == 6:\n return 'crossbow-heavy'\n elif weapon == 7:\n return 'battleaxe'\n elif weapon == 8:\n return 'handaxe'\n elif weapon == 9:\n return 'light-hammer'\n elif weapon == 10:\n return 'maul'\n elif weapon == 11:\n return 'glaive'\n elif weapon == 12:\n return 'rapier'", "def skill_cost(self, header_skill):\n # see if there is an updated cost for the skill\n # (find the skill it will effect)\n rule_query = Rule.objects.filter(\n skill=header_skill,\n new_cost__gt=0\n )\n if not rule_query.exists():\n return header_skill.cost\n # there are cost updates that affect the sent skill. 
See if the current\n # character has any of them.\n # just get the new costs and put them in the list and get the lowest.\n people_grants = self.people.rules.filter(\n id__in=rule_query\n ).values_list('new_cost', flat=True)\n tradition_grants = self.tradition.rules.filter(\n id__in=rule_query\n ).values_list('new_cost', flat=True)\n # Get the skills the character has that have rules \n # associated with them\n skill_type = ContentType.objects.get_for_model(Skill)\n skill_grants = Rule.objects.filter(\n content_type=skill_type,\n object_id__in=self.characterskills_set.values_list('skill__skill', flat=True)\n ).values_list('new_cost', flat=True)\n header_type = ContentType.objects.get_for_model(Header)\n header_grants = Rule.objects.filter(\n content_type=header_type,\n object_id__in=self.headers.values_list('id', flat=True)\n ).values_list('new_cost', flat=True)\n found_rules = list(tradition_grants) + list(people_grants) + list(skill_grants) + list(header_grants)\n # print(f\"FOUND RULES:{found_rules}\")\n if not found_rules:\n return header_skill.cost\n return min(found_rules)", "def adaptive(self):\n return self._calculate_integer_level(2, self.easy(), self.hard())", "def test_attack_types(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 10) # Attack 10 infantry\n s1.react(self.bob, 8, troop_type='cavalry') # --Attack 8 cavalry\n\n # Cavalry should get a 50% bonus here, for a total of 8+4=12\n # So Bob should win by 2 despite lesser numbers\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.margin, 2)\n self.assertEqual(result.vp, 10)\n\n s2 = battle.create_skirmish(self.bob, 10,\n troop_type='cavalry') # attack 10 cavalry\n s2.react(self.alice, 8, troop_type='ranged') # -- oppose 8 ranged\n result = s2.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.alice.team)\n self.assertEqual(result.margin, 2)\n self.assertEqual(result.vp, 10)\n\n s3 = battle.create_skirmish(self.carol, 10, # Attack 10 ranged\n troop_type='ranged')\n s3.react(self.bob, 8) # -- oppose 8 infantry\n result = s3.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.margin, 2)\n self.assertEqual(result.vp, 10)", "def weapon_cooldown(self) -> Union[int, float]:\n if self.can_attack_ground or self.can_attack_air:\n return self.proto.weapon_cooldown\n return -1", "def get_best(self):\n scores, ids = self.sort_best()\n return scores[1], ids[1]", "def test_bad_attack_types(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 10) # Attack 10 infantry\n s1.react(self.bob, 10, troop_type='ranged') # --Attack 10 ranged\n\n # Ranged should get a 50% penalty here, for a total of 10/2 = 5\n # So Alice should win by 5 despite lesser numbers\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.alice.team)\n self.assertEqual(result.margin, 5)\n self.assertEqual(result.vp, 10)\n\n s2 = battle.create_skirmish(self.bob, 10, # attack 10 ranged\n troop_type='ranged')\n s2.react(self.alice, 10, troop_type='cavalry') # -- oppose 10 cavalry\n result = s2.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.margin, 5)\n self.assertEqual(result.vp, 10)\n\n s3 = battle.create_skirmish(self.carol, 10, # Attack 10 cavalry\n troop_type='cavalry')\n s3.react(self.bob, 10) # -- oppose 10 infantry\n result = s3.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, 
self.carol.team)\n self.assertEqual(result.margin, 5)\n self.assertEqual(result.vp, 10)", "def attack_bonus_on_level(self, level):\n raise NotImplementedError", "def work(char, base, scale_stat, factor):\n added = int(math.floor(char.__dict__[scale_stat] / factor))\n earned = base + added\n return [(\"gold\", earned)]", "def crits_revisited_hp(weapon, value) -> str:\n\tfunc = inspect.currentframe().f_code\n\t# FORMAT = \"[ • %(funcName)20s() ] %(message)s\"\n\t# logging.basicConfig(format=FORMAT)\n\t# logging.debug(\"%s\", func.co_name)\n\tcritdamage = []\n\tfor die in weapon.damage:\n\t\t# logging.debug(\"Die = %s\", str(die))\n\t\tquantity, sides, damage_type = die\n\t\tif value == 1:\n\t\t\tlogging.debug('%s(): regular roll', func.co_name)\n\t\t\tcritdamage.append(die_roll_str(die))\n\t\telif value == 2:\n\t\t\tlogging.debug('%s(): maximum regular roll', func.co_name)\n\t\t\tcritdamage.append(str(sides))\n\t\telif value == 3:\n\t\t\tlogging.debug('%s(): double the rolls', func.co_name)\n\t\t\t# Double the number of dice rolled; e.g. change '2d6' to '4d6'\n\t\t\tcritdamage.append(\n\t\t\t\tre.sub(\n\t\t\t\t\t\"(\\\\d+)d\",\n\t\t\t\t\tlambda m: str(int(m.groups()[0]) * 2) + \"d\",\n\t\t\t\t\tdie_roll_str(die),\n\t\t\t\t)\n\t\t\t)\n\t\telif value == 4:\n\t\t\tlogging.debug('%s(): maximum + roll', func.co_name)\n\t\t\tcritdamage.append(str(sides))\n\t\t\tcritdamage.append(die_roll_str(die))\n\t\telif value == 5:\n\t\t\tlogging.debug('%s(): double maximum', func.co_name)\n\t\t\t# print([str(sides)] * (2 * quantity))\n\t\t\t# critdamage.extend([str(sides)] * (2 * quantity))\n\t\t\tcritdamage.extend([f\"{2 * quantity}*{str(sides)}\"])\n\n\t# print(f\"{value} and {critdamage}\")\n\tlogging.debug(\"crit damage revisited = %s\", str(critdamage))\n\treturn \"+\".join(critdamage)", "def attack(self):\n\t if self.damage == 0:\n\t\treturn None\n\t elif self.name == \"die\":\n\t roll = random.randint(1,20)\n\t if roll == 1:\n\t return 0\n\t else:\n\t return 1\n\t elif self.damage == 1 or self.damage == 2:\n\t\treturn self.damage\n\t elif self.damage == 3:\n\t\treturn random.randint(3,5)\n\t elif self.damage == -4:\n\t return 4\n\t elif self.damage == 10:\n\t\trandomInt = random.randint(1,4)\n\t\tif randomInt == 1:\n\t\t return 10\n\t\telse:\n\t\t return 0\n\t else:\n\t return self.damage", "def get_attack_advantage(attacker_index, defender_index):\n\n return attack_chart[attacker_index,defender_index]", "def getBestOption(self):\n if len(self.Data) < 1:\n return None\n else:\n bestR = max(self.Data.items(), key=lambda x: x[1]['SPat'].I)\n return bestR[1]", "def heuristic_combined_1_2_3_with_improve_score(game, player) -> float:\n\n center_available_factor = get_center_available_factor(game, player)\n reflection_available_factor = get_reflection_available_factor(game, player)\n partition_possible_factor = get_partition_possible_factor(game, player)\n improved_score_factor = get_improved_score_factor(game, player)\n\n return float((center_available_factor +\n reflection_available_factor +\n partition_possible_factor) * improved_score_factor)", "def choose_move(self): # pylint: disable=too-many-branches,too-many-return-statements\n if self.current_mana < 10: # Only usable move\n return self.moves.teleport\n\n if self.game.player.current_hp <= 10 and self.current_mana >= self.moves.claw.mana_cost:\n return self.moves.claw\n if self.game.player.current_hp <= 20:\n return self.moves.glide\n if self.game.player.current_hp <= 30:\n if self.current_mana < 50:\n options = {self.moves.teleport: 3, 
self.moves.glide: 6}\n elif self.current_mana <= 140:\n options = {self.moves.teleport: 1, self.moves.glide: 2, self.moves.claw: 6}\n else:\n options = {self.moves.glide: 2.3333333333, self.moves.claw: 6.6666666667}\n if self.current_hp <= 180:\n options[self.moves.heal] = 1\n return self.random_weighted(options)\n\n if self.current_hp < 25:\n if self.current_mana < 50:\n return self.random_weighted({self.moves.teleport: 0.1, self.moves.glide: 0.1, self.moves.heal: 0.8})\n if self.game.player.current_hp <= 40:\n return random.choice([self.moves.claw, self.moves.heal])\n\n if random.random() < 0.1:\n return random.choice(self.attack_options())\n return self.moves.heal\n\n options = self.attack_options()\n if self.current_hp <= 0.9*self.max_hp:\n options.append(self.moves.heal)\n return random.choice(options)", "def update_hp_for_higher_level(chosen_class,level):\n #Checks to see if your character is level 4,8,12,etc.\n def upgradedAbilityAt4(level):\n if level % 4 == 0:\n upgraded_ability = raw_input(\"Level \"+str(level)+\"!\\n Which two abilities would you like to upgrade? (Adds +1 to ability)\\n Please input two from str/dex/con/int/wis/cha with a space in between.\\n (ex: cha dex) \").split(' ')\n print\n #To write:\n #if either ability pushes ability score over 20, redo input\n\n \n for i in upgraded_ability:\n self.stealthUpdate(i,1)\n #class specific HP calculations\n if chosen_class == 'barbarian': \n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,12) + self.con + self.classMods[6]\n elif chosen_class == 'cleric':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'druid':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'fighter':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'monk':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'paladin':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'ranger':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'rogue':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]\n elif chosen_class == 'wizard':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]", "def __find_best(self):\n # First look for offensive moves\n for i in range(0, 3):\n col = self.__get_col(i)\n if len(col.get('empty')) == 1:\n if col.get(self.opponent_char) == 2:\n return col.get('empty')[0]\n for i in range(0, 3):\n row = self.__get_row(i)\n if len(row.get('empty')) == 1:\n if row.get(self.opponent_char) == 2:\n return row.get('empty')[0]\n for i in range(0, 2):\n diag = self.__get_diag(i)\n if len(diag.get('empty')) == 1:\n if diag.get(self.opponent_char) == 2:\n return diag.get('empty')[0]\n\n # Then check again looking for defensive moves\n for i in range(0, 3):\n col = self.__get_col(i)\n if len(col.get('empty')) == 1:\n if col.get(self.player_char) == 2:\n return col.get('empty')[0]\n for i in range(0, 3):\n row = self.__get_row(i)\n if len(row.get('empty')) == 1:\n 
if row.get(self.player_char) == 2:\n return row.get('empty')[0]\n for i in range(0, 2):\n diag = self.__get_diag(i)\n if len(diag.get('empty')) == 1:\n if diag.get(self.player_char) == 2:\n return diag.get('empty')[0]\n\n ##### CLEAN THIS METHOD UP LATER #####\n return None", "def fitness_score(sack, individual, max_weight):\n profit = sack[0] \n weight = sack[1] \n total_profit = 0\n total_weight = 0\n \n for i in range(len(individual)):\n if individual[i] == 1:\n total_profit += profit[i]\n total_weight += weight[i]\n \n if total_weight > max_weight:\n total_profit = 0\n\n return total_profit, total_weight", "def get_best(self, population):\n best = min(population, key=self.cost_function)\n return best, self.cost_function(best)", "def get_best( self ):\n if len(self.listScore) < 1:\n if self.bMinimumIsBest: return 9999,\"Unknown\"\n else: return -1,\"Unknown\"\n return self.listScore[0]", "def attack(health_meter):\n hit_list = 4 * ['igrac'] + 6 * ['neprijatelj']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"NAPAD! \", end='')\n show_health(health_meter)", "def get_player_best_score(self, player):\n return self.get_highscores().filter(player=player).first()", "def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n if len(allies) > 0:\n self_weapon_range = weapon_ranges[allies[0].unit_type]\n self_radius = unit_sizes[allies[0].unit_type] / float(2)\n self_unit_type = unit_type[allies[0].unit_type]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n if len(enemies) > 0:\n enemy_weapon_range = weapon_ranges[enemies[0].unit_type]\n enemy_radius = unit_sizes[enemies[0].unit_type] / float(2)\n enemy_unit_type = unit_type[enemies[0].unit_type]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n else:\n in_enemy_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type]", "def get_attack_damage(self, by: str):\n if by == 'spell':\n if 
self.spells:\n # get the spell with the maximum damage that we have enough mana for\n available_spells = [spell for spell in self.spells if self._mana >= spell.mana_cost]\n if not available_spells:\n return None\n\n spell = max(available_spells, key= lambda spell: spell.damage) # type: Spell\n if spell:\n return spell\n else:\n print('{} does not know any spells.'.format(self.name))\n return None\n else:\n return self.weapon.damage", "def __attackDamage(self, attack, suit=0):\n if suit:\n for dmg in attack[SUIT_HP_COL]:\n if (dmg > 0):\n return dmg\n return 0\n else:\n for dmg in attack[TOON_HP_COL]:\n if (dmg > 0):\n return dmg\n return 0", "def attack(health_meter):\n hit_list = 4 * ['player'] + 6 * ['enemy']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"ATTACK! \", end='')\n show_health(health_meter)", "def trySpec(weapon: dict):\n roll = random.randint(1, 100)\n if roll > 20:\n return False\n if roll == 1 and \"UR\" in weapon.keys():\n return weapon[\"UR\"]\n if roll < 6 and \"MR\" in weapon.keys():\n return weapon[\"MR\"]\n if roll < 11 and \"R\" in weapon.keys():\n return weapon[\"R\"]\n if \"C\" in weapon.keys():\n return weapon[\"C\"]", "def retrieve_handcrafted_inputs(self, obs):\n self.detect_self_unit_types(obs)\n\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n selected_allies = [unit for unit in allies if unit.unit_type == self.current_group_id]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n hitpoints = 0\n for unit in selected_allies:\n hitpoints += unit.health\n\n if self.current_group_id in unit_health.keys():\n init_hp = 0\n init_hp = unit_health[self.current_group_id] * self.init_unit_counts[self.current_group_id]\n else:\n init_hp = self.initial_self_hit_points\n current_hp = hitpoints / init_hp\n\n weapon_cooldown = 0\n for ally in selected_allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(selected_allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(selected_allies) > 0:\n self_weapon_range = weapon_ranges[self.current_group_id]\n self_radius = unit_sizes[self.current_group_id] / float(2)\n self_unit_type = unit_type[self.current_group_id]\n self_speed = unit_speed[self.current_group_id]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n # TODO can be inaccurate if using melee units\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in selected_allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n break\n else:\n in_enemy_range = 0\n if in_enemy_range:\n break\n\n north_bound, 
south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs, for_subgroup=True)\n\n if self.previous_commands[self.current_group_id] == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_commands[self.current_group_id] == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs,\n for_subgroup=True)\n\n distance_to_enemy = self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs))\n distance_to_enemy = distance_to_enemy / float((32 ** 2 + 20 ** 2) ** 0.5)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, distance_to_enemy]", "def strategiaa(stan_gry):\n ruch = min(random.randint(1,3), stan_gry)\n return ruch", "def heuristics(course, suggestedPlan, user):\n score = course.score\n bonus = 0\n return score + bonus", "def weapon_to_beat(weapon: str) -> str:\n better_weapon={'rock': 'paper',\n 'scissors': 'rock',\n 'paper': 'scissors'}\n\n if weapon not in better_weapon.keys():\n raise ValueError('unknown weapon.')\n return better_weapon[weapon]", "def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(allies) > 0:\n self_weapon_range = weapon_ranges[allies[0].unit_type]\n self_radius = unit_sizes[allies[0].unit_type] / float(2)\n self_unit_type = unit_type[allies[0].unit_type]\n self_speed = unit_speed[allies[0].unit_type]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n enemy_weapon_range = weapon_ranges[enemies[0].unit_type]\n enemy_radius = unit_sizes[enemies[0].unit_type] / float(2)\n enemy_unit_type = unit_type[enemies[0].unit_type]\n enemy_speed = unit_speed[enemies[0].unit_type]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, prev_cmd, north_bound, south_bound, west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed]", "def custom_score(game, player):\n # return 
penalize_corners_heuristic(game, player)\n # return favor_run_away_heuristic(game, player)\n return look_ahead_heuristic(game, player)", "def battle(first, second):\n\n print(get_catchphrase(first))\n print(get_catchphrase(second))\n\n if get_damage(second) > get_damage(first):\n return second\n else:\n return first", "def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(allies) > 0:\n self.self_id = allies[0].unit_type\n self_weapon_range = weapon_ranges[self.self_id]\n self_radius = unit_sizes[self.self_id] / float(2)\n self_unit_type = unit_type[self.self_id]\n self_speed = unit_speed[self.self_id]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n break\n else:\n in_enemy_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, self.self_id,\n self.enemy_id]", "def evaluate_power(soldier_list: List[Soldier]):\n inf_count = 0\n inf_avg_weapon = 0.0\n inf_avg_armor = 0.0\n arc_count = 0\n arc_avg_weapon = 0.0\n arc_avg_armor = 0.0\n cvl_count = 0\n cvl_avg_weapon = 0.0\n cvl_avg_armor = 0.0\n \n for soldier in soldier_list:\n ################################# YOUR CODE HERE #################################\n if soldier.typecode == \"ARC\":\n arc_count += 1\n arc_avg_armor += soldier.armor\n arc_avg_weapon += soldier.weapon\n elif soldier.typecode == \"INF\":\n inf_count += 1\n inf_avg_armor += soldier.armor\n inf_avg_weapon += soldier.weapon\n elif soldier.typecode == \"CVL\":\n cvl_count += 1\n cvl_avg_armor += soldier.armor\n cvl_avg_weapon += soldier.weapon\n if arc_count != 0:\n arc_avg_armor /= arc_count\n 
arc_avg_weapon /= arc_count\n\n if cvl_count != 0:\n cvl_avg_armor /= cvl_count\n cvl_avg_weapon /= cvl_count\n\n if inf_count != 0:\n inf_avg_armor /= inf_count\n inf_avg_weapon /= inf_count\n ##################################################################################\n return (inf_count, inf_avg_weapon, inf_avg_armor), (arc_count, arc_avg_weapon, arc_avg_armor), (cvl_count, cvl_avg_weapon, cvl_avg_armor)", "def get_best_sensitivity_metrics(self,\n verbose: bool = True) -> Tuple[int, int]:\n sensitivity_scores = list()\n for i in self.search_space:\n classes = self.convert_classes(threshold=i)\n tn, fp, fn, tp = confusion_matrix(self.y_true, classes).ravel()\n sensitivity = tp / (tp + fn)\n sensitivity_scores.append(sensitivity)\n best_sensitivity_score, best_sensitivity_threshold = self._get_best_metrics(\n metric_type='sensitivity_score',\n scores=sensitivity_scores,\n greater_is_better=True,\n verbose=verbose\n )\n return best_sensitivity_score, best_sensitivity_threshold", "def policy(self, s):\r\n if s.dealer_sum >= 16:\r\n return Action.STICK\r\n else:\r\n return Action.HIT", "def bestAction(self):\n get_q = self.getQFunction()\n maxq = -5000\n best_actions = []\n for (state, action), q in get_q.items():\n if q > maxq:\n maxq = q\n best_actions = [action]\n elif q == maxq:\n best_actions.append(action)\n return self.tuple_to_dictionary(random.choice(best_actions))", "def next_choice(self, opponent: 'Player') -> str:\n\n if self.adaptive_ai:\n # this is an adaptive_ai player, so see if it has collected\n # enough stats about the current opponent yet:\n if sum(self.opponent_choices[opponent.name].values()) > 5:\n # has enough samples to start adapting to the opponent\n print(' {} is trying to guess the opponent\\'s choice...'.format(self.name))\n\n # AI algorithm 1:\n # simply find the most-frequent selection by the opponent and\n # choose its killer.\n\n guess = self.opponent_choices[opponent.name].most_common(1)[0][0]\n ai_choice = weapon_to_beat(guess)\n print(' ', opponent.name, 'most often chose', guess, 'so he/she chose', ai_choice)\n return ai_choice\n\n # use the standard tendency distribution to choose a weapon:\n n = randint(1, self.randmax)\n if n <= self.tendency[0]:\n return 'rock'\n elif n <= self.tendency[0] + self.tendency[1]:\n return 'paper'\n else:\n return 'scissors'", "def get_bonus_health(self):\n return self._bonus_health", "def get_strength(self):\n return 10 - self.get_agility()", "def best_reward(self, observation, sess, weighted=False):\n if weighted:\n return self.weighted_choice(observation, sess)[1]\n else:\n return self.best_choice(observation, sess)[1]", "def __attackDamageForTgt(self, attack, tgtPos, suit=0):\n if suit:\n return attack[SUIT_HP_COL][tgtPos]\n else:\n return attack[TOON_HP_COL][tgtPos]", "def custom_score_6(game, player):\n \"\"\"custom_score_6 heuristic function aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - 1.5*length_opp_payer_moves*length_opp_payer_moves)", "def minorStrategy(self):\n if self.can_afford_wonder:\n return \"playWonder\", self.burnCard()\n elif len(self.cards_CAN_play) == 0 or 
self.board.material['coin'] < 2:\n return \"discardCard\", self.burnCard()\n else:\n cheapest_card = self.cards_CAN_play[0].totalCost()\n play_card = 0\n for card in enumerate(self.cards_CAN_play):\n card_cost = card[1].totalCost()\n if card_cost < cheapest_card:\n cheapest_card = card_cost\n play_card = card[0]\n return \"playCard\", play_card", "def get_damage():\n\n return character['Damage']", "def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n heuristics_options = {\n \"heuristic_1_center\": heuristic_1_center,\n \"heuristic_2_reflection\": heuristic_2_reflection,\n \"heuristic_3_partition\": heuristic_3_partition,\n \"heuristic_combined_1_2\": heuristic_combined_1_2,\n \"heuristic_combined_1_3\": heuristic_combined_1_3,\n \"heuristic_combined_2_3\": heuristic_combined_2_3,\n \"heuristic_combined_1_2_3\": heuristic_combined_1_2_3,\n \"heuristic_combined_1_2_3_with_improve_score\": heuristic_combined_1_2_3_with_improve_score\n }\n\n return heuristics_options[\"heuristic_combined_1_2_3_with_improve_score\"](game, player)", "def knapsack(items: List[Item], capacity: float) -> Tuple[float, List[int]]: \n values = []\n weights = []\n capacities = []\n for item in items:\n values.append(item.value)\n\n temp_weights = []\n for item in items:\n temp_weights.append(item.weight)\n weights.append(temp_weights)\n \n capacities = [capacity]\n\n # print(f\"Values: {values}\")\n # print(f\"Weights: {weights}\")\n\n # http://google.github.io/or-tools/python/ortools/algorithms/pywrapknapsack_solver.html\n # Dynamic Programming Solver is also available through `KNAPSACK_DYNAMIC_PROGRAMMING_SOLVER`\n solver = pywrapknapsack_solver.KnapsackSolver(\n pywrapknapsack_solver.KnapsackSolver.KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER,\n 'Knapsack'\n )\n\n # You can also set a time limit here to make sure that the solution is terminated if it takes too long\n # Use `set_time_limit()` method for this\n\n\n solver.Init(values, weights, capacities)\n computed_value = solver.Solve()\n \n taken: List[int] = []\n \n for i in range(len(values)):\n if solver.BestSolutionContains(i):\n taken.append(1)\n else:\n taken.append(0)\n\n # print('Taken:', taken)\n # print('Total weight:', computed_value)\n \n return computed_value, taken", "def getResistance(state, resType) :\n debufF = reduce(lambda x, y: x + y, getDebuff(state, resType), 0)\n return state['enemy']['resistance'][resType] + debufF", "def __init__(self,player,skill,difficulty):\n\n # Lowest possible skill roll == skill lvl - player lvl (or 0)\n lower_bound = max(0,skill.level - player.level)\n\n # Highest possible skill roll == skill lvl + 2*player level\n upper_bound = skill.level + (2*player.level)\n\n # Sets critical range (upper percentile to be considered crit)\n crit_range = player.crit_level / 100\n\n self.roll = random.randint(lower_bound,upper_bound)\n if (self.roll/upper_bound) > (1-crit_range):\n self.crit=True\n else:\n self.crit=False\n\n if self.roll >= difficulty:\n self.hit=True\n else:\n self.hit=False\n\n return self.hit, self.crit", "def W(self, multiplier=1):\n multiplier = str(multiplier);\n weapon_dice_count = self.Attribute_Power(\"weapon-num-dice\");\n weapon_dice = self.Attribute_Power(\"weapon-dice\");\n return \"\".join((\"(\", multiplier, \"*\", weapon_dice_count, \")d\", weapon_dice));", "def get_best_score_and_time(self):\n\n best_time = 10000\n best_score = 0\n\n for game in self.games:\n if game.status == \"won\":\n if best_time > 
game.timing:\n best_time = game.timing\n if best_score < game.score:\n best_score = game.score\n\n if best_time == 10000:\n best_time = 0\n\n return (best_score, best_time)", "def evaluateAttack(self, gameState, action):\r\n features = self.getFeaturesAttack(gameState, action)\r\n weights = self.getWeightsAttack(gameState, action)\r\n return features * weights", "def get_stats(self):\n if self.character_data is None: raise Exception('You must call get_character() first.')\n character = self.character_data\n if self._stats is not None:\n return self._stats\n\n try:\n prof_bonus = int(character.value(\"H14\"))\n except (TypeError, ValueError):\n raise MissingAttribute(\"Proficiency Bonus\")\n\n index = 15\n stat_dict = {}\n for stat in ('strength', 'dexterity', 'constitution', 'intelligence', 'wisdom', 'charisma'):\n try:\n stat_dict[stat] = int(character.value(\"C\" + str(index)))\n index += 5\n except (TypeError, ValueError):\n raise MissingAttribute(stat)\n\n stats = BaseStats(prof_bonus, **stat_dict)\n self._stats = stats\n return stats", "def __getitem__(self, item):\r\n debug.write(\"[SourceRPG] Handling retrieving of attribute %s for player %s\" % (item, self.name), 3)\r\n if item in self.currentAttributes:\r\n debug.write(\"Item is in current attributes, return\", 4)\r\n return self.currentAttributes[item]\r\n if item in self.currentSkills:\r\n debug.write(\"Item is a skill, return skill level\", 4)\r\n level = self.currentSkills[item]\r\n if item in skills:\r\n if level > int(skills[item].maxLevel):\r\n level = int(skills[item].maxLevel)\r\n return level\r\n if item in self.playerAttributes:\r\n debug.write(\"The item is an attribute, return from the local cache\", 4)\r\n return self.playerAttributes[item]\r\n if item in skills:\r\n \"\"\" \r\n The item is a skill, however, the user hasn't got a database column\r\n yet and rather than create one because we don't need it yet, we can\r\n just return 0\r\n \"\"\"\r\n debug.write(\"Skill not in player's cache, but is loaded, assume no level\", 4)\r\n return 0\r\n debug.write(\"Value not found, return 0\", 3)\r\n return None", "def fitness(self, hVals):\n fitness = 0\n\n board_state = self.board.deep_copy()\n pieces_state = self.pieces.deep_copy()\n\n ai = opponent_AI(self.board, self.pieces)\n\n # tie/ 0 score\n board1 = [['♜', '♞', '♝', '♛', '♚', '♝', '♞', '♜'],\n ['♟', '♟', '♟', '♟', '♟', '♟', '♟', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n ['♙', '♙', '♙', '♙', '♙', '♙', '♙', '♙'],\n ['♖', '♘', '♗', '♕', '♔', '♗', '♘', '♖']]\n\n # mild white advantage\n board2 = [['♜', '♞', '♝', '♛', '♚', '♝', '♞', '♜'],\n ['♟', '♟', '♟', '♟', '♟', '♟', '♟', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, '♙', None, None, None],\n [None, None, None, None, None, None, None, None],\n ['♙', '♙', '♙', '♙', None, '♙', '♙', '♙'],\n ['♖', '♘', '♗', '♕', '♔', '♗', '♘', '♖']]\n\n # white advantage\n board3 = [[None, None, None, None, None, '♜', '♚', None],\n [None, None, None, None, None, '♟', '♟', '♟'],\n [None, None, '♟', None, '♟', None, None, None],\n [None, '♟', '♙', None, None, None, None, None],\n [None, '♙', None, None, None, None, None, None],\n [None, None, None, None, None, '♘', None, '♙'],\n [None, None, None, None, '♗', '♙', '♙', None],\n [None, None, None, None, None, None, '♔', None]]\n # 
black advantage\n board4 = [[None, None, None, '♜', None, None, '♚', None],\n [None, None, '♜', None, None, '♟', None, None],\n [None, None, None, None, '♟', None, '♟', None],\n [None, '♟', None, None, '♙', None, None, '♟'],\n [None, '♙', None, '♙', None, None, None, None],\n [None, None, None, None, None, None, None, '♙'],\n [None, None, None, None, None, '♙', '♙', None],\n [None, None, None, None, '♕', None, '♔', None]]\n\n # white advantage\n board5 = [[None, None, None, None, None, None, '♚', None],\n ['♟', None, None, None, '♙', None, None, '♟'],\n [None, '♟', None, None, None, '♕', None, None],\n [None, None, None, '♟', None, None, None, '♔'],\n [None, None, '♟', '♙', None, None, '♙', None],\n [None, '♞', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, '♙'],\n [None, None, None, None, '♛', None, None, None]]\n\n # strong black advantage\n board6 = [[None, '♛', None, None, None, '♗', '♚', None],\n [None, None, None, None, None, '♟', None, '♟'],\n [None, None, '♟', None, None, None, '♟', None],\n [None, '♟', None, '♝', None, None, None, None],\n [None, None, None, None, '♞', None, None, None],\n [None, None, None, None, None, '♘', None, '♙'],\n ['♜', None, None, None, None, None, '♙', '♔'],\n [None, None, None, None, None, None, None, None]]\n\n # even game\n board7 = [['♜', None, '♝', '♛', '♚', '♝', None, '♜'],\n ['♟', '♟', '♟', None, None, '♟', '♟', '♟'],\n [None, None, '♞', '♟', None, '♞', None, None],\n [None, None, None, None, '♟', None, None, None],\n [None, None, None, None, '♙', None, None, None],\n [None, None, '♘', '♙', None, '♘', None, None],\n ['♙', '♙', '♙', None, None, '♙', '♙', '♙'],\n ['♖', None, '♗', '♕', '♔', '♗', None, '♖']]\n\n # B Queen\n board9 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♛', None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # B Rook\n board10 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♜', None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # B Bishop\n board11 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♝' , None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # B Knight\n board12 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♞' , None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n # B Pawn\n board13 = [[None, None, None, None, None, None, None, None],\n [None, None, None, 
None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♟' , None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # W Queen\n board15 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♕', None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # W Rook\n board16 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♖', None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # W Bishop\n board17 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♗' , None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n # W Knight\n board18 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♘' , None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n # W Pawn\n board19 = [[None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, '♚', '♟'],\n [None, None, None, None, None, None, None, None],\n [None, None, None, '♙' , None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, '♔', '♙', None, None, None, None, None],\n [None, None, None, None, None, None, None, None]]\n\n board_state.squares = board1\n\n score1 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n # encourages heuristic to evaluate black and white pieces equivalently and opposite to each other\n if not (-24000 < score1 < 24000):\n fitness += 3\n\n if not (-12000 < score1 < 12000):\n fitness += 3\n\n if not (-6000 < score1 < 6000):\n fitness += 2\n\n if not (-5000 < score1 < 5000):\n fitness += 2\n\n if not (-4000 < score1 < 4000):\n fitness += 2\n\n if not (-3000 < score1 < 3000):\n fitness += 2\n\n if not (-2000 < score1 < 2000):\n fitness += 1\n\n if not (-1000 < score1 < 1000):\n fitness += 1\n\n if not (-500 < score1 < 500):\n fitness += 1\n\n if not (-400 < score1 < 400):\n fitness += 1\n\n if not (-300 < score1 < 300):\n fitness += 1\n\n if not (-250 < score1 < 250):\n fitness += 1\n\n if not (-200 < score1 < 200):\n fitness += 1\n\n# # If the heuristic needs to be very specific\n# if not (-150 < score1 < 150):\n# fitness += 1\n#\n# if not (-100 < score1 < 100):\n# fitness += 1\n#\n# if not (-75 < score1 < 
75):\n# fitness += 1\n#\n# if not (-50 < score1 < 50):\n# fitness += 1\n\n board_state.squares = board2\n\n score2 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n if score2 > score1:\n fitness += 1\n\n board_state.squares = board3\n\n score3 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n if score3 > -200:\n fitness += 1\n\n board_state.squares = board4\n\n score4 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n if score4 < 300:\n fitness += 1\n\n board_state.squares = board5\n\n score5 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n if score5 > -200:\n fitness += 1\n\n if score3 > score2:\n fitness += 1\n\n if score5 > score2:\n fitness += 1\n\n board_state.squares = board6\n\n score6 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n if score6 < 500:\n fitness += 1\n\n if score6 < score4:\n fitness += 1\n\n board_state.squares = board7\n\n score7 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n # encourages heuristic to evaluate black and white pieces equivalently and opposite to each other\n if not (-24000 < score7 < 24000):\n fitness += 3\n\n if not (-12000 < score7 < 12000):\n fitness += 3\n\n if not (-6000 < score7 < 6000):\n fitness += 2\n\n if not (-5000 < score7 < 5000):\n fitness += 2\n\n if not (-4000 < score7 < 4000):\n fitness += 2\n\n if not (-3000 < score7 < 3000):\n fitness += 2\n\n if not (-2000 < score7 < 2000):\n fitness += 1\n\n if not (-1000 < score7 < 1000):\n fitness += 1\n\n if not (-500 < score7 < 500):\n fitness += 1\n\n if not (-400 < score7 < 400):\n fitness += 1\n\n if not (-300 < score7 < 300):\n fitness += 1\n\n if not (-250 < score7 < 250):\n fitness += 1\n\n if not (-200 < score7 < 200):\n fitness += 1\n\n# if not (-150 < score7 < 150):\n# fitness += 1\n#\n# if not (-100 < score7 < 100):\n# fitness += 1\n#\n# if not (-75 < score7 < 75):\n# fitness += 1\n#\n# if not (-50 < score7 < 50):\n# fitness += 1\n\n board_state.squares = board9\n score9 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board10\n score10 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board11\n score11 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board12\n score12 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board13\n score13 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n # Optimizes Black piece values relative to board impact\n if not (score9 > score10 > score11 > score13 > 0):\n fitness += 1\n if not (score9 > score10 > score12 > score13 > 0):\n fitness += 1\n\n board_state.squares = board15\n score15 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board16\n score16 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board17\n score17 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board18\n score18 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n board_state.squares = board19\n score19 = ai.evaluate(board_state, pieces_state, Vals=hVals)\n\n # Optimizes White piece values relative to board impact\n if not (0 > score19 > score18 > score16 > score15):\n fitness += 1\n\n if not (0 > score19 > score17 > score16 > score15):\n fitness += 1\n\n if not ((score15) < (score18 + score17) < score16):\n fitness += 1\n\n if not ((score9) > (score11 + score12) > score10):\n fitness += 1\n\n # For troubleshooting\n print(fitness, \": \", hVals)\n\n return fitness", "def trump(self):\n attack = None\n other = 
self.enemy.enemy_trump(self.other_hand.get_hand())\n me = self.player.player_trump(self.my_hand.get_hand())\n if other == None and me != None:\n attack = 0\n else:\n if other != None and me == None:\n attack = 1\n else:\n if other == None and me == None:\n attack = randint(0, 1)\n else:\n if other.weight < me.weight:\n attack = 1\n else:\n attack = 0\n return attack", "def weighted_choice(items: List[Tuple[str, float]]) -> str:\r\n total_weight = sum(item[1] for item in items)\r\n n = random.uniform(0, total_weight)\r\n for item, weight in items:\r\n if weight > n:\r\n return item\r\n n -= weight\r\n return item", "def _best_action(self, state):\n actions_rewards = list(self.Q[state].items())\n return max(actions_rewards, key=lambda x: x[1])[0]", "def interpretExtraBoost(skillEffect, adventurer: \"Adventurer\", enemy: \"Enemy\") -> float:\n extra_boosts_modifier_value = 0.0\n temp_list: list = skillEffect.attribute.split(\"_\")\n # per each\n temp_list = temp_list[2:]\n try:\n temp_list.remove(\"skill\")\n except:\n pass\n\n if temp_list[0] == \"self\":\n effect_lists: list[list] = [adventurer.boostCheckAdv, adventurer.boostCheckAst]\n else:\n effect_lists = [enemy.boostCheckAdv, enemy.boostCheckAst]\n temp_list = temp_list[1:]\n attribute = \"_\".join(temp_list[: len(temp_list) - 1])\n attribute_type = temp_list[-1]\n\n for effect_list in effect_lists:\n for selfBuffs in effect_list:\n if selfBuffs.isbuff == (attribute_type == \"buff\"):\n if selfBuffs.attribute == attribute:\n extra_boosts_modifier_value += (\n int(skillEffect.modifier.strip()) / 100\n )\n\n return extra_boosts_modifier_value", "def DetermineAttackOrder(self):\n\n if self.fighter1.speed > self.fighter2.speed:\n self.attacker = self.fighter1\n self.defender = self.fighter2\n elif self.fighter2.speed > self.fighter1.speed:\n self.attacker = self.fighter2\n self.defender = self.fighter1\n else:\n if self.fighter1.luck > self.fighter2.luck:\n self.attacker = self.fighter1\n self.defender = self.fighter2\n elif self.fighter2.luck > self.fighter1.luck:\n self.attacker = self.fighter2\n self.defender = self.fighter1\n else:\n if random.random() <= 0.5:\n self.attacker = self.fighter1\n self.defender = self.fighter2\n else:\n self.attacker = self.fighter2\n self.defender = self.fighter1", "def custom_score_7(game, player):\n \"\"\"custom_score_7 heuristic function also aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(1.5*length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)", "def stealability(self):\n stealability_score = float(self.price) / float(self.weight)\n print (stealability_score)\n\n if stealability_score < 0.5:\n return 'Not so stealable...'\n elif stealability_score >= 0.5 and stealability_score < 1.0:\n return 'Kinda stealable.'\n else:\n return 'Very stealable!'", "def best_choice(self, observation, sess):\n assert self.initialised, \"This model must be initialised (self.initialisation())\"\n reward_temp = self.get_reward(observation, sess)\n reward = []\n for i in reward_temp:\n i = np.clip(i, 1e-10, 1-1e-10)\n i = i / np.sum(i)\n reward.append(np.squeeze(i))\n choice = [np.random.choice(range(len(i)), p 
= i) for i in reward]\n return choice, [reward[j][i] for j, i in enumerate(choice)]" ]
[ "0.6914016", "0.66133755", "0.647129", "0.6280319", "0.6277309", "0.62479544", "0.6210469", "0.6196124", "0.6190553", "0.61680675", "0.60728323", "0.60306805", "0.6020015", "0.59235066", "0.5873241", "0.5860358", "0.5845829", "0.5792037", "0.57798666", "0.5775046", "0.5746795", "0.5742029", "0.57380325", "0.5729014", "0.5720506", "0.5672314", "0.56190413", "0.5595903", "0.5594676", "0.55684036", "0.55240804", "0.55205417", "0.55066687", "0.5500665", "0.54914737", "0.5487913", "0.54788643", "0.5466053", "0.54617554", "0.5461335", "0.5457925", "0.54518634", "0.54378724", "0.5435344", "0.5411078", "0.5377625", "0.5369202", "0.5349995", "0.53484064", "0.5347596", "0.5344023", "0.5322808", "0.53212297", "0.5319073", "0.53180593", "0.5314323", "0.5311444", "0.53095686", "0.53082997", "0.53058326", "0.53047526", "0.5303385", "0.5296926", "0.52967227", "0.5287082", "0.5279203", "0.52784926", "0.5272928", "0.52684903", "0.52675843", "0.52664924", "0.52527094", "0.5244739", "0.5230208", "0.5219971", "0.5219589", "0.5212974", "0.520843", "0.5208296", "0.5204081", "0.519928", "0.519761", "0.5194071", "0.51823246", "0.51803076", "0.51801586", "0.51669514", "0.51611626", "0.5154372", "0.5154187", "0.5152659", "0.5151332", "0.514973", "0.51483047", "0.5136282", "0.51250374", "0.5120592", "0.51191485", "0.5118651", "0.5111084" ]
0.733018
0
Returns list of tuples of the form (level, max_hit) for levels between start_strength_level and end_strength_level that increase max_hit. Assumes start_strength_level < end_strength_level and no multipliers
def get_max_hit_increases(
        start_strength_level, end_strength_level, strength_bonus, stance_adder):
    greatest_max_hit = 0
    max_hit_increases = []
    cur_strength_level = start_strength_level
    while cur_strength_level < end_strength_level:
        effective_strength = osrs.effective_level(
            cur_strength_level, 1, stance_adder, 1)
        max_hit = osrs.max_hit(effective_strength, strength_bonus)
        if max_hit > greatest_max_hit:
            # Record only the levels at which the max hit actually goes up.
            greatest_max_hit = max_hit
            max_hit_increases.append((cur_strength_level, max_hit))
        cur_strength_level += 1
    return max_hit_increases
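# Minimal usage sketch (illustration only, not part of the original source):
# `osrs.effective_level` and `osrs.max_hit` are taken as given above, and the
# bonus/stance values below are hypothetical example inputs (44 matching the
# rune scimitar strength bonus and +3 for the aggressive stance used elsewhere
# in this dataset).
#
#     increases = get_max_hit_increases(
#         start_strength_level=40,
#         end_strength_level=60,
#         strength_bonus=44,   # e.g. rune scimitar strength bonus
#         stance_adder=3,      # aggressive stance
#     )
#     for level, max_hit in increases:
#         print(f"max hit rises to {max_hit} at strength level {level}")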
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_max_hit_and_accuracy(\n levels, attack_style, attack_bonus, strength_bonus):\n weapon_attack, weapon_strength = get_weapon_stats(levels.attack)\n attack_bonus += weapon_attack\n strength_bonus += weapon_strength\n\n if attack_style == Attack_Style.ATTACK:\n effective_attack = osrs.effective_level(levels.attack, 1, 3, 1)\n effective_strength = osrs.effective_level(levels.strength, 1, 0, 1)\n elif attack_style == Attack_Style.STRENGTH:\n effective_attack = osrs.effective_level(levels.attack, 1, 0, 1)\n effective_strength = osrs.effective_level(levels.strength, 1, 3, 1)\n\n enemy_effective_defence = osrs.effective_level(1, 1, 0, 1)\n\n max_hit = osrs.max_hit(effective_strength, strength_bonus)\n accuracy = osrs.accuracy(effective_attack, attack_bonus,\n enemy_effective_defence, 0)\n\n return (max_hit, accuracy)", "def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)", "def _hit_range_get(self):\n return (self.hit_start, self.hit_end)", "def calculate_hit(self, armor_list, inventory):\n armor_power = 0\n for armor in armor_list:\n armor_power += inventory[armor]['power']\n max_strength = max(1, (self.level * 5) - armor_power)\n min_strength = 0\n return random.randint(min_strength, max_strength)", "def checkRange(currentNumRange: tuple, currentLevel: int):\n\n\tlowerNumber, higherNumber = currentNumRange[0], currentNumRange[1]\n\tmid = (higherNumber + lowerNumber) // 2\n\tans = getAnswer(f\"Does your number is greater than {mid}?\", mid)\n\n\tif ans:\n\t\tlowerNumber = mid\n\telse:\n\t\thigherNumber = mid\n\n\n\treturn (lowerNumber, higherNumber)", "def calc_tohit(attr, level):\n return level + calc_attr_mod(attr)", "def count_property_range_hits(prop, node_dict, hits):\n\tres = []\n\t# sets tuple position to use in dict value\n\tswitcher = {\n \"length\": (0,(0,4000,8000,12000,16000,20000)),\n \"steps\": (1,(0,2,4,8,16,32)),\n \"cov\": (2,(1,10,100,1000,10000,100000)),\n \"cv\": (3, (0,0.05,0.10,0.15,0.20,0.25))\n }\n\tif prop not in switcher:\n\t\treturn res\n\ttup_pos = switcher[prop][0]\n\tnode_cnt = 0\n\tpos_cnt = 0\n\tfor ind in range(len(switcher[prop][1])-1):\n\t\tmin_val = switcher[prop][1][ind]\n\t\tmax_val = switcher[prop][1][ind+1]\n\t\tfor node in node_dict.keys():\n\t\t\tval = node_dict[node][tup_pos]\n\t\t\tif ind < len(switcher[prop][1])-2:\n\t\t\t\trange_test_val = (min_val <= val < max_val)\n\t\t\telse:\n\t\t\t\trange_test_val = (min_val <= val <= max_val)\n\t\t\t# print \"range bool is\", range_test_val\n\t\t\tif range_test_val:\n\t\t\t\tnode_cnt += 1\n\t\t\t\tif node in hits: pos_cnt += 1\n\t\tif node_cnt > 0:\n\t\t\tres.append( (pos_cnt, node_cnt, round(float(pos_cnt)/node_cnt,2)))\n\t\telse:\n\t\t\tres.append((0,0,0))\n\t\tnode_cnt = 0\n\t\tpos_cnt = 0\n\treturn res", "def get_max_gains(self):\n return tuple([lib.is_SetHWGainFactor(self.hcam,0x800c+i,100)/100 for i in range(4)])", "def _determine_level(levels, points):\n import operator\n level = None\n sorted_levels = sorted(levels.iteritems(), key=operator.itemgetter(1))\n for el in sorted_levels:\n if points <= el[1]:\n level = el[0]\n break\n\n max_level = max(levels.iterkeys(), key=lambda threshold: levels[threshold])\n if points >= levels[max_level]:\n level = max_level\n return level", "def bounds_slope(regressions):\n max_up_slope = 0\n min_down_slope = 0\n for regression in 
regressions.itervalues():\n min_slope = regression.find_min_slope()\n max_up_slope = max(max_up_slope, min_slope)\n min_down_slope = min(min_down_slope, min_slope)\n \n return (max_up_slope, min_down_slope)", "def query_range(tree, start_y, start_x, end_y, end_x):\n res = 0\n start_y -= 1\n\n while end_y > start_y:\n res += bit.query_range(tree[end_y], start_x, end_x)\n end_y -= (end_y & -end_y)\n\n while start_y > end_y:\n res -= bit.query_range(tree[start_y], start_x, end_x)\n start_y -= (start_y & -start_y)\n\n return res", "def __calculateSupportResistenceLevels(self):\n\n for i in range(2, self.df.shape[0] - 2):\n if self.__isSupport(self.df, i):\n l = self.df['low'][i]\n if self.__isFarFromLevel(l):\n self.levels.append((i, l))\n elif self.__isResistance(self.df, i):\n l = self.df['high'][i]\n if self.__isFarFromLevel(l):\n self.levels.append((i, l))\n return self.levels", "def get_combined_energy(start, end, max_level, group=ms2sp(80)):\n # Dictionaries to store the energy intervals for each lead\n dicts = {}\n for lead in sig_buf.get_available_leads():\n dicts[lead] = {}\n for i in range(max_level + 1):\n dicts[lead][i] = []\n # Energy intervals detection and combination\n idx = start\n while idx < end:\n wfs = {}\n for lead in dicts:\n wfs[lead] = get_deflection_observations(\n start + idx, start + idx + TWINDOW, lead=lead, max_level=max_level, group=group\n )\n for i in range(max_level + 1):\n if dicts[lead][i] and wfs[lead][i]:\n if wfs[lead][i][0].earlystart - dicts[lead][i][-1].lateend <= group:\n dicts[lead][i][-1].end.value = wfs[lead][i][0].start.value\n wfs[lead][i].pop(0)\n dicts[lead][i].extend(wfs[lead][i])\n idx += TWINDOW\n # Remove overlapping intervals\n combine_energy_intervals(dicts.values())\n # Now we flatten the dictionaries, putting all the intervals in a sequence\n # sorted by the earlystart value.\n return SortedList(\n w for w in it.chain.from_iterable(it.chain.from_iterable(dic.values() for dic in dicts.values()))\n )", "def getLevels():", "def max_diaphragmatic_level(levels):\n return [max(x) for x in levels]", "def get_gain_range(self, *args):\n return _uhd_swig.usrp_source_get_gain_range(self, *args)", "def get_hit_points(min, max):\n return random.randint(min, max)", "def calc_level(xp, dominion):\n if xp < 3:\n xp_potential = 1\n if xp >= 3 and xp < 6:\n xp_potential = 2\n if xp >= 6 and xp < 12:\n xp_potential = 3\n if xp >= 12 and xp < 24:\n xp_potential = 4\n if xp >= 24 and xp < 48:\n xp_potential = 5\n if xp >= 48 and xp < 72:\n xp_potential = 6\n if xp >= 72 and xp < 96:\n xp_potential = 7\n if xp >= 96 and xp < 130:\n xp_potential = 8\n if xp >= 130 and xp < 170:\n xp_potential = 9\n if xp >= 170:\n xp_potential = 10\n if dominion < 2:\n dom_potential = 1\n if dominion >= 2 and dominion < 4:\n dom_potential = 2\n if dominion >= 4 and dominion < 10:\n dom_potential = 3\n if dominion >= 10 and dominion < 22:\n dom_potential = 4\n if dominion >= 22 and dominion < 38:\n dom_potential = 5\n if dominion >= 38 and dominion < 57:\n dom_potential = 6\n if dominion >= 57 and dominion < 76:\n dom_potential = 7\n if dominion >= 76 and dominion < 95:\n dom_potential = 8\n if dominion >= 95 and dominion < 124:\n dom_potential = 9\n if dominion >= 124:\n dom_potential = 10\n return min(xp_potential, dom_potential)", "def get_gain_range(self, *args):\n return _uhd_swig.usrp_sink_get_gain_range(self, *args)", "def assign_level(self, minibatch_reference_proboxes):\n with tf.name_scope('assign_levels'):\n ymin, xmin, ymax, xmax = 
tf.unstack(minibatch_reference_proboxes, axis=2)\n\n w = tf.maximum(xmax - xmin, 0.) # avoid w is negative\n h = tf.maximum(ymax - ymin, 0.) # avoid h is negative\n\n levels = tf.round(4. + tf.log(tf.sqrt(w*h + 1e-8)/224.0) / tf.log(2.)) # 4 + log_2(***)\n\n levels = tf.maximum(levels, tf.ones_like(levels) * (np.float32(self.min_level))) # level minimum is 2\n levels = tf.minimum(levels, tf.ones_like(levels) * (np.float32(self.max_level))) # level maximum is 5\n\n return tf.cast(levels, tf.int32)", "def get_weapon_stats(attack_level):\n if attack_level >= 60:\n # Dragon scimitar\n return (67, 66)\n elif attack_level >= 40:\n # Rune scimitar\n return (45, 44)\n elif attack_level >= 30:\n # Adamant scimitar\n return (29, 28)\n elif attack_level >= 20:\n # Mithril scimitar\n return (21, 20)\n elif attack_level >= 10:\n # Black scimitar\n return (19, 14)\n elif attack_level >= 5:\n # Steel scimitar\n return (15, 14)\n else:\n # Iron scimitar\n return (10, 9)", "def max_gain(self):\n if self.val1:\n val1_gain_tuple, val0_gain_tuple = self.val1.max_gain(), self.val0.max_gain()\n if val1_gain_tuple.gain > val0_gain_tuple.gain:\n return val1_gain_tuple\n else:\n return val0_gain_tuple\n elif self.attributes:\n filtered_data = filter_data(self.data,self.ancestors)\n max_attribute, max_gain = max([(attribute,\n self.heuristic(self,attribute)) for attribute in self.attributes],\n key = lambda x: x[1])\n return gain_tuple(self, max_attribute, max_gain)\n return gain_tuple(None, '', 0)", "def lvl_algo(next_level):\n total_xp_needed = (next_level * next_level)\n return total_xp_needed", "def on_max_hit_points(self):\n pass", "def extract_levels(enemy_behavior: List[Any]):\n levels = set()\n levels.add(1)\n for b in enemy_behavior:\n if type(b) == ESBranchLevel:\n levels.add(b.branch_value)\n elif hasattr(b, 'level'):\n levels.add(b.level)\n return levels", "def get_strength_text(currentstrength):\n for i in range(0, 5): \n strengthrange = (79, 59, 39, 19, 0)\n if currentstrength in range(strengthrange[i], strengthrange[i] + 20):\n strength = STRENGTH_TEXT[i]\n if currentstrength > 99:\n strength = STRENGTH_TEXT[0]\n\n return strength", "def compute_pair_bounds(self, edges, pair):\n lower_bounds =[]\n upper_bounds = []\n for arc in edges:\n l_e = self.arc_info[arc][\"lower_bound\"]\n u_e = self.arc_info[arc][\"upper_bound\"]\n f_mij = self.compute_f_mij(arc, pair)\n lower_bounds.append(l_e - f_mij)\n upper_bounds.append(u_e - f_mij)\n lb = max(lower_bounds + [0])\n # in case no edges in here, make max of 5,000\n if len(upper_bounds) == 0:\n i = pair[0]\n j = pair[1]\n print(\"Path i ({}): {}\".format(i, self.paths[i]))\n print(\"Path j ({}): {}\".format(j, self.paths[j]))\n ub = min(upper_bounds + [5000])\n #print(\"lower bounds: {}\".format(lower_bounds))\n #print(\"upper bounds: {}\".format(upper_bounds))\n return(lb, ub)", "def getSupportResistanceLevels(self):\n return self.levels", "def attack_bonus_on_level(self, level):\n raise NotImplementedError", "def get_bounds(group: Group, player: int) -> Tuple[Position, Position]:\n tiles = []\n for couple in group:\n tiles.append(couple[0])\n if couple[2] != -1:\n tiles.append(couple[1])\n\n maximum = max(tiles, key=lambda t: t[player])\n minimum = min(tiles, key=lambda t: t[player])\n\n return minimum, maximum", "def get_skill_levels(self):\n return self.model_class.objects.filter(enforced=self.enforced).order_by('-gte')", "def get_in_range(board, position, blast_strength):\n tiles_in_range = []\n for row, col in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n for 
dist in range(1, blast_strength):\n r = position[0] + row * dist\n c = position[1] + col * dist\n if 0 <= r < len(board) and 0 <= c < len(board):\n tiles_in_range.append(board[r, c])\n if board[r, c] in SOLID_TILES or board[r, c] == Item.Bomb.value:\n break\n else:\n break\n return tiles_in_range", "def get_threshold_levels(min_conf, max_conf, steps):\n\n if min_conf >= max_conf:\n return np.asarray([0.0])\n\n bins = np.arange(steps) # 0 ~ (steps-1)\n vals = bins / (steps-1) # float(max(bins)) # 0.0 ~ 1.0\n val_range = max_conf - min_conf\n norm_vals = (vals*val_range + min_conf) # min_conf ~ max_conf\n\n return norm_vals", "def get_thresholds(kalpha, deltaelow, deltaehigh, maxphotons, nscatter, scatter):\n thresholds = tuple(\n [\n (float(n), float(s), n * kalpha + s * scatter - deltaelow, n * kalpha + s * scatter + deltaehigh, s * scatter)\n for s in range(nscatter + 1, -1, -1)\n for n in range(maxphotons - s + 1)\n if not (n == 0 and s == 0)\n ]\n )\n return thresholds", "def get_gain_range(self, *args):\n return _uhd_swig.usrp_sink_sptr_get_gain_range(self, *args)", "def get_gain_range(self, *args):\n return _uhd_swig.usrp_source_sptr_get_gain_range(self, *args)", "def findLevels(A, level, mode='rising', boxWidth=0, rangeSubset=None):\n assert mode in ('rising', 'falling', 'both'), 'traceManip.findLevels: Unknown mode \\'%s\\'' % mode\n\n if boxWidth is not 0:\n A = np.convolve(A, np.array([1]*boxWidth)/float(boxWidth))\n\n crossings = np.diff(np.sign(A-level), axis=0)\n \n if mode is 'rising':\n rising_points = np.where(crossings > 0)\n return rising_points[0], len(rising_points[0])\n elif mode is 'falling':\n falling_points = np.where(crossings < 0)\n return falling_points[0], len(falling_points[0])\n else:\n all_crossing_points = np.where(np.abs(crossings) > 0)\n return all_crossing_points, len(all_crossing_points)", "def bf_search(grid, level):\n states_we_have_seen_before = Set(grid)\n current_states = [grid]\n result = None\n counter = 0\n\n while result is None:\n next_states = Set()\n\n for g in current_states:\n for gg in legal_moves(g):\n if gg not in states_we_have_seen_before:\n states_we_have_seen_before.add(gg)\n next_states.add(gg)\n\n for t in next_states:\n if match_level(t, level):\n result = t\n break\n\n current_states = next_states\n counter += 1\n\n return (counter, result)", "def test_level_discovery(self):\n defined_levels = find_defined_levels()\n level_values = defined_levels.values()\n for number in (0, 10, 20, 30, 40, 50):\n assert number in level_values", "def get_min_max(self, run_id):\n runs = self.repo.get_all_runs()\n levels = runs[['min_level', 'max_level']][runs['run_id'] == run_id]\n return levels", "def rangeFinder(self, startIndex, endIndex, list):\n max = list[startIndex]\n min = list[startIndex]\n for x in range(startIndex, endIndex):\n if list[x] > max:\n max = list[x]\n if list[x] < min:\n min = list[x]\n return max-min", "def compute_density_level(group_result_with_log_density: List[dict], length: float):\n log_density_list = [group['log_density'] for group in group_result_with_log_density]\n max_val = max(log_density_list)\n min_val = min(log_density_list)\n # split range with 10 and compute which to where\n range_val = max_val - min_val\n total_level = 9\n gap = range_val / total_level\n level_list = []\n for i, log_density in enumerate(log_density_list):\n level = 5\n if gap != 0:\n level = round((log_density - min_val) / gap)\n level_list.append(dict(level=level, start_time=group_result_with_log_density[i]['pitches'][0]['time']))\n\n for 
level_dict in level_list:\n start = level_dict['start_time'] / length\n level_dict['start_time'] = start\n return level_list", "def update_hp_for_higher_level(chosen_class,level):\n #Checks to see if your character is level 4,8,12,etc.\n def upgradedAbilityAt4(level):\n if level % 4 == 0:\n upgraded_ability = raw_input(\"Level \"+str(level)+\"!\\n Which two abilities would you like to upgrade? (Adds +1 to ability)\\n Please input two from str/dex/con/int/wis/cha with a space in between.\\n (ex: cha dex) \").split(' ')\n print\n #To write:\n #if either ability pushes ability score over 20, redo input\n\n \n for i in upgraded_ability:\n self.stealthUpdate(i,1)\n #class specific HP calculations\n if chosen_class == 'barbarian': \n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,12) + self.con + self.classMods[6]\n elif chosen_class == 'cleric':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'druid':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'fighter':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'monk':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'paladin':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'ranger':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'rogue':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]\n elif chosen_class == 'wizard':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]", "def EM_gain_range(self):\n mini, maxi = ct.c_int(), ct.c_int()\n self.lib.GetEMGainRange(ct.pointer(mini), ct.pointer(maxi))\n\n return (mini.value, maxi.value)", "def toms748_scan(\n data,\n model,\n bounds_low,\n bounds_up,\n level=0.05,\n atol=2e-12,\n rtol=1e-4,\n from_upper_limit_fn=False,\n **hypotest_kwargs,\n):\n cache = {}\n\n def f_cached(poi):\n if poi not in cache:\n cache[poi] = hypotest(\n poi,\n data,\n model,\n return_expected_set=True,\n **hypotest_kwargs,\n )\n return cache[poi]\n\n def f(poi, level, limit=0):\n # Use integers for limit so we don't need a string comparison\n # limit == 0: Observed\n # else: expected\n return (\n f_cached(poi)[0] - level\n if limit == 0\n else f_cached(poi)[1][limit - 1] - level\n )\n\n def best_bracket(limit):\n # return best bracket\n ks = np.asarray(list(cache))\n vals = np.asarray(\n [\n value[0] - level if limit == 0 else value[1][limit - 1] - level\n for value in cache.values()\n ]\n )\n pos = vals >= 0\n neg = vals < 0\n lower = ks[pos][np.argmin(vals[pos])]\n upper = ks[neg][np.argmax(vals[neg])]\n return (lower, upper)\n\n # extend bounds_low and bounds_up if they don't bracket CLs level\n lower_results = f_cached(bounds_low)\n # {lower,upper}_results[0] is an array and {lower,upper}_results[1] is a\n # list of arrays so need to turn {lower,upper}_results[0] into list to\n # concatenate them\n while np.any(np.asarray([lower_results[0]] + lower_results[1]) < level):\n bounds_low /= 2\n lower_results = 
f_cached(bounds_low)\n upper_results = f_cached(bounds_up)\n while np.any(np.asarray([upper_results[0]] + upper_results[1]) > level):\n bounds_up *= 2\n upper_results = f_cached(bounds_up)\n\n tb, _ = get_backend()\n obs = tb.astensor(\n toms748(f, bounds_low, bounds_up, args=(level, 0), k=2, xtol=atol, rtol=rtol)\n )\n exp = [\n tb.astensor(\n toms748(f, *best_bracket(idx), args=(level, idx), k=2, xtol=atol, rtol=rtol)\n )\n for idx in range(1, 6)\n ]\n if from_upper_limit_fn:\n return obs, exp, (list(cache), list(cache.values()))\n return obs, exp", "def get_gains(self):\n return tuple([lib.is_SetHWGainFactor(self.hcam,0x8000+i,0)/100 for i in range(4)])", "def showBestStatLevelReached(self) :\n bestLevel = 0\n for level in self.level_history :\n bestLevel = level.level if bestLevel < level.level else bestLevel\n Scenario.messageBestStatLevelReached(bestLevel)", "def get_span_scores(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor) -> torch.Tensor:\n if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:\n raise ValueError(\"Input shapes must be (batch_size, passage_length)\")\n batch_size, passage_length = span_start_logits.size()\n device = span_start_logits.device\n # (batch_size, passage_length, passage_length)\n span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)\n # Only the upper triangle of the span matrix is valid; the lower triangle has entries where\n # the span ends before it starts.\n span_log_mask = torch.triu(torch.ones((passage_length, passage_length), device=device)).log()\n valid_span_log_probs = span_log_probs + span_log_mask\n\n # Here we take the span matrix and flatten it, then find the best span using argmax. We\n # can recover the start and end indices from this flattened list using simple modular\n # arithmetic.\n # (batch_size, passage_length * passage_length)\n# best_spans = valid_span_log_probs.view(batch_size, -1).argmax(-1)\n# span_start_indices = best_spans // passage_length\n# span_end_indices = best_spans % passage_length\n# return torch.stack([span_start_indices, span_end_indices], dim=-1)\n return valid_span_log_probs", "def get_strength(self):\n return 10 - self.get_agility()", "def find_contour_levels(grid, levels=np.array([0.68, 0.95, 0.997])):\n sorted_ = np.sort(grid.ravel())[::-1]\n pct = np.cumsum(sorted_) / np.sum(sorted_)\n cutoffs = np.searchsorted(pct, levels)\n return np.sort(sorted_[cutoffs])", "def calculate_min_max_tiles(self):", "def find_max_power(data, interval_power, interval_duration, search_range):\n max_power = 0\n index = None\n for i in range(min(len(data), search_range)):\n power = get_row_power(data, i)\n if power > max_power:\n index, max_power = i, power\n logging.debug(\"peak index = %u, max_power = %u\", index, max_power)\n return index", "def get_hit(self):\n for bossProjectile in self.overlapping_sprites:\n self.score.value -= 10\n self.score.right = games.screen.width - 10 \n bossProjectile.handle_caught()", "def difficulty_for_level(level):\n return 0 if level==\"easy\" else (1 if level==\"medium\" else 2)", "def return_parameter_bounds(maximum_luminosity=20):\n return [(maximum_luminosity, maximum_luminosity + 3),\n (3 * 10 ** -4, 8 * 10 ** -3), (2., 350), (-8., -0.2),\n (-400, 400)]", "def _value_in_bounds(self, vals):\n return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))", "def scores2logSpanProb(startScores: torch.Tensor, endScores: torch.Tensor,\n selectionScore: torch.Tensor) -> torch.Tensor:\n \"\"\"\n We want to get:\n log P(a) = log 
(P(start)*P(end)*P(selected))\n that can be later used in loss like that:\n -log sum exp(log P(a))\n\n Let's see how to convert original expression to the expression that will be more suitable for computation:\n log P(a) = log (P(start)*P(end)*P(selected)) = \n = log (P(start)) + log (P(end)) + log (P(selected))) = \n\n \"\"\"\n\n # in all softmaxes we normalize by sum of all spans (/ starts / end) in all passages\n logStart = torch.nn.functional.log_softmax(startScores.flatten(), dim=0).view(startScores.shape)\n logEnd = torch.nn.functional.log_softmax(endScores.flatten(), dim=0).view(endScores.shape)\n\n # log of probability that given passage contains an answer\n logSelection = torch.nn.functional.log_softmax(selectionScore.flatten(), dim=0).view(-1, 1, 1)\n\n # for the starts and ends we need to make all combinations of start and ends to get the\n # log (P(start)) + log (P(end)) part of expression that can be added to the rest.\n #\n # For each passage we will create a matrix of scores that could be\n # indexed with [start][end] (=log (P(start)) + log (P(end))).\n\n logStart = logStart.unsqueeze(2).expand((logStart.shape[0], logStart.shape[1], logStart.shape[1])) # expand in cols\n # logStart -> passage X passage token start scores\n # [\n # [10, 5],\n # [2, 3]\n # ]\n # logStart.unsqueeze(2) -> passage X passage token start scores X 1\n # [\n # [\n # [10],\n # [5]\n # ],\n # [\n # [2],\n # [3]\n # ]\n # ]\n # logStart.unsqueeze(2).expand((logStart.shape[0], logStart.shape[1], logStart.shape[1]))\n # -> passage X passage token start scores X passage token start scores\n # [\n # [\n # [10, 10],\n # [5, 5]\n # ],\n # [\n # [2, 2],\n # [3, 3]\n # ]\n # ]\n logEnd = logEnd.unsqueeze(1).expand((logEnd.shape[0], logEnd.shape[1], logEnd.shape[1])) # expand in rows\n # logEnd -> passage X passage token end scores\n # [\n # [10, 5],\n # [2, 3]\n # ]\n # logEnd.unsqueeze(1) -> passage X passage token end scores X 1\n # [\n # [[10, 5]],\n # [[2, 3]]\n # ]\n # logEnd.unsqueeze(1).expand((logEnd.shape[0], logEnd.shape[1], logEnd.shape[1]))\n # -> passage X passage token end scores X passage token end scores\n # [\n # [\n # [10, 5]\n # [10, 5]\n # ],\n # [\n # [2, 3]\n # [2, 3]\n # ]\n # ]\n\n return logStart + logEnd + logSelection", "def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])", "def __info_gain_from_splits(self, potential_integer_splits, sorted_data):\n info_gains = []\n for split in map(int, potential_integer_splits):\n left_child = sorted_data[sorted_data[:, 0].astype(int) < split, :]\n right_child = sorted_data[sorted_data[:, 0].astype(int) >= split, :]\n info_gains.append(self.__calc_info_gain(sorted_data, left_child,\n right_child))\n return info_gains", "def maxIG(data):\n \n index = -1\n max_gain = -1\n \n for i in range(len(data[0]) - 1):\n gain = informationGain2(data, i)\n if gain > max_gain:\n index = i\n max_gain = gain\n \n return (index, max_gain)", "def get_bounds():\n return [0.00], [1.00]", "def searchRange4(self, nums: List[int], target: int) -> List[int]:\n def bisearch_l() -> int:\n i = -1\n l, r = 0, len(nums) - 1\n while l <= r:\n m = (l + r) // 2\n if nums[m] >= target:\n r = m - 1\n else:\n l = m + 1\n \n if nums[m] == target:\n i = m\n \n return i\n\n def bisearch_r() -> int:\n i = -1\n l, r = 0, len(nums) - 1\n while l <= r:\n m = (l + r) // 2\n if nums[m] > target:\n r = m - 1\n else:\n l = m + 1\n \n if nums[m] == target:\n i = m\n \n return i\n\n return [bisearch_l(), bisearch_r()]", "def level(score):\n user_level = \"\"\n if score < 
20:\n user_level = \"elementary\"\n elif score < 30:\n user_level = \"intermediate\"\n elif score < 35:\n user_level = \"upper intermediate\"\n else:\n user_level = \"advanced\"\n return user_level", "def ability_bonus_on_level(self, level):\n raise NotImplementedError", "def get_levels(self):\n return self.levels[self.game]", "def max_cut(g):\n # Write your code here.\n return []", "def bounds(lines):\n min_x = bench_util.Max\n min_y = bench_util.Max\n max_x = bench_util.Min\n max_y = bench_util.Min\n \n for line in lines.itervalues():\n for x, y in line:\n min_x = min(min_x, x)\n min_y = min(min_y, y)\n max_x = max(max_x, x)\n max_y = max(max_y, y)\n \n return ((min_x, min_y), (max_x, max_y))", "def minmax(coins_left, score_p1, len_path_p1, loc_p1, score_p2, len_path_p2, loc_p2):\n # At the end of the tree, return the value of the leaf\n if len(coins_left) == 0 or score_p2 > 5 or score_p1 > 5:\n return score_p1, [], [] # maximizing for p1\n\n pl_last_coin = []\n en_last_coin = []\n best_pl_path = []\n best_en_path = []\n # Update the map data\n u.update_dists_from_each(dists_matrix, route_matrix, loc_p1, mazeMap, coins)\n u.update_dists_from_each(dists_matrix, route_matrix, loc_p2, mazeMap, coins + [loc_p1])\n\n # Todo : this is not very dynamic, if the enemy goes to the coin I want ?\n if len_path_p1 <= len_path_p2: # MAXIMIZING player1 turn\n best_value = float('-inf')\n best_coin = get_closest_coin(loc_p1, coins_left, dists_matrix)[0]\n best_pl_path = []\n en_closest_coin, en_closest_coin_dist = get_closest_coin(loc_p2, coins_left, dists_matrix)\n\n for coin in coins_left:\n new_len_path_p1 = len_path_p1 + dists_matrix[loc_p1][coin]\n loc_p1 = coin\n new_score_p1 = score_p1 + 1\n new_coins_left = coins_left[:]\n new_coins_left.remove(coin)\n\n node_value, en_path, pl_path = minmax(new_coins_left, new_score_p1, new_len_path_p1, loc_p1, score_p2, len_path_p2, loc_p2)\n if node_value > best_value and (coin != en_closest_coin or dists_matrix[loc_p1][coin] <= en_closest_coin_dist):\n best_value = node_value\n best_coin = coin\n best_pl_path = pl_path\n best_en_path = en_path\n pl_last_coin = [best_coin]\n\n else: # MINIMIZING, player 2 is going to the closest coin\n closest_coin, closest_coin_dist = get_closest_coin(loc_p2, coins_left, dists_matrix)\n\n new_len_path_p2 = len_path_p2 + closest_coin_dist\n loc_p2 = closest_coin\n new_score_p2 = score_p2 + 1\n new_coins_left = coins_left[:]\n new_coins_left.remove(closest_coin)\n\n node_value, en_path, pl_path = minmax(new_coins_left, score_p1, len_path_p1, loc_p1, new_score_p2, new_len_path_p2, loc_p2)\n\n best_value = node_value\n best_coin = closest_coin\n best_pl_path = pl_path\n best_en_path = en_path\n en_last_coin = [best_coin]\n\n en_path = en_last_coin + best_en_path\n pl_path = pl_last_coin + best_pl_path\n return best_value, en_path, pl_path", "def find_best_point(self, start_i, end_i, ranges):\n max_val = 0\n target = start_i\n for i in range(start_i, end_i):\n if ranges[i] > max_val:\n target = i\n max_val = ranges[i]\n \n angle = -(540-target)*3\n return float(angle)/1080, target", "def get_level_profile(n, l):\r\n c, p, q = (n, 1, 0)\r\n for i in range(l):\r\n c, p, q = get_next_level(c, p, q)\r\n return (c, p, q)", "def showBestGainWon(self) :\n bestGainWon = 0\n for level in self.level_history :\n bestGainWon = level.profit if bestGainWon < level.profit else bestGainWon\n Scenario.messageGetBestGainWon(bestGainWon)", "def find_abs_bound_range(self, results_dict, keys, avg_over=5):\n max_averages = []\n min_averages = []\n for 
key in keys:\n result_data = results_dict[key].data\n # compress to remove masked values\n sorted_data = np.sort(result_data.compressed())\n # select the \"avg_over\" extreme values from the array\n # and find it's average value\n max_average_data = np.average(sorted_data[-avg_over:])\n min_average_data = np.average(sorted_data[:avg_over])\n max_averages.append(max_average_data)\n min_averages.append(min_average_data)\n\n # the maximum absolute value for the bound\n abs_max = np.abs(np.max(max_averages))\n abs_min = np.abs(np.min(min_averages))\n max_bound = np.max([abs_min, abs_max])\n\n # find the bound candidate suited for the bound range\n index = np.argwhere(self.bound_candidates - max_bound > 0)[0, 0]\n\n return self.bound_candidates[index]", "def BestLevel(self,wavelet=None,maxLevel=None):\n\n if wavelet is None:\n wavelet = self.wavelet\n if maxLevel is None:\n maxLevel = self.maxLevel\n\n previouslevelmaxE = self.ShannonEntropy(self.data)\n self.wp = pywt.WaveletPacket(data=self.data, wavelet=wavelet, mode='symmetric', maxlevel=maxLevel)\n level = 1\n currentlevelmaxE = np.max([self.ShannonEntropy(n.data) for n in self.wp.get_level(level, \"freq\")])\n while currentlevelmaxE < previouslevelmaxE and level<maxLevel:\n previouslevelmaxE = currentlevelmaxE\n level += 1\n currentlevelmaxE = np.max([self.ShannonEntropy(n.data) for n in self.wp.get_level(level, \"freq\")])\n return level", "def support(self, level=1):\n if level == 1:\n sup = (2 * self.pivot_point) - self.last_high\n elif level == 2:\n sup = self.pivot_point - (self.last_high - self.last_low)\n elif level == 3:\n sup = self.last_low - 2*(self.last_high - self.pivot_point)\n else:\n raise ValueError('Not a valid level. Must be 1, 2, or 3')\n return sup", "def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)", "def compute_hit_properties(w, raw_hits, argmaxes, areas, centers):\n for hit_i in range(len(raw_hits)):\n current_max = -999.9\n current_argmax = -1\n current_area = 0.0\n current_center = 0.0\n for i, x in enumerate(w[raw_hits[hit_i, 0]:raw_hits[hit_i, 1]+1]):\n if x > current_max:\n current_max = x\n current_argmax = i\n current_area += x\n current_center += i * x\n argmaxes[hit_i] = current_argmax\n areas[hit_i] = current_area\n centers[hit_i] = current_center / current_area", "def getAbilityScores(self):\n mods = [(self.str -10)/2,\n (self.dex-10)/2,\n (self.con-10)/2,\n (self.int-10)/2,\n (self.wis-10)/2,\n (self.cha-10)/2]\n print \"STR: {0} ({1}) \\nDEX: {2} ({3})\\nCON: {4} ({5})\".format(self.str,\n mods[0],\n self.dex,\n mods[1],\n self.con,\n mods[2])\n print \"INT: {0} ({1})\\nWIS: {2} ({3})\\nCHA: {4} ({5})\".format(self.int,\n mods[3],\n self.wis,\n mods[4],\n self.cha,\n mods[5])", "def level_time_average(start_levels, attack_style, attack_bonus, strength_bonus):\n ticks_per_attack = 4 # Scimitar attack speed\n max_hit, accuracy = get_max_hit_and_accuracy(\n start_levels, attack_style, attack_bonus, strength_bonus)\n \n if attack_style == Attack_Style.ATTACK:\n start_exp = osrs.experience[start_levels.attack]\n end_exp = osrs.experience[start_levels.attack+1]\n elif attack_style == Attack_Style.STRENGTH:\n start_exp = osrs.experience[start_levels.strength]\n end_exp = osrs.experience[start_levels.strength+1]\n \n experience = end_exp - start_exp\n avg_hit = accuracy * max_hit / 2\n exp_per_hit = avg_hit * osrs.BASE_EXP_PER_DAMAGE\n ticks = experience / exp_per_hit * 
ticks_per_attack\n return ticks", "def get_levels(self, arcs):\n levels = set(map(lambda arc: arc['end'] - arc['start'], arcs))\n return sorted(list(levels))", "def bounds(self, start=None, finish=None):\n lower = start if start is not None else self.limits[0]\n upper = finish if finish is not None else self.limits[1]\n\n lower = lower + self.offsets[0]\n upper = upper + self.offsets[1]\n\n return (lower, upper)", "def range_around(goal_val: int, spread: int, min_val: int = 0, max_val: int = math.inf):\n lower = max(min_val, goal_val - spread)\n upper = min(max_val, goal_val + spread)\n return (lower, upper)", "def get_leveling_args(cards, card_attrs):\n if (len(card_attrs['evolve']) < len(card_attrs['level']) and\n len(cards) > 15):\n cards_to_consume = set()\n candidates = set(card_attrs['level'].keys())\n cards_by_xp = list(set(swizzle(cards, 'xp01')) & candidates)\n cards_by_rarity = list(set(swizzle(cards, 'type')) & candidates)\n cards_by_xp, cards_by_rarity, top_third = remove_rarest_third(\n cards_by_xp, cards_by_rarity)\n\n if cards_by_xp and top_third:\n # Number of cards to consume into our destination card will be between\n # min and max values (defined in config).\n num_to_consume = randint(\n cfg['level']['min_cards'],\n min(cfg['level']['max_cards'], len(top_third)))\n\n # Get the bottom n number of cards by xp to consume into a rare card\n lesser = min(num_to_consume, len(cards_by_xp))\n for i in range(lesser): # pylint: disable=unused-variable\n cur_card = cards_by_xp.pop(0)\n if cur_card in cards_by_rarity:\n cards_by_rarity.remove(cur_card)\n if cur_card not in cards_to_consume:\n cards_to_consume.add(cur_card)\n\n logger.debug(\"Cards to consume:\")\n logger.debug(cards_to_consume)\n\n # Choose one of the more rare cards as the target to level.\n # TODO: prefer rare cards with more xp pylint: disable=fixme\n dest_id = choice(top_third)\n\n return (dest_id, cards_to_consume)\n\n return False", "def get_map_size(level):\n if level < 5:\n return 5, 5\n if level < 70:\n return 10, 10\n if level < 150:\n return 25, 25\n return 50, 50", "def standing_level_access_map(self) -> dict:\n names_map = {\n self.StandingLevel.NONE: \"NONE\",\n self.StandingLevel.TERRIBLE: \"TERRIBLE\",\n self.StandingLevel.BAD: \"BAD\",\n self.StandingLevel.NEUTRAL: \"NEUTRAL\",\n self.StandingLevel.GOOD: \"GOOD\",\n self.StandingLevel.EXCELLENT: \"EXCELLENT\",\n }\n return {\n names_map[self.StandingLevel(level)]: (\n self.allow_access_with_standings and level >= self.standing_level\n )\n for level in self.StandingLevel.values\n }", "def get_highly_probable_states(stat_dist, threshold):\r\n states=[]\r\n for i in range(len(stat_dist)):\r\n if stat_dist[i]>threshold:\r\n states.append(i)\r\n return states", "def searchRange(self, nums: List[int], target: int) -> List[int]:\n if not nums:\n return [-1, -1]\n n = len(nums)\n start, end = 0, n - 1\n while start <= end:\n mid = start + (end - start + 1 + 1)//2 - 1\n left = right = -1\n if nums[mid] == target:\n left = right = mid\n elif nums[start] == target:\n left = right = start\n elif nums[end] == target:\n left = right = end\n\n if 0 <= left and left < n:\n has_left = left - 1 >= 0 and nums[left-1] == target\n has_right = right + 1 < n and nums[right+1] == target\n while has_left or has_right:\n if has_left:\n left -= 1\n if has_right:\n right += 1\n has_left = left - 1 >= 0 and nums[left-1] == target\n has_right = right + 1 < n and nums[right+1] == target\n\n return [left, right]\n\n elif nums[mid] > target:\n # [0, mid - 1]\n end = mid - 1\n 
else:\n # [mid + 1, n]\n start = mid + 1\n\n return [-1, -1]", "def get_bounds(self, A: list, c: int) -> (int, int):\r\n\r\n # This implementation uses two binary search algorithms to find\r\n # the upper and lower bound.\r\n # First step is to isolate the upper_bound.\r\n\r\n L = 0\r\n R = len(A)\r\n while L < R:\r\n # Find the middle value\r\n m = math.floor((L + R) / 2)\r\n v = A[m]\r\n\r\n # Check if |A[i] - i| < c:\r\n if abs(v - m) > c:\r\n # This step is important, if we are on a negative number\r\n # We need to move right instead of left.\r\n if v < 0 or (v - m) < 0:\r\n L = m + 1\r\n else:\r\n # Else, we need to move towards the left.\r\n R = m\r\n else:\r\n # If it matches the condition, move the left up because we're\r\n # going towards the lowest number.\r\n L = m + 1\r\n upper_bound = R\r\n\r\n # Now that we have the upper bound, we only need to\r\n # Binary search for the lower bound between index 0 and upper_bound.\r\n L = 0\r\n R = upper_bound\r\n while L < R:\r\n # find the middle\r\n m = math.floor((L + R) / 2)\r\n if abs(A[m] - m) > c:\r\n # If it's greater, move the left up.\r\n L = m + 1\r\n else:\r\n # Else, move the right down.\r\n R = m\r\n\r\n # Finally we have the lower bound.\r\n lower_bound = L\r\n\r\n # Return the lower bound and the upper bound index\r\n # Note the -1 because the upper bound will give the\r\n # size of the array in worst case.\r\n return lower_bound, upper_bound - 1", "def _get_energy_range(self):\n\n e0_min = self.network.isomers[0].E0\n e0_max = e0_min\n\n for isomer in self.network.isomers[1:]:\n E0 = isomer.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for reactant in self.network.reactants:\n E0 = reactant.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for product in self.network.products:\n E0 = product.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for rxn in self.network.path_reactions:\n E0 = rxn.transition_state.conformer.E0.value_si\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n\n return e0_min, e0_max", "def __getHints(self, p):\n st = bisect.bisect_left(self.index, (p[:self.ln], -1)) # binary search\n en = bisect.bisect_right(self.index, (p[:self.ln], sys.maxsize)) # binary search\n hits = self.index[st:en] # this range of elements corresponds to the hits\n return [h[1] for h in hits] # return just the offsets", "def find_optimum_thresholds(search_method, subscripts, knowledge_model,\n samples):\n if len(samples) == 0:\n return None\n\n exercise_name = samples[0][idx.exercise]\n if exercise_name not in knowledge_model[\"thetas\"]:\n return None\n else:\n thetas = knowledge_model[\"thetas\"][exercise_name]\n\n # Convert CSV features into proper float-array representation\n correct, features, _ = parse_features(samples, False, [\"custom\", \"random\"])\n\n # Compute predctions based on features\n predictions = regression_util.sigmoid(np.dot(features, thetas))\n\n thresholds = {}\n for subscript in subscripts:\n if search_method.lower() == \"brute\":\n optimum_threshold = scipy.optimize.brute(f_score,\n ranges=((0.0, 1.0),), Ns=101, args=(subscript, correct,\n predictions),\n full_output=True)\n\n # Transform to standard format\n optimum_threshold = {\n \"max_score\": -optimum_threshold[1],\n \"success\": True,\n \"threshold\": optimum_threshold[0][0],\n }\n\n elif search_method.lower() == \"minimize_scalar\":\n optimum_threshold = scipy.optimize.minimize_scalar(f_score,\n method=\"bounded\", bounds=(0.0, 1.0), args=(subscript,\n correct, 
predictions))\n\n # Transform to standard format\n optimum_threshold = {\n \"max_score\": -optimum_threshold.fun,\n \"success\": optimum_threshold.success,\n \"threshold\": optimum_threshold.x,\n }\n\n else:\n raise ValueError(\"Did not understand search method %s\" %\n search_method)\n\n if not optimum_threshold[\"success\"]:\n print >>sys.stderr, \"Optimization failed for\", subscript\n\n # Augment the result object with the number of samples\n optimum_threshold[\"samples\"] = len(samples)\n\n thresholds[subscript] = optimum_threshold\n return thresholds", "def score_max_depths(graph, max_depths):\n ###TODO\n pass", "def resistance(self, level=1):\n if level == 1:\n res = (2 * self.pivot_point) - self.last_low\n elif level == 2:\n res = self.pivot_point + (self.last_high - self.last_low)\n elif level == 3:\n res = self.last_high + 2*(self.pivot_point - self.last_low)\n else:\n raise ValueError('Not a valid level. Must be 1, 2, or 3')\n return res", "def get_levels(self, arcs: List[Dict[str, Any]]) -> Dict[Tuple[int, int, str], int]:\n arcs = [dict(t) for t in {tuple(sorted(arc.items())) for arc in arcs}]\n length = max([arc[\"end\"] for arc in arcs], default=0)\n max_level = [0] * length\n levels = {}\n for arc in sorted(arcs, key=lambda arc: arc[\"end\"] - arc[\"start\"]):\n level = max(max_level[arc[\"start\"] : arc[\"end\"]]) + 1\n for i in range(arc[\"start\"], arc[\"end\"]):\n max_level[i] = level\n levels[(arc[\"start\"], arc[\"end\"], arc[\"label\"])] = level\n return levels", "def bounds(self) -> Tensor:\n return torch.cat([self.mins, self.mins + self.ranges], dim=-2)", "def calculateOptimal(self) -> (list, int):\n\t\tcombinations = list(itertools.product(*self.clusters))\n\t\tmin_dist = 1000000\n\t\tmin_combination = None\n\t\tfor combination in combinations:\n\t\t\tdist = super().step(combination)\n\t\t\tif(dist < min_dist):\n\t\t\t\tmin_dist = dist\n\t\t\t\tmin_combination = combination\n\t\treturn (min_combination, min_dist)", "def alpha_beta(self, cur_state, limit, cur_level, alpha, beta, min_level):\n\n # Evaluate current state.\n if cur_level == limit or get_action_score(cur_state.action[0], cur_state.action[1], cur_state.action_player, cur_state.occupied)==100:\n return cur_state.value, cur_state, cur_level, None\n else:\n child_list = cur_state.successors()\n final_state = None\n action_took = None\n if cur_state.player == 1: # MAX player\n for i in range(len(child_list)):\n c = heapq.heappop(child_list)\n (c_alpha, c_state, c_level, action) = self.alpha_beta(c[1], limit, cur_level + 1, alpha, beta, min_level)\n # print(\"HERE: \"+str(c_alpha)+\" \"+str(c_level))\n if (c_alpha > alpha) or (c_alpha == alpha and c_level < min_level):\n alpha = c_alpha\n final_state = c_state\n action_took = c[1].action\n min_level = c_level\n if beta <= alpha:\n break\n return alpha, final_state, min_level, action_took\n else: # MIN player\n for i in range(len(child_list)):\n c = heapq.heappop(child_list)\n (c_beta, c_state, c_level, action) = self.alpha_beta(c[1], limit, cur_level + 1, alpha, beta, min_level)\n # print(\"c_beta = \" + str(c_beta) + \", beta = \" + str(beta))\n if (c_beta < beta) or (c_beta == beta and c_level < min_level):\n beta = c_beta\n final_state = c_state\n action_took = c[1].action\n min_level = c_level\n if beta <= alpha:\n break\n return beta, final_state, min_level, action_took", "def set_multipole_range(binning_path):\n\n lmins = []\n lmaxs = []\n for hdu in [0, 1, 3, 3]:\n\n data = fits.getdata(binning_path, hdu + 1)\n lmins.append(np.array(data.field(0), 
int))\n lmaxs.append(np.array(data.field(1), int))\n\n lmax = np.max([max(l) for l in lmaxs])\n\n return(lmax, lmins, lmaxs)", "def get_range(min, max, intervals, log):\n if not log:\n min = float(min)\n max = float(max)\n difference = max-min\n step_size = difference/intervals\n output = [min + i*step_size for i in range(intervals+1)]\n return output\n else:\n from math import log10 as log\n log_min = log(min)\n log_max = log(max)\n log_difference = log_max - log_min\n step_size = log_difference/intervals\n output = [pow(10, log_min + i*step_size) for i in range(intervals+1)]\n return output", "def use_level(self, level):\n\n if self.min_level <= level <= self.max_level:\n map_extent = self.tiles.use_level(level)\n if map_extent:\n self.level = level\n (self.map_width, self.map_height,\n self.ppd_x, self.ppd_y) = map_extent\n (self.map_llon, self.map_rlon,\n self.map_blat, self.map_tlat) = self.tiles.extent\n\n # do level change callback\n self.handleLevelChangeCallback(level)\n\n return True\n\n return False", "def high_and_low(numbers):\n highest = max(numbers)\n lowest = min(numbers)\n return (highest,lowest)" ]
[ "0.58623534", "0.56103396", "0.5589479", "0.556326", "0.5557862", "0.5534794", "0.5484521", "0.54517794", "0.5345242", "0.53201896", "0.5267493", "0.522973", "0.5207859", "0.5173894", "0.5169486", "0.51680756", "0.5166746", "0.5157776", "0.51527137", "0.51492393", "0.51178026", "0.5090489", "0.50744903", "0.5052406", "0.5035216", "0.4999881", "0.49979913", "0.4989706", "0.49859378", "0.49853504", "0.49730673", "0.49600846", "0.49531385", "0.49504447", "0.49498117", "0.49408692", "0.49290776", "0.49082515", "0.49073353", "0.4900823", "0.48986602", "0.48941326", "0.48910344", "0.48813853", "0.48647282", "0.48574615", "0.48387554", "0.4837908", "0.48239714", "0.48195627", "0.48132157", "0.4806523", "0.48065037", "0.4800838", "0.48003125", "0.47907242", "0.47897658", "0.47891217", "0.477701", "0.47741193", "0.47706273", "0.47651917", "0.47613624", "0.4758751", "0.4746183", "0.47320822", "0.47309217", "0.4722114", "0.47198355", "0.47186494", "0.47156775", "0.47149315", "0.47103378", "0.47067538", "0.46995065", "0.46992338", "0.469681", "0.4693957", "0.46887198", "0.4679804", "0.46708342", "0.46680936", "0.46667975", "0.46585932", "0.4653043", "0.46518314", "0.46441582", "0.4643674", "0.46426937", "0.4639784", "0.4634477", "0.4634176", "0.46337837", "0.4630953", "0.46264398", "0.4625945", "0.46161684", "0.46148142", "0.46131024", "0.46089762" ]
0.79078573
0
Generates the steric beads required for checking for steric clashes between motifs. Each residue has three beads, modeled after the typical three-bead models used in coarse-grained modeling. The three beads are: Phosphate (P, OP1, OP2), Sugar (O5',C5',C4',O4',C3',O3',C1',C2',O2') and Base (all remaining atoms).
def get_beads(self):
    phos_atoms,sugar_atoms,base_atoms = [],[],[]
    for i,a in enumerate(self.atoms):
        if a is None:
            continue
        if i < 3:
            phos_atoms.append(a)
        elif i < 12:
            sugar_atoms.append(a)
        else:
            base_atoms.append(a)

    beads = []
    types = [residue.BeadType.PHOS, residue.BeadType.SUGAR, residue.BeadType.BASE]
    for i,alist in enumerate([phos_atoms,sugar_atoms,base_atoms]):
        if len(alist) > 0:
            beads.append(residue.Bead(util.center(alist), types[i]))
    return beads
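The index ranges in get_beads encode the three-bead grouping described in the query above. The following is a minimal illustrative sketch of that mapping, assuming the standard residue atom ordering (P, OP1, OP2 first, then the nine sugar atoms, then all base atoms); the helper name group_for_index is hypothetical and is not part of the original module or dataset record.

# Illustrative sketch only: shows how the atom-index ranges used in get_beads
# map onto the three coarse-grain beads. Atom names assume the ordering
# described above; group_for_index is a hypothetical helper added for clarity.
PHOSPHATE_ATOMS = ("P", "OP1", "OP2")                                          # indices 0-2
SUGAR_ATOMS = ("O5'", "C5'", "C4'", "O4'", "C3'", "O3'", "C1'", "C2'", "O2'")  # indices 3-11

def group_for_index(i):
    # indices 0-2 -> phosphate bead, 3-11 -> sugar bead, 12 and above -> base bead
    if i < 3:
        return "phosphate"
    elif i < 12:
        return "sugar"
    return "base"

assert [group_for_index(i) for i in (0, 3, 12)] == ["phosphate", "sugar", "base"]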
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_bespoke_bond_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_smiles(\"CC\")\n\n bond_smirks = gen._get_bespoke_bond_smirks(molecule=mol)\n # there should be 2 unique bond smirks\n assert len(bond_smirks) == 2\n all_bonds = []\n for smirk in bond_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n all_bonds.extend(atoms)\n assert set(atoms) == smirk.atoms\n # make sure all bonds are covered\n for bond in mol.bonds:\n assert (bond.atom1_index, bond.atom2_index) in all_bonds", "def test_get_all_bespoke_smirks():\n gen = SmirksGenerator()\n gen.target_smirks = [SmirksType.Vdw, SmirksType.Bonds, SmirksType.Angles, SmirksType.ProperTorsions]\n\n mol = Molecule.from_smiles(\"CO\")\n\n all_bespoke_smirks = gen._get_all_bespoke_smirks(molecule=mol, forcefield_editor=ForceFieldEditor(\"openff_unconstrained-1.3.0.offxml\"))\n # this is a list of all bespoke smirks with real initial values\n all_matches = []\n for smirk in all_bespoke_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n assert compare_matches(atoms, smirk.atoms) is True\n all_matches.extend(atoms)\n\n assert all_covered(all_matches, mol) is True", "def test_bespoke_torsion_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_file(get_data(\"OCCO.sdf\"))\n\n torsion_smirks = gen._get_bespoke_torsion_smirks(molecule=mol)\n # there should be 5 unique torsions\n assert len(torsion_smirks) == 5\n\n all_torsions = []\n for smirk in torsion_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n all_torsions.extend(atoms)\n assert compare_matches(atoms, smirk.atoms) is True\n\n for torsion in mol.propers:\n dihedral = tuple([atom.molecule_atom_index for atom in torsion])\n assert dihedral in all_torsions or tuple(reversed(dihedral)) in all_torsions", "def guess_potentialisation(self, sysargs):\n\n print(\"Guessing potentialisation...\")\n print(\"Copying reference basis...\")\n shutil.copyfile(self.reference_guess_basis_path, os.path.join(os.getcwd(), 'basis'))\n\n sp2_replacement_list = []\n sp2_deletion_list = []\n sp2_carbon_list = []\n sp3_replacement_list = []\n sp3_deletion_list = []\n sp3_carbon_list =[]\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n\n # Sort through carbons to decide what needs potentialising. 
Find atoms bonded to each carbon\n for atom in carbon_atoms:\n distanced_atoms = self.order_atoms_by_distance_from(atom['#'])\n nearest_4_distances = [self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) for distanced_atom in\n distanced_atoms[1:5]]\n bonded_distances = [less_than_distance for less_than_distance in nearest_4_distances if\n less_than_distance < self.bond_deciding_distance]\n\n # if 3 bonded atoms, may be sp2, check if they're hydrogens\n if len(bonded_distances) == 3:\n hydrogens_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_atoms[1:5] if\n distanced_atom['el'] == 'h' and self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) < self.bond_deciding_distance]\n sp2_deletion_list.extend([hydrogen['#'] for hydrogen in hydrogens_bonded_to_this_atom])\n sp2_replacement_list.append(str(atom['#']))\n sp2_carbon_list.append(atom)\n\n # if 4 bonded atoms, may be sp3, check if they're hydrogens\n elif len(bonded_distances) == 4:\n hydrogens_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_atoms[1:5] if\n distanced_atom['el'] == 'h' and self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) < self.bond_deciding_distance]\n if len(hydrogens_bonded_to_this_atom) == 3:\n sp3_replacement_list.extend([str(hydrogen['#']) for hydrogen in hydrogens_bonded_to_this_atom])\n sp3_deletion_list.extend([hydrogen['#'] for hydrogen in hydrogens_bonded_to_this_atom])\n sp3_carbon_list.append(atom)\n\n log_file = open('pseudification.log', 'w+')\n log_file.writelines(\n 'sp2 carbon indices: %s \\nsp3 carbon indices: %s \\n' % (\n ','.join(str(carbon['#']) for carbon in sp2_carbon_list),\n ','.join(str(carbon['#']) for carbon in sp3_carbon_list)\n ))\n\n sp2_coord_command = 'mn sp2 %s' % (','.join(sp2_replacement_list))\n print(\"sp2 command: %s\" % sp2_coord_command)\n sp3_coord_command = 'mn sp3 %s' % (','.join(sp3_replacement_list))\n print(\"sp3 command: %s\" % sp3_coord_command)\n\n if 'nosp3' not in sysargs:\n self.pseudopotentialise_ethane_like_molecule(sp3_coord_command.split(), execute_deletion=False)\n self.pseudopotentialise_molecule(sp2_coord_command.split(), execute_deletion=False)\n\n self.delete_specified_atoms(sp2_deletion_list + sp3_deletion_list)\n\n print(\"Identifying 2-electron sp2 carbons...\")\n # Now need to work out where the 2e sp2 carbons are\n self.coord_list = []\n self.read_coords()\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n sp2_pseudocarbon_list = []\n\n for atom in carbon_atoms:\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n # if 6 atoms within pseudo-distance this is an sp2 pseudo-carbon\n if len(carbon_pseudos) == 6:\n sp2_pseudocarbon_list.append(atom)\n print(\"Re-discovered %s sp2 carbons.\" % str(len(sp2_pseudocarbon_list)))\n\n # Now check for ncore=4 sp2 pseudocarbons\n pseudopotential_hashes_to_delete = []\n for atom in sp2_pseudocarbon_list:\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n carbons_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_carbon_list[1:5] if\n self.measure_atom_atom_dist(atom['#'],\n distanced_atom[\n '#']) < self.bond_deciding_distance]\n print(\"Carbons bonded to atom %s: %s\" % (str(atom['#']),\n str([carbon['#'] for carbon in carbons_bonded_to_this_atom])))\n\n for carbon_bonded_to_this_atom in carbons_bonded_to_this_atom:\n if carbon_bonded_to_this_atom not in sp2_pseudocarbon_list:\n def distance_from(list_atom):\n return 
self.measure_atom_atom_dist(carbon_bonded_to_this_atom['#'], list_atom['#'])\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n # find pseudos closest to the other carbon\n pseudos_distanced_from_sp2_2e = sorted(carbon_pseudos, key=distance_from)\n pseudopotential_hashes_to_delete.append(pseudos_distanced_from_sp2_2e[0]['#'])\n pseudopotential_hashes_to_delete.append(pseudos_distanced_from_sp2_2e[1]['#'])\n\n self.delete_specified_atoms(pseudopotential_hashes_to_delete)\n\n # Read final coordinates\n self.coord_list = []\n self.read_coords()\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n sp2_pseudocarbon_list = []\n sp2_2e_pseudocarbon_list = []\n sp2_2e_pseudohydrogen_list = []\n sp3_pseudocarbon_list = []\n\n for atom in carbon_atoms:\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n\n # if 3 atoms within pseudo-distance this is an sp3 pseudo-carbon\n if len(carbon_pseudos) == 3:\n sp3_pseudocarbon_list.append(atom)\n\n # if 4 atoms within pseudo-distance this is an sp2 2e pseudo-carbon\n elif len(carbon_pseudos) == 4:\n sp2_2e_pseudocarbon_list.append(atom)\n sp2_2e_pseudohydrogen_list.extend(carbon_pseudos)\n\n # if 6 atoms within pseudo-distance this is an sp2 pseudo-carbon\n elif len(carbon_pseudos) == 6:\n sp2_pseudocarbon_list.append(atom)\n\n\n log_file.writelines(\n 'sp2 pseudocarbon indices: %s \\nsp3 pseudocarbon indices: %s\\nsp2 2e pseudocarbon indices: %s\\nsp2 2e pseudohydrogen indices: %s\\n' % (\n ','.join(str(carbon['#']) for carbon in sp2_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp3_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp2_2e_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp2_2e_pseudohydrogen_list)\n ))\n\n # Need to supply potentials to atoms\n define_cmds_path = 'define_add_pseudos'\n with open(os.path.join(define_cmds_path), 'w') as var_file:\n var_file.writelines(define_cmds % (\n # sp2 potentials\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_pseudocarbon_list], 'ecp', self.sp2_carbon_ecp),\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'ecp', self.sp2_hydrogen_ecp),\n # sp3 potentials\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp3_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp3_pseudocarbon_list], 'ecp', self.sp3_carbon_ecp),\n self.supply_ecps_bases_to_define(self.sp3_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define(self.sp3_pseudo_element, 'ecp', self.sp3_hydrogen_ecp),\n # sp2 2e potentials\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define([hydrogen['#'] for hydrogen in sp2_2e_pseudohydrogen_list], 'ecp', self.sp2_2e_hydrogen_ecp),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_2e_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_2e_pseudocarbon_list], 'ecp', self.sp2_2e_carbon_ecp),\n ))\n\n self.run_define('define_add_pseudos')", "def test_bespoke_target_torsion_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_file(get_data(\"OCCO.sdf\"))\n\n torsion_smirks = gen._get_bespoke_torsion_smirks(molecule=mol, central_bonds=[(1, 2)])\n # there should be 3 unique 
smirks for this molecule\n # H-C-C-H, H-C-C-O, O-C-C-O\n assert len(torsion_smirks) == 3\n for smirk in torsion_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n assert compare_matches(atoms, smirk.atoms) is True", "def generate(self, analysis):\n\n #analysis = ['p','a','n','i','c','+past form']\n # Let's define our first FST\n\n f1 = FST('morphology-generate')\n \n f1.add_state('1')\n f1.add_state('2')\n f1.add_state('3')\n f1.add_state('4')\n f1.add_state('5') \n f1.add_state('6') #non-c state\n f1.add_state('7') #c state\n f1.add_state('8') #add k\n f1.add_state('9') #+present \n f1.add_state('10') #+past\n \n f1.initial_state = '1'\n #f1.set_final('8')\n f1.set_final('9')\n f1.set_final('10')\n \n #state 1 to 2, and 2 to 3. we don't care about vowel or consonant here\n for letter in list(string.ascii_letters):\n f1.add_arc('1', '2', letter, letter)\n f1.add_arc('2', '3', letter, letter)\n \n #3 to 5 input/output consonants\n vowels = ['a','e','i','o','u','A','E','I','O','U']\n consonants = [c for c in list(string.ascii_letters) if c not in vowels]\n non_c_con = [c for c in consonants if c not in ['c', 'C']]\n for letter in consonants:\n f1.add_arc('3', '5', letter, letter)\n f1.add_arc('5', '5', letter, letter)\n \n #the third and fourth input should be a vowel\n for letter in vowels:\n f1.add_arc('3', '4', letter, letter)\n f1.add_arc('4', '4', letter, letter)\n \n #if the fourth input is a non c consonant, go to 5\n for letter in non_c_con:\n f1.add_arc('4', '5', letter, letter)\n \n #if the input at state 5 is a vowel, go back to 4 \n for letter in vowels:\n f1.add_arc('5', '4', letter, letter)\n \n #if the second last letter is a c, go to 7\n f1.add_arc('4', '7', 'c', 'c')\n \n #add k after 7\n f1.add_arc('7', '8', '', 'k')\n #output nothing from 5 to 8\n f1.add_arc('5', '8', '', '')\n \n f1.add_arc('8','9','+present participle form','ing')\n f1.add_arc('8','10','+past form','ed')\n \n output = f1.transduce(analysis)[0]\n return ''.join(output)", "def test_bespoke_angle_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_smiles(\"CC\")\n\n angle_smirks = gen._get_bespoke_angle_smirks(molecule=mol)\n # there should be 2 unique smirks\n assert len(angle_smirks) == 2\n all_angles = []\n for smirk in angle_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n all_angles.extend(atoms)\n assert set(atoms) == smirk.atoms\n # make sure all angles are covered\n for angle in mol.angles:\n assert tuple([atom.molecule_atom_index for atom in angle]) in all_angles", "def generate_SBB_representation (nffg, add_sg_hops=False,\n log=logging.getLogger(\"SBB\")):\n if nffg is None:\n log.error(\"Missing global resource info! 
Skip OneBisBis generation!\")\n return None\n # Create Single BiSBiS NFFG\n log.debug(\"Generate trivial SingleBiSBiS NFFG based on %s:\" % nffg)\n log.debug(\"START SBB generation...\")\n sbb = NFFG(id=\"SingleBiSBiS\", name=\"Single-BiSBiS-View\")\n # Create the single BiSBiS infra\n sbb_infra = sbb.add_infra(id=\"SingleBiSBiS\",\n name=\"SingleBiSBiS\",\n domain=NFFG.DEFAULT_DOMAIN,\n infra_type=NFFG.TYPE_INFRA_BISBIS)\n # Compute and add resources\n # Sum of available CPU\n try:\n sbb_infra.resources.cpu = sum(\n # If iterator is empty, sum got None --> TypeError thrown by sum\n (n.resources.cpu for n in nffg.infras if\n n.resources.cpu is not None) or None)\n except TypeError:\n sbb_infra.resources.cpu = None\n # Sum of available memory\n try:\n sbb_infra.resources.mem = sum(\n # If iterator is empty, sum got None --> TypeError thrown by sum\n (n.resources.mem for n in nffg.infras if\n n.resources.mem is not None) or None)\n except TypeError:\n sbb_infra.resources.mem = None\n # Sum of available storage\n try:\n sbb_infra.resources.storage = sum(\n # If iterator is empty, sum got None --> TypeError thrown by sum\n (n.resources.storage for n in nffg.infras if\n n.resources.storage is not None) or None)\n except TypeError:\n sbb_infra.resources.storage = None\n # Minimal available delay value of infras and links in DoV\n try:\n # Get the minimum delay in Dov to avoid false negative mapping result\n sbb_infra.resources.delay = min(itertools.chain(\n # If the chained iterators is empty --> ValueError thrown by sum\n (n.resources.delay for n in nffg.infras if\n n.resources.delay is not None),\n (l.delay for l in nffg.links if l.delay is not None)))\n except ValueError:\n sbb_infra.resources.delay = None\n # Maximum available bandwidth value of infras and links in DoV\n try:\n max_bw = max(itertools.chain(\n (n.resources.bandwidth for n in nffg.infras if\n n.resources.bandwidth is not None),\n (l.bandwidth for l in nffg.links if l.bandwidth is not None)))\n # Number of infras and links in DoV\n sum_infra_link = sum(1 for _ in itertools.chain(nffg.infras, nffg.links))\n # Overestimate switching capacity to avoid false positive mapping result\n sbb_infra.resources.bandwidth = max_bw * sum_infra_link\n except ValueError:\n sbb_infra.resources.bandwidth = None\n log.debug(\"Computed SingleBiBBiS resources: %s\" % sbb_infra.resources)\n # Add supported types\n s_types = set()\n for infra in nffg.infras:\n s_types = s_types.union(infra.supported)\n sbb_infra.add_supported_type(s_types)\n log.debug(\"Added supported types: %s\" % s_types)\n log.debug(\"Added Infra BiSBiS: %s\" % sbb_infra)\n log.log(5, \"SBB:\\n%s\" % sbb_infra.dump())\n # Add existing NFs\n for nf in nffg.nfs:\n c_nf = sbb.add_nf(nf=nf.copy())\n log.debug(\"Added NF: %s\" % c_nf)\n log.log(5, \"NF:\\n%s\" % nf.dump())\n # Discover and add NF connections\n for u, v, l in nffg.real_out_edges_iter(nf.id):\n if l.type != NFFG.TYPE_LINK_DYNAMIC:\n continue\n # Explicitly add links for both direction\n link1, link2 = sbb.add_undirected_link(port1=c_nf.ports[l.src.id],\n port2=sbb_infra.add_port(\n id=l.dst.id),\n p1p2id=l.id,\n p2p1id=\"%s-back\" % l.id,\n dynamic=True,\n delay=l.delay,\n bandwidth=l.bandwidth)\n log.debug(\"Added connection: %s\" % link1)\n log.debug(\"Added connection: %s\" % link2)\n # Use SAP id --> SBB port id cache for delay matrix calculation\n delay_matrix_cache = {}\n # Add existing SAPs and their connections to the SingleBiSBiS infra\n for sap in nffg.saps:\n c_sap = sbb.add_sap(sap_obj=sap.copy())\n 
log.debug(\"Added SAP: %s\" % c_sap)\n log.log(5, \"SAP:\\n%s\" % c_sap.dump())\n # Discover and add SAP connections\n for u, v, l in nffg.real_out_edges_iter(sap.id):\n if len(sap.ports) > 1:\n log.warning(\"SAP contains multiple port!\")\n sbb_infra_port = sbb_infra.add_port(id=str(c_sap.id),\n sap=sap.ports.container[0].sap)\n # Explicitly add links for both direction\n link1, link2 = sbb.add_undirected_link(port1=c_sap.ports[l.src.id],\n port2=sbb_infra_port,\n p1p2id=l.id,\n p2p1id=\"%s-back\" % l.id,\n delay=l.delay,\n bandwidth=l.bandwidth)\n log.debug(\"Added connection: %s\" % link1)\n log.debug(\"Added connection: %s\" % link2)\n delay_matrix_cache[c_sap.id] = sbb_infra_port.id\n # Shortest paths in format of dict in dict keyed with node ids\n # e.g. SAP2 --> EE1 --> 4.9\n latency_paths = NFFGToolBox.shortestPathsInLatency(G=nffg.network)\n log.log(5, \"Calculated latency paths for delay matrix:\\n%s\"\n % pprint.pformat(latency_paths))\n log.log(5, \"Collected SAP ports for delay matrix:\\n%s\"\n % pprint.pformat(delay_matrix_cache))\n dm_elements = itertools.permutations(delay_matrix_cache.keys(), 2)\n for src, dst in dm_elements:\n if src not in latency_paths:\n log.warning(\"Missing node: %s for latency paths: %s!\"\n % (src, (src, dst)))\n continue\n if dst not in latency_paths[src]:\n log.warning(\"Missing node: %s for latency paths: %s!\"\n % (src, (src, dst)))\n else:\n sbb_infra.delay_matrix.add_delay(src=src,\n dst=dst,\n delay=latency_paths[src][dst])\n log.debug(\"Added delay matrix element [%s --> %s]: %s\"\n % (src, dst, latency_paths[src][dst]))\n # Recreate flowrules based on NBalazs functions\n sg_hop_info = NFFGToolBox.get_all_sghop_info(nffg=nffg)\n log.debug(\"Detected SG hop info:\\n%s\" % pprint.pformat(sg_hop_info))\n log.debug(\"Recreate flowrules...\")\n for sg_id, value in sg_hop_info.iteritems():\n sg_src_node = value[0].node.id\n sg_src_port = value[0].id\n sg_dst_node = value[1].node.id\n sg_dst_port = value[1].id\n flowclass = value[2]\n fr_bw = value[3]\n fr_delay = value[4]\n fr_hop = sg_id\n sbb_src_port = [l.dst for u, v, l in\n sbb.network.out_edges_iter(sg_src_node, data=True) if\n l.src.id == sg_src_port and l.src.node.id == sg_src_node]\n if len(sbb_src_port) < 1:\n log.warning(\"No opposite Port(node: %s, id: %s) was found for SG hop: \"\n \"%s in new SingleBiSBiS node\" % (\n sg_src_node, sg_src_port, fr_hop))\n continue\n if len(sbb_src_port) > 1:\n log.warning(\"Too much Port(node: %s, id: %s) was found for SG hop: \"\n \"%s in new SingleBiSBiS node: %s\" % (\n sg_src_node, sg_src_port, fr_hop, sbb_src_port))\n continue\n sbb_src_port = sbb_src_port.pop()\n sbb_dst_port = [l.dst for u, v, l in\n sbb.network.out_edges_iter(sg_dst_node, data=True) if\n l.src.id == sg_dst_port and l.src.node.id == sg_dst_node]\n if len(sbb_dst_port) < 1:\n log.warning(\"No opposite Port(node: %s, id: %s) was found for SG hop: \"\n \"%s in new SingleBiSBiS node\" % (\n sg_dst_node, sg_dst_port, fr_hop))\n continue\n if len(sbb_dst_port) > 1:\n log.warning(\"Too much Port(node: %s, id: %s) was found for SG hop: \"\n \"%s in new SingleBiSBiS node: %s\" % (\n sg_dst_node, sg_dst_port, fr_hop, sbb_dst_port))\n continue\n sbb_dst_port = sbb_dst_port.pop()\n if flowclass:\n fr_match = \"in_port=%s;flowclass=%s\" % (sbb_src_port.id, flowclass)\n else:\n fr_match = \"in_port=%s\" % sbb_src_port.id\n fr_action = \"output=%s\" % sbb_dst_port.id\n if value[0].node.type == NFFG.TYPE_SAP and \\\n value[1].node.type == NFFG.TYPE_NF and \\\n value[0].sap is not None:\n # 
Update action for flowrule connecting inter-domain SAP to NF\n fr_action += \";UNTAG\"\n fr = sbb_src_port.add_flowrule(id=fr_hop,\n match=fr_match,\n action=fr_action,\n bandwidth=fr_bw,\n delay=fr_delay, )\n log.debug(\"Added flowrule: %s\" % fr)\n if add_sg_hops:\n log.debug(\"Recreate SG hops...\")\n for sg_id, value in sg_hop_info.iteritems():\n sg_src_port = value[0]\n sg_dst_port = value[1]\n hop_fc = value[2]\n hop_bw = value[3]\n hop_delay = value[4]\n sg = sbb.add_sglink(id=sg_id,\n src_port=sg_src_port,\n dst_port=sg_dst_port,\n flowclass=hop_fc,\n delay=hop_delay,\n bandwidth=hop_bw)\n log.debug(\"Added SG hop: %s\" % sg)\n else:\n log.debug(\"Skip SG hop recreation for the SingleBiSBiS!\")\n NFFGToolBox.rewrite_interdomain_tags([(sbb.id, sbb)])\n log.debug(\"END SBB generation...\")\n # Return with Single BiSBiS infra\n return sbb", "def test_noise_model_basis_gates(self):\n basis_gates = ['u1', 'u2', 'u3', 'cx']\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n self.assertEqual(model.basis_gates, target)\n\n # Check adding readout errors doesn't add to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n model.add_all_qubit_readout_error([[0.9, 0.1], [0, 1]], False)\n self.assertEqual(model.basis_gates, target)\n model.add_readout_error([[0.9, 0.1], [0, 1]], [2], False)\n self.assertEqual(model.basis_gates, target)\n\n # Check a reset instruction error isn't added to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n model.add_all_qubit_quantum_error(reset_error(0.2), ['reset'], False)\n self.assertEqual(model.basis_gates, target)\n\n # Check a non-standard gate isn't added to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n model.add_all_qubit_quantum_error(reset_error(0.2), ['label'], False)\n self.assertEqual(model.basis_gates, target)\n\n # Check a standard gate is added to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates + ['h'])\n model.add_all_qubit_quantum_error(reset_error(0.2), ['h'], False)\n self.assertEqual(model.basis_gates, target)", "def builder(plates, start, name, assay, isolate, layout, exp_date, mic):\n plateno = 1\n rid = start # record ID\n readno = 1\n segno = 1\n for plate in plates:\n seg = plateno * 8\n startseg = seg - 8\n segment = layout[startseg:seg]\n plate_mic = mic[startseg:seg]\n with open(plate, 'r') as infile:\n # 3 reads per plate\n front = 'INSERT INTO `mic` VALUES ('\n sep = ','\n row = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\n row_num = 0\n for line in infile:\n this_row = row[row_num]\n pep = segment[row_num].split(' ')[0]\n this_mic = plate_mic[row_num]\n # note that blood is hard-coded to NA right now\n buff = [str(rid), str(assay), str(isolate), '1', str(pep), name, 'assayed', 'experiment',\n str(readno), exp_date, this_row]\n rec = line.strip().split(' ')\n buff.extend(rec)\n buff.extend([this_mic, 'NA'])\n buff_form = buff[:5] + [\"'\" + x + \"'\" for x in buff[5:]] + ['NULL', 'NULL);']\n outbuff = front + ','.join(buff_form)\n outbuff = re.sub(\"experiment','4',\",\"experiment','AVERAGE',\",outbuff)\n\n # increment counters\n rid += 1\n if row_num == 7:\n row_num = 0\n if readno == 4: # assumes 3 reads and an average\n plateno += 1\n readno = 1\n else:\n readno += 1\n else:\n row_num += 1\n\n yield outbuff", "def test_generate_smirks(bespoke_smirks):\n gen = SmirksGenerator()\n gen.target_smirks = [SmirksType.Vdw, ]\n gen.generate_bespoke_terms = bespoke_smirks\n\n mol = Molecule.from_smiles(\"CC\")\n 
smirks_list = gen.generate_smirks(molecule=mol)\n\n # we only request one parameter type\n types = set([smirk.type for smirk in smirks_list])\n assert len(types) == 1", "def schreier_sims_incremental(self, base=None, gens=None, slp_dict=False):\n if base is None:\n base = []\n if gens is None:\n gens = self.generators[:]\n degree = self.degree\n id_af = list(range(degree))\n # handle the trivial group\n if len(gens) == 1 and gens[0].is_Identity:\n if slp_dict:\n return base, gens, {gens[0]: [gens[0]]}\n return base, gens\n # prevent side effects\n _base, _gens = base[:], gens[:]\n # remove the identity as a generator\n _gens = [x for x in _gens if not x.is_Identity]\n # make sure no generator fixes all base points\n for gen in _gens:\n if all(x == gen._array_form[x] for x in _base):\n for new in id_af:\n if gen._array_form[new] != new:\n break\n else:\n assert None # can this ever happen?\n _base.append(new)\n # distribute generators according to basic stabilizers\n strong_gens_distr = _distribute_gens_by_base(_base, _gens)\n strong_gens_slp = []\n # initialize the basic stabilizers, basic orbits and basic transversals\n orbs = {}\n transversals = {}\n slps = {}\n base_len = len(_base)\n for i in range(base_len):\n transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i],\n _base[i], pairs=True, af=True, slp=True)\n transversals[i] = dict(transversals[i])\n orbs[i] = list(transversals[i].keys())\n # main loop: amend the stabilizer chain until we have generators\n # for all stabilizers\n i = base_len - 1\n while i >= 0:\n # this flag is used to continue with the main loop from inside\n # a nested loop\n continue_i = False\n # test the generators for being a strong generating set\n db = {}\n for beta, u_beta in list(transversals[i].items()):\n for j, gen in enumerate(strong_gens_distr[i]):\n gb = gen._array_form[beta]\n u1 = transversals[i][gb]\n g1 = _af_rmul(gen._array_form, u_beta)\n slp = [(i, g) for g in slps[i][beta]]\n slp = [(i, j)] + slp\n if g1 != u1:\n # test if the schreier generator is in the i+1-th\n # would-be basic stabilizer\n y = True\n try:\n u1_inv = db[gb]\n except KeyError:\n u1_inv = db[gb] = _af_invert(u1)\n schreier_gen = _af_rmul(u1_inv, g1)\n u1_inv_slp = slps[i][gb][:]\n u1_inv_slp.reverse()\n u1_inv_slp = [(i, (g,)) for g in u1_inv_slp]\n slp = u1_inv_slp + slp\n h, j, slp = _strip_af(schreier_gen, _base, orbs, transversals, i, slp=slp, slps=slps)\n if j <= base_len:\n # new strong generator h at level j\n y = False\n elif h:\n # h fixes all base points\n y = False\n moved = 0\n while h[moved] == moved:\n moved += 1\n _base.append(moved)\n base_len += 1\n strong_gens_distr.append([])\n if y is False:\n # if a new strong generator is found, update the\n # data structures and start over\n h = _af_new(h)\n strong_gens_slp.append((h, slp))\n for l in range(i + 1, j):\n strong_gens_distr[l].append(h)\n transversals[l], slps[l] =\\\n _orbit_transversal(degree, strong_gens_distr[l],\n _base[l], pairs=True, af=True, slp=True)\n transversals[l] = dict(transversals[l])\n orbs[l] = list(transversals[l].keys())\n i = j - 1\n # continue main loop using the flag\n continue_i = True\n if continue_i is True:\n break\n if continue_i is True:\n break\n if continue_i is True:\n continue\n i -= 1\n\n strong_gens = _gens[:]\n\n if slp_dict:\n # create the list of the strong generators strong_gens and\n # rewrite the indices of strong_gens_slp in terms of the\n # elements of strong_gens\n for k, slp in strong_gens_slp:\n strong_gens.append(k)\n for i in 
range(len(slp)):\n s = slp[i]\n if isinstance(s[1], tuple):\n slp[i] = strong_gens_distr[s[0]][s[1][0]]**-1\n else:\n slp[i] = strong_gens_distr[s[0]][s[1]]\n strong_gens_slp = dict(strong_gens_slp)\n # add the original generators\n for g in _gens:\n strong_gens_slp[g] = [g]\n return (_base, strong_gens, strong_gens_slp)\n\n strong_gens.extend([k for k, _ in strong_gens_slp])\n return _base, strong_gens", "def create_GO(init_file, no_COOH, no_epoxy, no_OH, filename1):\n global atoms\n global bond_list\n bond_list = bond_list_1\n atoms = read_in_graphene(init_file)\n global anywhere_map\n anywhere_map = get_map_anywhere(atoms)\n global edge_map\n edge_map = get_map_edge(atoms)\n \n list_residue_numbers = [x.residue_number for x in atoms]\n added_functional_groups = max(list_residue_numbers)\n \n must_add = no_COOH + no_epoxy + no_OH\n while (must_add > 0):\n print(\"Left to add: \", \"cooh: \", no_COOH, \"epoxy: \", no_epoxy, \"hydroxyl: \", no_OH)\n chosen = random.choice(pick_to_add(no_COOH, no_epoxy, no_OH))\n if (chosen == \"carboxyl\"):\n attempt = 0\n while (attempt < 50):\n old_length = len(atoms)\n new_atoms = add_carboxyl(random_pick_spot(\"carboxyl\", edge_map, anywhere_map), atoms, added_functional_groups, top_or_down())\n if (old_length != len(new_atoms)):\n atoms = new_atoms\n added_functional_groups += 1\n must_add -= 1\n no_COOH -= 1\n attempt = 1888\n else:\n attempt += 1\n if (attempt == 50):\n must_add = -1\n elif (chosen == \"epoxy\"): \n attempt = 0\n while (attempt < 50):\n old_length = len(atoms)\n new_atoms = add_epoxy(random_pick_spot(\"epoxy\", edge_map, anywhere_map), atoms, added_functional_groups, top_or_down())\n if (old_length != len(new_atoms)):\n atoms = new_atoms\n added_functional_groups += 1\n must_add -= 1\n no_epoxy -= 1\n attempt = 1888\n else:\n attempt += 1\n if (attempt == 50):\n must_add = -1\n elif (chosen == \"hydroxyl\"):\n attempt = 0\n while (attempt < 50):\n old_length = len(atoms)\n new_atoms = add_hydroxyl(random_pick_spot(\"hydroxyl\", edge_map, anywhere_map), atoms, added_functional_groups, top_or_down())\n if (old_length != len(new_atoms)):\n atoms = new_atoms\n added_functional_groups += 1\n must_add -= 1\n no_OH -=1\n attempt = 1888 \n else:\n attempt += 1\n if (attempt == 50):\n must_add = -1\n atno = 1\n new_list = []\n for atom in atoms:\n if (atom.atom_name == \"CX\"):\n New_CX = Atom(atno, \"CX\", \"GGG\", atno, atom.x, atom.y, atom.z)\n new_list.append(New_CX)\n atno += 1 \n \n for atom in atoms:\n if (atom.atom_name == \"C4\"):\n check = False\n for atom_CY in atoms:\n if ((atom_CY.atom_name == \"CY\") and (atom_CY.residue_name == \"C1A\") and (atom_CY.residue_number == atom.residue_number)):\n for atom_OJ in atoms:\n if ((atom_OJ.atom_name == \"OJ\") and (atom_OJ.residue_name == \"C1A\") and (atom_OJ.residue_number == atom.residue_number)):\n for atom_OK in atoms:\n if ((atom_OK.atom_name == \"OK\") and (atom_OK.residue_name == \"C1A\") and (atom_OK.residue_number == atom.residue_number)):\n for atom_HK in atoms:\n if ((atom_HK.atom_name == \"HK\") and (atom_HK.residue_name == \"C1A\") and (atom_HK.residue_number == atom.residue_number)):\n New_CY = Atom(atno + 0, \"CY\", \"C1A\", atom.residue_number, atom_CY.x, atom_CY.y, atom_CY.z )\n New_C4 = Atom(atno + 1, \"C4\", \"C1A\", atom.residue_number, atom.x, atom.y, atom.z)\n New_OJ = Atom(atno + 2, \"OJ\", \"C1A\", atom.residue_number, atom_OJ.x, atom_OJ.y, atom_OJ.z)\n New_OK = Atom(atno + 3, \"OK\", \"C1A\", atom.residue_number, atom_OK.x, atom_OK.y, atom_OK.z)\n New_HK = 
Atom(atno + 4, \"HK\", \"C1A\", atom.residue_number, atom_HK.x, atom_HK.y, atom_HK.z)\n atno += 5\n new_list.append(New_CY); new_list.append(New_C4); new_list.append(New_OJ); new_list.append(New_OK); new_list.append(New_HK);\n check = True\n break\n if (check == True):\n break\n if (check == True):\n break\n if (check == True):\n break \n \n elif (atom.atom_name == \"OE\"): \n check = False\n for atom_CY in atoms:\n if ((atom_CY.atom_name == \"CY\") and (atom_CY.residue_name == \"E1A\") and (atom_CY.residue_number == atom.residue_number)):\n for atom_CY2 in atoms: \n if ((atom_CY2.atom_name == \"CZ\") and (atom_CY2.residue_name == \"E1A\") and (atom_CY2.residue_number == atom.residue_number) and (atom_CY2 != atom_CY)):\n New_CY = Atom( atno + 0, \"CY\", \"E1A\", atom.residue_number, atom_CY.x, atom_CY.y, atom_CY.z)\n New_CY2 = Atom(atno + 1, \"CZ\", \"E1A\", atom.residue_number, atom_CY2.x, atom_CY2.y, atom_CY2.z)\n New_OE = Atom( atno + 2, \"OE\", \"E1A\", atom.residue_number, atom.x, atom.y, atom.z)\n atno += 3\n new_list.append(New_CY); new_list.append(New_CY2); new_list.append(New_OE);\n check = True\n break\n if (check == True):\n break\n elif (atom.atom_name == \"OL\"):\n check = False\n for atom_CY in atoms:\n if ((atom_CY.atom_name == \"CY\") and (atom_CY.residue_name == \"H1A\") and (atom_CY.residue_number == atom.residue_number)):\n for atom_HK in atoms:\n if ((atom_HK.atom_name == \"HK\") and (atom_HK.residue_name == \"H1A\") and (atom_HK.residue_number == atom.residue_number)):\n New_CY = Atom(atno + 0, \"CY\", \"H1A\", atom.residue_number, atom_CY.x, atom_CY.y, atom_CY.z)\n New_OL = Atom(atno + 1, \"OL\", \"H1A\", atom.residue_number, atom.x, atom.y, atom.z)\n New_HK = Atom(atno + 2, \"HK\", \"H1A\", atom.residue_number, atom_HK.x, atom_HK.y, atom_HK.z)\n atno += 3\n new_list.append(New_CY); new_list.append(New_OL); new_list.append(New_HK);\n check = True\n break\n if (check == True):\n break\n \n atoms = new_list.copy()\n writepdb(atoms, filename1)\n sum_c1a = 0; sum_e1a = 0; sum_h1a = 0; sum_ggg = 0\n for atom in atoms:\n if (atom.residue_name == \"C1A\"):\n sum_c1a += 1\n elif (atom.residue_name == \"E1A\"):\n sum_e1a += 1\n elif (atom.residue_name == \"H1A\"):\n sum_h1a += 1\n elif (atom.residue_name == \"GGG\"):\n sum_ggg += 1\n print(\"Placed:\")\n print(\"carboxyl: \", sum_c1a/5)\n print(\"epoxy: \", sum_e1a/3)\n print(\"hydroxyl: \", sum_h1a/3)\n print(\"graphene atoms (CX - GGG) left: \", sum_ggg)\n return 'done.'", "def generate_bnd(cli_file, geo_file, slf_file, bnd_file, varnames, varunits):\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ cli+slf new mesh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if not path.exists(cli_file):\n raise TelemacException(\\\n '... the provided cli_file does not seem to exist:'\n ' {}\\n\\n'.format(cli_file))\n if not path.exists(geo_file):\n raise TelemacException(\\\n '... 
the provided geo_file does not seem to exist: '\n '{}\\n\\n'.format(geo_file))\n\n if len(varnames) != len(varunits):\n raise TelemacException(\\\n 'Not the same number of variables and units\\nvarnames: {}\\nvarunits: {}'\n '{}\\n\\n'.format(varnames, varunits))\n\n\n # Read the new CLI file to get boundary node numbers\n print(' +> getting hold of the Conlim file and of its liquid boundaries')\n cli = Conlim(cli_file)\n # Keeping only open boundary nodes\n bor = np.extract(cli.bor['lih'] != 2, cli.bor['n'])\n\n # Find corresponding (x,y) in corresponding new mesh\n print(' +> getting hold of the GEO file and of its bathymetry')\n geo = Selafin(geo_file)\n xys = np.vstack((geo.meshx[bor-1], geo.meshy[bor-1])).T\n _ = geo.get_variables_at(0,\\\n subset_variables_slf(\"BOTTOM: \", geo.varnames)[0])[0]\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ slf existing res ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if not path.exists(slf_file):\n raise TelemacException(\\\n '... the provided slf_file does not seem to exist: '\n '{}\\n\\n'.format(slf_file))\n slf = Selafin(slf_file)\n slf.set_kd_tree()\n slf.set_mpl_tri()\n\n print(' +> support extraction')\n # Extract triangles and weigths in 2D\n support2d = []\n ibar = 0\n pbar = ProgressBar(maxval=len(xys)).start()\n for xyi in xys:\n support2d.append(xys_locate_mesh(xyi, slf.ikle2, slf.meshx, slf.meshy,\n slf.tree, slf.neighbours))\n ibar += 1\n pbar.update(ibar)\n pbar.finish()\n # Extract support in 3D\n support3d = list(zip(support2d, len(xys)*[range(slf.nplan)]))\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ writes BND header ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n bnd = Selafin('')\n bnd.fole = {}\n bnd.fole.update({'hook':open(bnd_file, 'wb')})\n bnd.fole.update({'name':bnd_file})\n bnd.fole.update({'endian':\">\"}) # big endian\n bnd.fole.update({'float':('f', 4)}) # single precision\n\n # Meta data and variable names\n bnd.title = ''\n bnd.nbv1 = len(varnames)\n # /!\\ ELEVATION has to be the first variable\n # (for possible vertical re-interpolation within TELEMAC)\n\n bnd.varnames = []\n bnd.varunits = []\n for var, unit in zip(varnames, varunits):\n new_var = var + (16-len(var))*\" \"\n new_unit = unit + (16-len(unit))*\" \"\n bnd.varnames.append(new_var)\n bnd.varunits.append(new_unit)\n\n bnd.nvar = bnd.nbv1\n bnd.varindex = range(bnd.nvar)\n\n # Sizes and mesh connectivity\n bnd.nplan = slf.nplan\n # Number of nodes per boundary element (ndp2 in 2D and ndp3 in 3D)\n bnd.ndp2 = 2\n bnd.ndp3 = 4\n bnd.npoin2 = len(bor)\n bnd.npoin3 = bnd.npoin2*slf.nplan\n bnd.iparam = [0, 0, 0, 0, 0, 0, bnd.nplan, 0, 0, 1]\n bnd.ipob2 = bor # /!\\ note that ipobo keeps the original numbering\n print(' +> masking and setting connectivity')\n # Set the array that only includes elements of geo.ikle2\n # with at least two nodes in bor\n array_1d = np.in1d(geo.ikle2, np.sort(bor-1))\n mask = geo.ikle2[np.where(np.sum(array_1d.reshape(geo.nelem2, geo.ndp2),\n axis=1) == 2)]\n # this ikle2 keeps the original numbering\n ikle2 = np.ravel(mask)[np.in1d(mask, np.sort(bor-1))].reshape(len(mask), 2)\n # ~~> re-numbering ikle2 as a local connectivity matrix\n knolg, _ = np.unique(np.ravel(ikle2), return_index=True)\n knogl = dict(zip(knolg, range(len(knolg))))\n bnd.ikle2 = - np.ones_like(ikle2, dtype=np.int)\n for k in range(len(ikle2)):\n # /!\\ bnd.ikle2 has a local numbering, fit to the boundary elements\n bnd.ikle2[k] = [knogl[ikle2[k][0]], knogl[ikle2[k][1]]]\n # Last few 
numbers\n bnd.nelem2 = len(bnd.ikle2)\n if slf.nplan > 1:\n bnd.nelem3 = bnd.nelem2*(slf.nplan-1)\n else:\n bnd.nelem3 = bnd.nelem2\n bnd.ndp3 = bnd.ndp2\n # 3D structures\n if slf.nplan > 1:\n bnd.ipob3 = np.ravel(np.add(np.repeat(bnd.ipob2, slf.nplan)\\\n .reshape((bnd.npoin2, slf.nplan)),\n bnd.npoin2*np.arange(slf.nplan)).T)\n bnd.ikle3 = \\\n np.repeat(bnd.npoin2*np.arange(slf.nplan-1),\n bnd.nelem2*bnd.ndp3)\\\n .reshape((bnd.nelem2*(slf.nplan-1), bnd.ndp3)) + \\\n np.tile(np.add(np.tile(bnd.ikle2, 2),\n np.repeat(bnd.npoin2*np.arange(2), bnd.ndp2)),\n (slf.nplan-1, 1))\n else:\n bnd.ipob3 = bnd.ipob2\n bnd.ikle3 = bnd.ikle2\n # Mesh coordinates\n bnd.meshx = geo.meshx[bor-1]\n bnd.meshy = geo.meshy[bor-1]\n\n print(' +> writing header')\n # Write header\n bnd.append_header_slf()\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ writes BND core ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n print(' +> setting variables')\n # TIME and DATE extraction\n bnd.datetime = slf.datetime\n bnd.tags['times'] = slf.tags['times']\n # VARIABLE extraction\n list_var = varnames[0]+\": \"\n for var in varnames[1:]:\n list_var += \";\"+var+\": \"\n\n vrs = subset_variables_slf(list_var, slf.varnames)\n\n # Read / Write data, one time step at a time to support large files\n print(' +> reading / writing variables')\n pbar = ProgressBar(maxval=len(slf.tags['times'])).start()\n zeros = np.zeros((bnd.npoin3, 1), dtype=np.float)\n for itime in range(len(slf.tags['times'])):\n data = get_value_history_slf(slf.file, slf.tags, [itime], support3d,\n slf.nvar, slf.npoin3, slf.nplan, vrs)\n data = np.reshape(np.transpose(np.reshape(np.ravel(data),\n (bnd.nvar, bnd.npoin2,\n bnd.nplan)),\n (0, 2, 1)),\n (bnd.nvar, bnd.npoin3))\n bnd.append_core_time_slf(itime)\n bnd.append_core_vars_slf(data)\n pbar.update(itime)\n pbar.finish()\n\n # Close bnd_file\n bnd.fole['hook'].close()", "def twostr_func(wavelength, F_s, solarzenithangle,albedo_dif, \n\t\t\talbedo_dir, temp_ground, w_0, g, tau_n, temp_c):\n\t\n\t########################\n\t###Import useful libraries\n\t########################\n\timport numpy as np\n\timport pdb\n\timport scipy.linalg\n\n\n\n\n\t########################\n\t###Define model parameters\n\t########################\n\t#Properties of the ground\n\temissivity_ground=1.-albedo_dif #emissivity of ground. 1=perfect BB emitter.\n\n\t#Optical depth structure\n\tNlayer=len(tau_n) #number of layers in the atmospheric model.\n\t\n\ttau_c=np.zeros(Nlayer+1)# tau_c[n] is the cumulative optical depth at the upper edge of layer n. So tau_c[0]=0, and tau_c[N] is the maximum possible.\n\tfor n in range(0, Nlayer):\n\t\ttau_c[n+1]=tau_c[n]+tau_n[n] \n\n\t#In the Toon formalism, j=0 corresponds to space, and j=N+1 corresponds to the planet surface.\n\t#These points in wavelength space define the edges of the bins in tau space. \n\t#Other terminology:\n\t#\ttau_c=cumulative optical depth of layers *above* layer n. \n\t#\ttau_n=total optical depth of the layer n\n\t#\ttau=total optical depth at any point within a layer n, hence satisfying 0<tau<tau_n\n\n\tmu_0=np.cos(solarzenithangle) #\"incident direction of solar beam\"\n\n\n\t########################\n\t###Determine the two-stream approximation coefficients.\n\t########################\n\t#Eddington and quadrature are good at solar wavelengths (i.e., not thermal blackbody dominated). delta scalings of Joseph et al (1976) recommended to replace w_0, g, tau in this case. 
However, when dominated by internal isotropic sources like the Planck function, hemispheric mean approximation is preferable. When w_0=0, quadrature case has problems. This happens esp at thermal wavelengths. Again this favors using hemispheric mean at these wavelengths\n\t\n\t#We use quadrature because 1) we are at solar wavelengths for this UV work and 2) that's what twostr.f does (which is our comparison case)\n\tgamma_1= np.sqrt(3.)*(2.-w_0*(1.+g))/2. #consistent with Toon et al; consistent with Pierrehumbert gamma_1\n\tgamma_2=np.sqrt(3.)*w_0*(1.-g)/2. #consistent with Toon et al; consistent with Pierrehumbert gamma_2\n\tgamma_3=(1.-np.sqrt(3.)*g*mu_0)/2. #consistent with Toon et al; equal to the Pierrehumbert gamma_plus/w_0\n\tgamma_4=1.-gamma_3 #consistent with Toon et al; equal to the Pierrehumbert gamma_minus/w_0\n\tmu_1=1./np.sqrt(3.)+np.zeros(np.shape(gamma_1))#In Toon paper (eqn 18), this is given by: (1.-w_0)/(gamma_1-gamma_2). For the quadrature approximation, it is 1./np.sqrt(3.). Given its use, it seems to relate most closely to gamma_B from Pierrehumbert (see eqs 5.27, 5.30)\n\n\t##Eddington\n\t#gamma_1= (7.-w_0*(4.+3.*g))/4.\n\t#gamma_2=-1.*(1.-w_0*(4.-3.*g))/4.\n\t#gamma_3=(2.-3.*g*mu_0)/4.\n\t#gamma_4=1.-gamma_3 #consistent with Toon et al; equal to the Pierrehumbert gamma_minus/w_0\n\t#mu_1=1./2.+np.zeros(np.shape(gamma_1))#In Toon paper (eqn 18), this is given by: (1.-w_0)/(gamma_1-gamma_2). For the quadrature approximation, it is 1./np.sqrt(3.). Given its use, it seems to relate most closely to gamma_B from Pierrehumbert (see eqs 5.27, 5.30)\n\n\talambda=np.sqrt(np.abs(gamma_1*gamma_1-gamma_2*gamma_2)) #this is the lower-case lambda, from eqn 21 of Toon et al\n\t\t\t\t\t\t\t\t #The absolute value was added based on the code Toon just sent us. This corresponds to his AK(L,J) parameter. But it should not matter since gamma_1>gamma_2 for w_0<1.\n\tclambda=(gamma_1-alambda)/(gamma_2) #this is the upper-case lambda, from eqn 22 of Toon et al\n\n\tEMLT=np.exp(-alambda*tau_n) #this appears to be a prefactor used to facilitate computation of eqn 44 of Toon et al\n\te1=1.+clambda*EMLT\n\te2=1.-clambda*EMLT\n\te3=clambda+EMLT\n\te4=clambda-EMLT\n\n\t########################\n\t###Set up calculation\n\t########################\n\t\"\"\"\n\tThe fundamental equation we are solving is of form:\n\tA_{l}*Y_{l-1}+B_{l}*Y_{l}+D{l+1}=E_{l} (equation 39 of Toon et al)\n\tHere, A_l, B_l, D_l, E_l are quantities we determine, and the Y_l is what we solve for.\n\tHence, we can summarize that we are solving a matrix equation that takes form:\n\tPY=E\n\twhere Y[l]=Y_l\n\t E[l]=E_l\n\t P[l, l-1]=A_l [row, column]\n\t P[l, l]=B_l\n\t P[l, l+1]=D_l\n\t P[i,j]=0 else\n\tToon et al use 1-indexing. Hence n runs from 1 to N, l runs from 1 to 2N, where N is the number of layers, and they have:\n\tY_l=Y_{1n} for l=1,3,5,...2n-1...2N-1\n\tY_l=Y_{2n} for l=2,4,6,...2n...2N\n\n\tHowever, we use Python, which has 0-indexing. 
Hence *we* choose that n runs from 0 to N-1, l runs from 0 to 2N-1, and:\n\tY_l=Y_{1n} for l=0,2,4...2n...2N-2\n\tY_l=Y_{2n} for l=1,3,5...2n+1...2N-1\n\n\tThe Y_{1n} and Y_{2n} are related to F^+_n and F^-_n via equations 31 and 32 of Toon et al.\n\tThis parametrization has been done to remove exponentials with positive operands (ie ones that could grow large and lead to numerical instabilities) from the matrix.\n\n\tNote: The mapping of this PQ=R to the F+ and F- is unclear because of 1) this parametrization in terms of Y_l (done to eliminate numerical instabilities) and 2)further linear combinations done to convert a pentagiagonal matrix to an even simpler tridiagonal matrix. Hence intuitive checks are hard.\n\t\"\"\"\n\n\t########################\n\t###Set up surface flux\n\t########################\n\tS_sfc=albedo_dir*mu_0*np.exp(-tau_c[-1]/mu_0)*np.pi*F_s+emissivity_ground*np.pi*Planck(temp_ground, wavelength)\n\t#Surface emission. Formed by adding blackbody emission from the ground to the reflected energy from the direct beam. The direct beam's reflected energy is assumed to be purely diffuse. This corresponds to equations 37 and 38 of Toon et al. Note that this does NOT match equation 5.31 of Pierrehumbert because it does not include the reflected diffuse radiation. So, this implicitly assumes the diffuse albedo to be 0. \n\n\t########################\n\t###Set up C-values\n\t########################\n\t#In the reshuffled set of parameters used in this formalism, these seem analagous to the forcing term in Pierrehumbert. All the added radiation is contained in here.\n\n\tdef C_plus(n, tau): #implementation of superposition of eqns 23 and 27 from Toon et al\n\t\tsolarrad_denominator=alambda[n]**2.-1./mu_0**2.\n\t\tsolarrad_prefactor=w_0[n]*F_s*np.pi\n\t\tsolarrad_exponential=np.exp(-1.*(tau_c[n]+tau)/mu_0)\n\t\tsolarrad_factor=((gamma_1[n]-1./mu_0)*gamma_3[n]+gamma_4[n]*gamma_2[n])\n\t\tsolarrad=solarrad_prefactor*solarrad_factor*solarrad_exponential/solarrad_denominator #units of flux: erg/s/cm2/nm\n\t\t\n\t\tblackbody_prefactor=2*np.pi*mu_1[n]\n\t\tB0n=Planck(temp_c[n], wavelength)\n\t\tB1n=(Planck(temp_c[n+1], wavelength)-B0n)/tau_n[n] #this is effectively a slope\n\t\tblackbody_factor=B0n+B1n*(tau+1./(gamma_1[n]+gamma_2[n]))\n\t\tblackbody=blackbody_prefactor*blackbody_factor #start with units of the Planck function, which are: erg/s/cm2/nm/sr. But multiplying by 2pi sr restores the units of flux. So can safely add them. \n\t\t\n\t\tresult=solarrad+blackbody\n\t\treturn result\n\n\tdef C_minus(n, tau): #implementation of superposition of eqns 24 and 27 from Toon et al\n\t\tsolarrad_denominator=alambda[n]**2.-1./mu_0**2.\n\t\tsolarrad_prefactor=w_0[n]*F_s*np.pi\n\t\tsolarrad_exponential=np.exp(-1.*(tau_c[n]+tau)/mu_0)\n\t\tsolarrad_factor=((gamma_1[n]+1./mu_0)*gamma_4[n]+gamma_3[n]*gamma_2[n])\n\t\tsolarrad=solarrad_prefactor*solarrad_factor*solarrad_exponential/solarrad_denominator #units of flux: erg/s/cm2/nm\n\t\t\n\t\tblackbody_prefactor=2*np.pi*mu_1[n]\n\t\tB0n=Planck(temp_c[n], wavelength)\n\t\tB1n=(Planck(temp_c[n+1], wavelength)-B0n)/tau_n[n] #this is effectively a slope\n\t\tblackbody_factor=B0n+B1n*(tau-1./(gamma_1[n]+gamma_2[n]))\n\t\tblackbody=blackbody_prefactor*blackbody_factor #start with units of the Planck function, which are: erg/s/cm2/nm/sr. But multiplying by 2pi sr restores the units of flux. So can safely add them. 
\n\t\t\n\t\tresult=solarrad+blackbody\n\t\treturn result\n\n\t########################\n\t###Calculate matrix coefficients\n\t#########################\n\t#initialize the A, B, D, and E.\n\tA=np.zeros(Nlayer*2)\n\tB=np.zeros(np.shape(A))\n\tD=np.zeros(np.shape(A))\n\tE=np.zeros(np.shape(A))\n\n\n\t#For l=0 (n=0) we have the boundary condition that the downward diffuse flux at the top of the first layer is equal to any incident diffuse downward flux. We set this to be zero.\n\tA[0]=0.\n\tB[0]=e1[0]\n\tD[0]=-1.*e2[0]\n\tE[0]=0.-1*C_minus(0,0) #This is really F_minus[0,0], i.e. we are assuming there is no downward diffuse flux from the top of the atmosphere.\n\n\t#for l=2N-1 (n=N-1), we have the boundary condition that the upward flux at the surface is the sume of the reflected downward diffuse flux and energy from any other sources (e.g. reflected direct beam, BB emission of the ground)/np.sqrt(3.)\n\tA[2*Nlayer-1]=e1[Nlayer-1]-albedo_dif*e3[Nlayer-1]\n\tB[2*Nlayer-1]=e2[Nlayer-1]-albedo_dif*e4[Nlayer-1]\n\tD[2*Nlayer-1]=0.\n\tE[2*Nlayer-1]=S_sfc-C_plus(Nlayer-1, tau_n[Nlayer-1])+albedo_dif*C_minus(Nlayer-1, tau_n[Nlayer-1])\n\n\t#There is a problem in the Toon paper. As written, the l=2n depends on e_n+1, running over the array edge. twostr.f resolves this by adopting a different mapping: their definition reduces to defining l=2(n+1) and running n from 0 to N-1. In this case, l=2 (The third value in the list of ls) depends on n=0 and n=1. This eliminates the overflow problem. We have implemented this below.\n\t\n\t##For n=1,2,3...N-1, l=2,4,6,...2N-2:\n\tfor n in range(0, Nlayer-1):\n\t\tl=2*(n+1)\n\t\tA[l]=e2[n]*e3[n]-e4[n]*e1[n]\n\t\tB[l]=e1[n]*e1[n+1]-e3[n]*e3[n+1]\n\t\tD[l]=e3[n]*e4[n+1]-e1[n]*e2[n+1]\n\t\t\n\t\tE[l]=e3[n]*(C_plus(n+1, 0.)-C_plus(n, tau_n[n]))+e1[n]*(C_minus(n,tau_n[n])-C_minus(n+1,0.))\n\n\n\t#For n=0...N-2, l=1,3...2N-3:\n\tfor n in range(0, Nlayer-1):\n\t\tl=2*n+1\n\t\tA[l]=e2[n+1]*e1[n]-e3[n]*e4[n+1]\n\t\tB[l]=e2[n]*e2[n+1]-e4[n]*e4[n+1]\n\t\tD[l]=e1[n+1]*e4[n+1]-e2[n+1]*e3[n+1]\n\t\t\n\t\tE[l]=e2[n+1]*(C_plus(n+1, 0.)-C_plus(n, tau_n[n]))-e4[n+1]*(C_minus(n+1, 0)-C_minus(n, tau_n[n])) #twostr.f has a -1*e_{4,n+1}. We have applied the same even though this is NOT what is written in the Toon et al paper. We have done this because Toon told us (6/26/2015) that there are some sign errors in the coefficients, and we currently trust the validated CLIMA code over the paper we know has errors in it. EDIT: Looking at the code Toon shared with us, he does the same. \n\n\n\t########################\n\t###Assemble matrix equation components\n\t#########################\n\tP=np.zeros([Nlayer*2,Nlayer*2])\n\n\t#l=0: no \"A\" coefficient b/c l-1 has no meaning\n\tP[0,0]=B[0]\n\tP[0,1]=D[0]\n\n\t#l=2N-1: no \"D\" coefficient b/c l+1 has no meaning\n\tP[2*Nlayer-1,2*Nlayer-1-1]=A[2*Nlayer-1]\n\tP[2*Nlayer-1,2*Nlayer-1]=B[2*Nlayer-1]\n\n\tfor l in range(1, Nlayer*2-1): #This populates the matrix P in PY=E. 
\n\t\tP[l, l-1]=A[l]\n\t\tP[l,l]=B[l]\n\t\tP[l,l+1]=D[l]\n\n\t########################\n\t###Invert matrix\n\t#########################\n\t#Y=np.linalg.solve(P, E) #this is the Y_l\n\t\n\t#try using a specialized solver\n\tab=np.zeros([3,2*Nlayer])\n\tab[0,:]=np.append(0.0, np.diag(P, k=1))\n\tab[1,:]=np.diag(P, k=0)\n\tab[2,:]=np.append(np.diag(P, k=-1),0.0)\n\t#pdb.set_trace()\n\tY=scipy.linalg.solve_banded((1,1), ab, E) #this is the Y_l\n\n\n\t########################\n\t###Convert from Y_l to Y_1n, Y_2n\n\t#########################\n\t#The Y_1n as defined in Toon et al correspond to l=1,3, 5...2N-1. Adjusting for the zero-indexing of Python as we have done, they instead correspond to l=0,2,...2N-2\n\t#The Y_2n as defined in Toon et al correspond to l=2,4,6...2N. Adjusting for Python zero-indexing as we have done, they instead correspond to l=1,3,5...2N-1.\n\t#For detail, see eq. 40.\n\tY_1=np.zeros(Nlayer)\n\tY_2=np.zeros(Nlayer)\n\tfor n in range(0, Nlayer):\n\t\tY_1[n]=Y[2*n]\n\t\tY_2[n]=Y[2*n+1] \n\t\t#last number called is Nlayer-1=N-1, so is consistent.\n\t\n\t########################\n\t###Convert from Y_1n, Y_2n to F_plus, F_minus\n\t#########################\n\tdef F_plus(n,tau): #defined from Eqn 31 of Toon et al.\n\t\tterm1=Y_1[n]*(np.exp(-alambda[n]*(tau_n[n]-tau))+clambda[n]*np.exp(-alambda[n]*tau))\n\t\tterm2=Y_2[n]*(np.exp(-alambda[n]*(tau_n[n]-tau))-clambda[n]*np.exp(-alambda[n]*tau))\n\t\tterm3=C_plus(n,tau)\n\t\t\n\t\tresult=term1+term2+term3\n\t\treturn result\n\n\tdef F_minus(n, tau): #defined from Eqn 32 of Toon et al.\n\t\tterm1=Y_1[n]*(clambda[n]*np.exp(-alambda[n]*(tau_n[n]-tau))+np.exp(-alambda[n]*tau))\n\t\tterm2=Y_2[n]*(clambda[n]*np.exp(-alambda[n]*(tau_n[n]-tau))-np.exp(-alambda[n]*tau))\n\t\tterm3=C_minus(n,tau)\n\t\t\n\t\tresult=term1+term2+term3\n\t\treturn result\n\t\n\t########################\n\t###Evaluate F_plus, F_minus at boundary edges\n\t#########################\n\tF_plus_tau0=np.zeros(np.shape(tau_n))\n\tF_plus_taumax=np.zeros(np.shape(tau_n))\n\tF_minus_tau0=np.zeros(np.shape(tau_n))\n\tF_minus_taumax=np.zeros(np.shape(tau_n))\n\n\tfor n in range(0, Nlayer):\n\t\tF_plus_tau0[n]=F_plus(n, 0.)\n\t\tF_plus_taumax[n]=F_plus(n, tau_n[n])\n\t\tF_minus_tau0[n]=F_minus(n, 0.)\n\t\tF_minus_taumax[n]=F_minus(n, tau_n[n])\n\n\n\t########################\n\t###Convert from Y_1n, Y_2n to F_net, mean intensity.\n\t#########################\n\t#test if diffuse flux dominates over direct flux. If direct flux dominant, instead set mu_1=mu_0\n\t\n\t#if F_minus_taumax[-1]<mu_0*np.pi*F_s*np.exp(-tau_c[-1]/mu_0):\n\t\t#mu_1=np.zeros(np.shape(mu_1))+mu_0\n\t#mu_1=np.zeros(np.shape(mu_1))+mu_0\n\t\n\tF_net=np.zeros(np.shape(tau_n)) #defined from Eqn 48 of Toon et al. This quantity is the net flux at the BASE of layer n.\n\tfor n in range(0, Nlayer):\n\t\tdirect=mu_0*np.pi*F_s*np.exp(-(tau_c[n]+tau_n[n])/mu_0) #eqn 50 of Toon et al\n\n\t\tterm1=Y_1[n]*(e1[n]-e3[n])\n\t\tterm2=Y_2[n]*(e2[n]-e4[n])\n\t\tterm3=C_plus(n, tau_n[n])-C_minus(n, tau_n[n])\n\t\t\n\t\tF_net[n]=term1+term2+term3 -direct\n\n\tAMEAN=np.zeros(np.shape(tau_n)) #defined from Eqn 49 of Toon et al. This is the equivalent of the quantity AMEAN in the twostr.f code. It is equal to 4*np.pi*J_n, where J_n is the mean intensity at the base of layer n. Hence this quantity AMEAN should be equal to the total intensity received by a point at the base of layer n. 
\n\tfor n in range(0, Nlayer):\n\t\tdirect=mu_0*np.pi*F_s*np.exp(-(tau_c[n]+tau_n[n])/mu_0) #eqn 50 of Toon et al\n\t\n\t\tterm1=Y_1[n]*(e1[n]+e3[n])\n\t\tterm2=Y_2[n]*(e2[n]+e4[n])\n\t\tterm3=C_plus(n, tau_n[n])+C_minus(n, tau_n[n])\n\t\t\n\t\t#AMEAN[n]=(1./mu_1[n])*(term1+term2+term3)+direct/mu_0\t\n\t\tAMEAN[n]=(1./mu_1[n])*(F_plus_taumax[n]+F_minus_taumax[n])+direct/mu_0\t\n\t\n\t########################\n\t###Compute \"surface intensity\"\n\t#########################\t\n\t#\"Surface intensity\" refers to the total intensity that would be intercepted by a particle at the surface of the planet. Whereas the total intensity is equal to (F_plus[-1]+F_minus[-1])/mu_1+direct[-1]/mu_0, the surface intensity is instead equal to (F_minus[-1])/mu_1+direct[-1]/mu_0, i.e. the downwelling diffuse intensity (since the bottom intensity is cut out due to there being a planet there) plus the direct intensity\n\t\n\tsurface_intensity=(F_minus_taumax[-1]/mu_1[-1])+(np.pi*F_s)*np.exp(-(tau_c[-1])/mu_0)\n\t\n\t########################\n\t###Return Result\n\t#########################\n\t#F_minus_tau0\n\t#np.max(np.abs((F_minus_taumax[:-1]-F_minus_tau0[1:]))/F_minus_tau0[1:])\n\t#np.max(np.abs((F_plus_taumax[:-1]-F_plus_tau0[1:]))/F_plus_tau0[1:])\n\t\n\treturn (F_plus_tau0, F_plus_taumax, F_minus_tau0, F_minus_taumax, F_net, AMEAN, surface_intensity)", "def test_f1_circuit_maker(self):\n fho = tfho.test_file_handle_object()\n W = 5\n G = 20\n fg = .9\n X = 10\n fx = .85\n gate_maker = g.TYPE_TO_GATE_GEN[g.TEST_TYPES.RANDOM]\n # family 1 files:\n t_circuit_file_name = \"circuit_file_trimming\"\n t_circuit_file = fho.get_file_object(t_circuit_file_name, 'w')\n t_input_file_name = \"input_file_trimming\"\n t_input_file = fho.get_file_object(t_input_file_name, 'w')\n t_output_file_name = \"output_file_trimming\"\n t_output_file = fho.get_file_object(t_output_file_name, 'w')\n nt_circuit_file_name = \"circuit_file_no_trimming\"\n nt_circuit_file = fho.get_file_object(nt_circuit_file_name, 'w')\n nt_input_file_name = \"input_file_no_trimming\"\n nt_input_file = fho.get_file_object(nt_input_file_name, 'w')\n nt_output_file_name = \"output_file_no_trimming\"\n nt_output_file = fho.get_file_object(nt_output_file_name, 'w')\n level_type_array = [g.LEVEL_TYPES.RANDOM]\n F = 1\n # make a family 1 circuit with trimming:\n sr.seed(self.rand_seed)\n t_gen = g.f1f2_circuit_maker_with_trimming_switch(W, G, fg,\n t_circuit_file,\n t_input_file,\n t_output_file,\n X, fx, gate_maker,\n level_type_array, True)\n t_gen.generate()\n # make a family 1 circuit without trimming, with the same randomness:\n sr.seed(self.rand_seed)\n nt_gen = g.f1f2_circuit_maker_with_trimming_switch(W, G, fg,\n nt_circuit_file,\n nt_input_file,\n nt_output_file,\n X, fx, gate_maker,\n level_type_array, False)\n nt_gen.generate()\n # obtain strings representing the contents of all the resulting files:\n t_circuit_string = fho.get_file(t_circuit_file_name).getvalue()\n t_input_string = fho.get_file(t_input_file_name).getvalue()\n t_output_string = fho.get_file(t_output_file_name).getvalue()\n nt_circuit_string = fho.get_file(nt_circuit_file_name).getvalue()\n nt_input_string = fho.get_file(nt_input_file_name).getvalue()\n nt_output_string = fho.get_file(nt_output_file_name).getvalue()\n # make sure that the inputs and outputs produced by the trimming and\n # no trimming algorithms are the same:\n self.assertEqual(t_input_string, nt_input_string)\n self.assertEqual(t_output_string, nt_output_string)\n # make sure that the input begins and ends with a 
bracket:\n self.assertEqual(\"[\", t_input_string[0])\n self.assertEqual(\"]\", t_input_string[-1])\n # make sure that each input element is a bit:\n for bit in t_input_string[1:-1]:\n self.assertTrue((bit == '0') or (bit == '1'))\n # make sure that the output is a bit:\n self.assertTrue((t_output_string == '0') or (t_output_string == '1'))\n # make sure that the two circuit headers are the same, and that they\n # contain the correct values:\n t_circuit_header = t_circuit_string.split(\"\\n\")[0]\n nt_circuit_header = nt_circuit_string.split(\"\\n\")[0]\n self.assertEqual(t_circuit_header, nt_circuit_header)\n (W_string, G_string, F_string) = t_circuit_header.split(\",\")\n W_value = int(W_string.split(\"=\")[-1])\n G_value = int(G_string.split(\"=\")[-1])\n F_value = int(F_string.split(\"=\")[-1])\n self.assertEqual(W, W_value)\n self.assertEqual(G, G_value)\n self.assertEqual(F, F_value)\n # note that we cannot test that the circuits themselves are the same,\n # because the trimming algorithm produces a circuit with gates listed\n # in a different order.", "def SSt_theo_old(D, k):\n\ta1b = k[\"A1B1\"]\n\tba1 = k[\"B1A1\"]\n\tca1 = k[\"C1A1\"]\n\tcb = k[\"B1C1\"]\n\tnum = a1b*ba1*ca1*ca1 + ba1*ba1*ca1*ca1 + 3*a1b*ba1*ca1*cb + 2*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 2*ba1*ca1*ca1*cb + 2*a1b*ba1*cb*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*a1b*ca1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t3*ba1*ca1*ca1*ca1 + 2*a1b*ba1*ba1*cb + ba1*ba1*ba1*cb + 2*a1b*ba1*ca1*cb + \\\n\t\t\t3*ba1*ba1*ca1*cb + 4*a1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + \\\n\t\t\t2*a1b*ba1*cb*cb + 2*ba1*ba1*cb*cb + 2*a1b*ca1*cb*cb + 4*ba1*ca1*cb*cb + \\\n\t\t\t2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 4*ba1*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t2*ca1*ca1*ca1*ca1 + ba1*ba1*ba1*cb + 3*a1b*ba1*ca1*cb + 3*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\t(ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 3*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + \\\n\t\t\t2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\tba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\tden = a1b*(ba1*ba1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb + ba1*ba1*cb*cb + \n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + 4*ca1*ca1*ca1*cb + \n\t\t\t2*ba1*ba1*cb*cb + 4*ba1*ca1*cb*cb + 2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\ta1b*(2*ba1*ba1*ca1*ca1 + 4*ca1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + \n\t\t\t4*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\ta1b*ba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\ttau = num/den\n\t##\n\treturn tau*np.log(20)", "def XsamsRadTranBroadening(G):\n s=''\n if countReturnables('RadTransBroadeningNatural'):\n # attempt at making a loop to support multiple natural broadening effects\n if hasattr(G('RadTransBroadeningNatural'), \"Broadenings\"):\n for Broadening in makeiter(G('RadTransBroadeningNatural').Broadenings):\n GB = lambda name: GetValue(name, Broadening=Broadening)\n s += makeBroadeningType(GB, name='Natural')\n else:\n s += makeBroadeningType(G, name='Natural')\n if countReturnables('RadTransBroadeningInstrument'):\n s += makeBroadeningType(G, name='Instrument')\n if 
countReturnables('RadTransBroadeningDoppler'):\n s += makeBroadeningType(G, name='Doppler')\n if countReturnables('RadTransBroadeningPressure'):\n s += makeBroadeningType(G, name='Pressure')\n return s", "def _define_biophysics(self):\n\t\tfor node in self.node:\n\t\t\tnode.nseg=1\n\t\t\tnode.diam=self._nodeD\n\t\t\tnode.L=self._nodeLength\n\t\t\tnode.Ra=self._rhoa/10000\n\t\t\tnode.cm=2\n\t\t\tnode.insert('axnode')\n\t\t\tnode.insert('extracellular')\n\t\t\tnode.xraxial[0]=self._Rpn0\n\t\t\tnode.xg[0]=1e10\n\t\t\tnode.xc[0]=0\n\n\t\tfor mysa in self.mysa:\n\t\t\tmysa.nseg=1\n\t\t\tmysa.diam=self._fiberD\n\t\t\tmysa.L=self._paraLength1\n\t\t\tmysa.Ra=self._rhoa*(1/(self._paraD1/self._fiberD)**2)/10000\n\t\t\tmysa.cm=2*self._paraD1/self._fiberD\n\t\t\tmysa.insert('pas')\n\t\t\tmysa.g_pas=0.001*self._paraD1/self._fiberD\t\t\n\t\t\tmysa.e_pas=-80\n\t\t\tmysa.insert('extracellular')\n\t\t\tmysa.xraxial[0]=self._Rpn1\n\t\t\tmysa.xg[0]=self._mygm/(self._nl*2)\n\t\t\tmysa.xc[0]=self._mycm/(self._nl*2)\n\n\t\tfor flut in self.flut:\n\t\t\tflut.nseg=1\n\t\t\tflut.diam=self._fiberD\n\t\t\tflut.L=self._paraLength2\n\t\t\tflut.Ra=self._rhoa*(1/(self._paraD2/self._fiberD)**2)/10000\n\t\t\tflut.cm=2*self._paraD2/self._fiberD\n\t\t\tflut.insert('pas')\n\t\t\tflut.g_pas=0.0001*self._paraD2/self._fiberD\t\t\n\t\t\tflut.e_pas=-80\n\t\t\tflut.insert('extracellular')\n\t\t\tflut.xraxial[0]=self._Rpn2\n\t\t\tflut.xg[0]=self._mygm/(self._nl*2)\n\t\t\tflut.xc[0]=self._mycm/(self._nl*2)\n\t\t\n\t\tfor stin in self.stin:\n\t\t\tstin.nseg=1\n\t\t\tstin.diam=self._fiberD\n\t\t\tstin.L=self._interLength\n\t\t\tstin.Ra=self._rhoa*(1/(self._axonD/self._fiberD)**2)/10000\n\t\t\tstin.cm=2*self._axonD/self._fiberD\n\t\t\tstin.insert('pas')\n\t\t\tstin.g_pas=0.0001*self._axonD/self._fiberD\n\t\t\tstin.e_pas=-80\n\t\t\tstin.insert('extracellular')\n\t\t\tstin.xraxial[0]=self._Rpx\n\t\t\tstin.xg[0]=self._mygm/(self._nl*2)\n\t\t\tstin.xc[0]=self._mycm/(self._nl*2)", "def stim_conditions(angles, onebeep_nb, twobeep_nb, onebeep_tc, twobeep_tc):\n##### make single auditory stim################################################\n\n #conditions_1A = [-30_1A, 0_1A, 30_1A, -30_2A, 0_2A, 30_2A]\n spatials = ('-30', '0', '30')\n beep_combos_1a = ('onebeep_nb', 'twobeep_nb', 'onebeep_tc', 'twobeep_tc')\n\n##### make competing auditory stim#############################################\n\n #conditions_2A = []\n spatials = ('-30x0', '0x30', '-30x30')\n beep_combos_2a = ('onebeep_nbxonebeep_tc', 'twobeep_nbxonebeep_tc',\n 'onebeep_nbxtc2', 'twobeep_nbxtwobeep_tc')\n\n all_spatials = [s.split('x') for s in spatials]\n for s in all_spatials[1:]:\n all_spatials[0] += s\n all_spatials = all_spatials[0]\n all_spatials = list(np.unique([float(s) for s in all_spatials]))\n\n all_combos = [ss.split('x') for ss in beep_combos_2a]\n for ss in all_combos[1:]:\n all_combos[0] += ss\n all_combos = all_combos[0]\n all_combos = list(np.unique([float(ss) for ss in all_combos]))\n\n##### convolve with HRTF at appropriate angles ################################\n\n move_sig = np.concatenate([convolve_hrtf(stim, fs, ang)\n for ang in range(-30, 30)], axis=1)\n return move_sig", "def test_bespoke_atom_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_smiles(\"C\")\n\n atom_smirks = gen._get_bespoke_atom_smirks(molecule=mol)\n # there should only be 2 unique smirks\n assert len(atom_smirks) == 2\n # make sure the correct atoms are hit\n all_atoms = []\n for smirk in atom_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n 
all_atoms.extend(atoms)\n assert set(atoms) == smirk.atoms\n # make sure all atoms have a bespoke smirks\n for i in range(mol.n_atoms):\n assert (i, ) in all_atoms", "def create_duck1010(self):\n assignments = ['oblig{num}:pub({pub}):ln(Obligatorisk oppgave {num})'.format(num=num, pub=num*40) for num in xrange(1, 4)]\n periods = ['springcur:begins(-2):ends(6):ln(Spring Current)',\n 'springold:begins(-14):ends(6):ln(Spring Old)']\n self.testhelper.add(nodes=\"duckburgh:admin(duckburghadmin).ifi:admin(ifiadmin)\",\n subjects=[\"duck1010:ln(DUCK1010 - Objektorientert programmering)\"],\n periods=periods,\n assignments=assignments)\n self.testhelper.duck1010.admins.add(self.testhelper.thor)\n\n for year in range(2000, 2011): # Add some extra old semesters just to make it easier to test layouts with many semesters\n logging.info('Creating duck1010 spring%s', year)\n self.testhelper.duck1010.periods.create(\n short_name='spring{0}'.format(year),\n long_name='Spring {0}'.format(year),\n start_time=datetime(year, 8, 1),\n end_time=datetime(year, 12, 30)\n )\n\n anotherTryVerdict = {'grade': 'Not approved', 'points': 0, 'is_passing_grade': False}\n failedVerdict = {'grade': 'Not approved', 'points': 0, 'is_passing_grade': False}\n okVerdict = {'grade': 'Approved', 'points': 1, 'is_passing_grade': True}\n goodVerdict = {'grade': 'Approved', 'points': 1, 'is_passing_grade': True}\n\n assignmentnames = [name.split(':')[0] for name in assignments]\n periodnames = self._onlyNames(periods)\n for periodname in periodnames:\n periodpath = 'duckburgh.ifi;duck1010.' + periodname\n logging.info('Creating %s', periodpath)\n period = self.testhelper.get_object_from_path(periodpath)\n self._set_first_deadlines(period)\n self._addRelatedStudents(period)\n self._addRelatedExaminers(period)\n self._addBadGroups(periodpath, assignmentnames, anotherTryVerdict, failedVerdict)\n self._addMediumGroups(periodpath, assignmentnames, anotherTryVerdict, okVerdict)\n self._addGoodGroups(periodpath, assignmentnames, goodVerdict)", "def build_antibodies(experiment, canonicals, ln):\n # Store all of the antibodies in this list\n antibodies = []\n # Get the list of CDRs to be considered\n cdrs = list(canonicals.keys())\n cdrs.sort()\n # Get the optimal set of canonical structures for this library\n solution = experiment[\"Scores\"][ln-1][1]\n # Go through the antibody chains being designed\n chains = experiment[\"Optcdr Chains\"]\n chains.sort()\n # Find the reference framework molecule\n if experiment[\"Optcdr Frameworks\"] != {}:\n file = experiment[\"Optcdr Framework Reference\"].split(\"/\")[-1]\n path = experiment[\"Optcdr Framework Reference\"].replace(file, '')\n reference = MOLECULES.MoleculeFile(file, path) \n for chain in chains:\n # Store the molecules in a list\n molecules = []\n # Go through the CDR numbers\n for i in range(1, 4):\n # Extract the CDR name\n cdr = chain[0].upper() + str(i) \n # Get the index for the CDR in the solution dictionary\n index = cdrs.index(cdr) + 1\n # Append the canonical structure molecule to the list of molecules\n molecules.append(canonicals[cdr][solution[index]])\n # If a framework has been specified, add it\n name = chain.lower()\n if name in experiment[\"Optcdr Frameworks\"]:\n # Extract the Molecule class object\n molecule = experiment[\"Optcdr Frameworks\"][name][0][2]\n # Properly orient the framework\n orient_framework(molecule, reference[chain[0].upper()])\n # Obtain the list of framework regions\n frameworks = include_framework(molecule)\n # Go through the molecules and convert 
them to text\n texts = []\n for mol in molecules:\n # Skip the first and last residues of each CDR since they are\n # attach points\n text = \"\"\n for rn in range(len(mol)):\n if rn not in [0, len(mol) - 1]:\n text += str(mol[rn])\n # Append this CDR to the list of CDRs \n texts.append(text)\n # Make sure there are 4 framework regions and 3 CDRs\n if len(frameworks) != 4 or len(texts) != 3:\n text = \"The framework molecule does not include all of the \"\n text += \"necessary regions\"\n raise OptcdrError(text)\n # Concatenate the text into a single string\n atoms = ''\n # Add FR1, CDR1, FR2, CDR2, FR3, CDR3 in that order \n for i in range(0, 3):\n atoms += frameworks[i] + texts[i] \n # Add FR4 \n atoms += frameworks[3]\n # Overwrite the molecules list with this single molecule\n molecules =[MOLECULES.Molecule(atoms, 1, 1, chain[0].upper(), True,\\\n experiment[\"Force Field\"], experiment[\"File Format\"])]\n # Add the list of molecules to the list of antibodies\n antibodies.extend(molecules)\n # Generate the expected number of Design Molecules\n dms = 0\n for chain in chains:\n if chain in experiment[\"Optcdr Frameworks\"]:\n dms += 1\n else:\n dms += 3\n # Create a formatted list\n formatted_antibodies = []\n # If a single molecule is present for each chain, use the framework name\n if len(antibodies) == len(chains):\n for antibody in antibodies:\n formatted_antibodies.append([None, antibody.name, antibody])\n # If there are 3 CDRs for each chain, generate a unique name for each CDR\n elif len(antibodies) == dms: \n # Create a formatted list\n formatted_antibodies = []\n # Create a string of possible names\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n # Use a counter to go through the alphabet\n an = 0\n # Go through the antibodies\n for antibody in antibodies:\n # Use a while loop to find an appropriate name for the molecule\n goOn = True\n while goOn:\n # Increment the counter\n an += 1\n # If the name has not been used, store it\n if alphabet[an] not in experiment[0]:\n goOn = False\n # Store the formatted molecule list\n antibody.name = alphabet[an]\n formatted_antibodies.append([None, alphabet[an], antibody])\n # Otherwise, something is wrong so raise an error\n else:\n text = \"There is an unexpected number of antibodies generated for \"\n text += \"library \" + str(ln)\n raise OptcdrError(text)\n # Update the experiment to include the antibody details\n experiment[\"Molecules\"].extend(formatted_antibodies)\n experiment.make_DesignGroups()\n existing = experiment['Summary']\n experiment.finish_creation()\n experiment['Summary'] += existing\n # Update the OptCDR Scores\n SHARING.load_scores(experiment, experiment[\"Folder\"] + \\\n \"results/initial/\")\n # Output these molecules to the 'Current' folder\n SHARING.output_Current(experiment, './Current/')\n # Refine the initial antibody structure\n initialize_antibodies(experiment, canonicals)", "def isBGS_sga(gflux=None, rflux=None, zflux=None, w1flux=None, refcat=None, rfibertotflux=None,\n rfiberflux=None, maskbits=None, south=True, targtype=None):\n\n _check_BGS_targtype(targtype)\n\n bgs = np.zeros_like(rflux, dtype='?')\n\n # the SGA galaxies.\n LX = np.zeros_like(rflux, dtype='?')\n # ADM Could check on \"L2\" for DR8, need to check on \"LX\" post-DR8.\n if refcat is not None:\n rc1d = np.atleast_1d(refcat)\n if isinstance(rc1d[0], str):\n LX = [(rc[0] == \"L\") if len(rc) > 0 else False for rc in rc1d]\n else:\n LX = [(rc.decode()[0] == \"L\") if len(rc) > 0 else False for rc in rc1d]\n if np.ndim(refcat) == 0:\n LX = 
np.array(LX[0], dtype=bool)\n else:\n LX = np.array(LX, dtype=bool)\n\n # Make sure to include all the SGA galaxies.\n bgs |= LX\n # ADM geometric masking cuts from the Legacy Surveys.\n # Remove SGA in BRIGHT and CLUSTER.\n bgs &= imaging_mask(maskbits, bgsmask=True)\n\n g = 22.5 - 2.5*np.log10(gflux.clip(1e-16))\n r = 22.5 - 2.5*np.log10(rflux.clip(1e-16))\n z = 22.5 - 2.5*np.log10(zflux.clip(1e-16))\n w1 = 22.5 - 2.5*np.log10(w1flux.clip(1e-16))\n rfib = 22.5 - 2.5*np.log10(rfiberflux.clip(1e-16))\n\n # BASS r-mag offset with DECaLS.\n offset = 0.04\n\n # D. Schlegel - ChangHoon H. color selection to get a high redshift\n # success rate.\n if south:\n schlegel_color = (z - w1) - 3/2.5 * (g - r) + 1.2\n rfibcol = (rfib < 20.75) | ((rfib < 21.5) & (schlegel_color > 0.))\n else:\n schlegel_color = (z - w1) - 3/2.5 * (g - (r-offset)) + 1.2\n rfibcol = (rfib < 20.75+offset) | ((rfib < 21.5+offset) & (schlegel_color > 0.))\n\n if targtype == 'bright':\n if south:\n bgs &= rflux > 10**((22.5-19.5)/2.5)\n bgs &= rflux <= 10**((22.5-12.0)/2.5)\n bgs &= rfibertotflux <= 10**((22.5-15.0)/2.5)\n else:\n bgs &= rflux > 10**((22.5-(19.5+offset))/2.5)\n bgs &= rflux <= 10**((22.5-12.0)/2.5)\n bgs &= rfibertotflux <= 10**((22.5-15.0)/2.5)\n elif targtype == 'faint':\n if south:\n bgs &= rflux > 10**((22.5-20.3)/2.5)\n bgs &= rflux <= 10**((22.5-19.5)/2.5)\n bgs &= (rfibcol)\n else:\n bgs &= rflux > 10**((22.5-(20.3))/2.5)\n bgs &= rflux <= 10**((22.5-(19.5+offset))/2.5)\n bgs &= (rfibcol)\n\n return bgs", "def make_seq(scaffold, o_dict):\n scaff_name = scaffold[0]\n sequence = []\n \n nice_scaff = \"contigs__\"\n \n scaff_string = str(scaffold)\n while scaffold:\n \n if len(scaffold) == 1:\n #This should never happen!\n paf(\"\\nWARNING: odd number of elements in scaffold!\")\n paf(\"scaffold is: \" + scaff_string)\n nice_scaff += \"WARNING:_odd_number_of_elements_in_scaffold!\"\n sequence.description = scaff_name\n return sequence, nice_scaff\n\n end1 = scaffold.pop(0)\n end2 = scaffold.pop(0)\n \n if end1[0:4] != \"five\" and end1[0:5] != \"three\":\n if end2 in repeat_contigs and end2[0:10] == \"threeprime\":\n #Only attach a repeat if connected by fiveprime end,\n # to avoid creating duplicate copies\n ''' this condition has been removed!\n end1 = scaffold.pop(0)\n end2 = scaffold.pop(0)\n #threeprime ends of repeats are not attached\n if end2[0:4] != \"five\" and end2[0:5] != \"three\": end2 = other_end(end1)\n '''\n \n if \"dummy\" in end2:\n end1 = scaffold.pop(0)\n end2 = scaffold.pop(0)\n\n if end2[0:4] != \"five\" and end2[0:5] != \"three\":\n #This should never happen! 
\n paf(\"\\nWARNING: scaffold not included in assembly!\")\n paf(\"scaffold is: \" + scaff_string)\n paf(\"end1 is: \" + str(end1))\n paf(\"end2 is: \" + str(end2)+ \"\\n\")\n nice_scaff += \"scaffold.not.included.in.assembly!\" + str(end1) + \".\" + str(end2)\n sequence.description = scaff_name\n return sequence, nice_scaff\n else:\n sequence, nice_scaff = initiate_seq(end2, nice_scaff)\n elif (end2 != \"link_circular\") and (\"dummy\" not in end1):\n sequence, nice_scaff = extend_seq(sequence, end0, end1, o_dict, nice_scaff)\n end0 = end2\n \n sequence.description = scaff_name\n \n return sequence, nice_scaff", "def SecondaryComplex_to_Bid():\n Parameter('RIP3_0' , 2.0e4) # molecules per cell\n Parameter('BidK_0' , 5.0e3) # molecules per cell\n \n alias_model_components()\n Initial(RIP3(bRHIM = None, state = 'unmod'), RIP3_0) # RIP3\n Initial(BidK(bf = None), BidK_0) \n # ==============================================================\n # Assembly of Complex II, Riptosome and Necrosome\n # --------------------------------------------------------------\n # FADD + TRADD[active] <-> FADD:TRADD[active]\n # FADD + RIP1 <-> FADD:RIP1\n # TRADD + RIP1 <-> TRADD:RIP1\n\n # CD95_to_secondary complex contains the rules for recruitment of proC8 to FADD.\n # (RIP1 or TRADD):FADD + proC8 <-> (RIP1 or TRADD):FADD:proC8\n # (RIP1 or TRADD):FADD:proC8 + proC8 <-> (RIP1 or TRADD):FADD:proC8:proC8\n # (RIP1 or TRADD):FADD:proC8 + flip_L <-> (RIP1 or TRADD):FADD:proC8:flip_L\n # (RIP1 or TRADD):FADD:proC8 + flip_S <-> (RIP1 or TRADD):proC8:flip_S\n \n # RIP1%ProC8%ProC8(in a complex) >> RIP1[trunc] + C8 + (remains of the complex)\n # RIP1%ProC8%cFlip[L](in a complex) >> RIP1[trunc] + remains of the complex)\n # RIP1%cFlip[S](in a complex) + RIP3 >> RIP1:RIP3(in a complex, i.e. necrosome)\n\n # RIP1 + C8 <-> RIP1:C8 >> RIP1[trunc] + C8\n # RIP3 + C8 <-> RIP3:C8 >> RIP3[trunc] + C8\n # Bid + C8 <-> Bid:C8 >> Bid[trunc] + C8\n \n # -------------Assembling Complex II-----------------\n Parameter('Ka_RIP1_FADD', 1e-7) # Biochemica et Biophysica Acta 1834(2013) 292-300\n Parameter('Kd_RIP1_FADD', 1e-8) # Biochemica et Biophysica Acta 1834(2013) 292-300\n alias_model_components()\n \n bind(FADD(bDD = None, bDED1 = None, bDED2 = None), 'bDD', TRADD(bDD1=None, state = 'active'), 'bDD1', [1e-6, 1e-3])\n bind(FADD(bDD = None), 'bDD', RIP1(bDD=None, bRHIM = None, state = 'unmod'), 'bDD', [Ka_RIP1_FADD, Kd_RIP1_FADD])\n bind(TRADD(bDD2 = None, state = 'active'),'bDD2', RIP1(bDD = None, bRHIM = None, state = 'unmod'), 'bDD', [1e-6, 1e-3])\n # For simplicity, I am neglecting the binary intereaction that occurs between proC8 and RIP1.\n # Binding of proC8 and c-flip to FADD is accomplished in CD95_to_Secondary complex. 
\n\n #--------------RIP1 Truncation reactions-------------\n #---Truncation by C8---------------------------------\n RIP_CIIA_proC8 = RIP1(bDD=ANY, bRHIM = None, state = 'unmod')% TRADD(bDD2 = None, bDD1 = ANY, state = 'active') % FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n RIP_CIIB_proC8 = RIP1(bDD=ANY, bRHIM = None, state = 'unmod')% FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n CIIA = TRADD(bDD2 = None, bDD1 = ANY, state = 'active') % FADD(bDD=ANY, bDED1=None, bDED2=None)\n \n Rule('RIP1_truncation_CIIA', RIP_CIIA_proC8 >> CIIA + C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k11',1e-1))\n Rule('RIP1_truncation_CIIB', RIP_CIIB_proC8 >> FADD(bDD=None, bDED1=None, bDED2=None)+ C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k12', 1e-1))\n catalyze_state(C8(bf = None, state = 'A'), 'bf', RIP1(bDD=None), 'bRHIM', 'state', 'unmod', 'trunc', [1e-6, 1e-3, 1e-1])\n\n #---Truncation by proC8:cFlip_L---------------------\n Riptosome_FADD = RIP1(bDD=1, bRHIM = None, state = 'unmod')%FADD(bDD=1, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n Riptosome_TRADD = RIP1(bDD=1, bRHIM = None, state = 'unmod')%TRADD(bDD1=ANY, bDD2=1)%FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n\n Rule('RIP1_truncation_FADD', Riptosome_FADD >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k13', 1e-1))\n Rule('RIP1_truncation_TRADD', Riptosome_TRADD >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k14', 1e-1))\n \n # -------------RIP3 Binding Interactions----------------\n Ripto1_Flip_S = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='unmod') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Ripto2_Flip_S = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='unmod') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Necrosome1 = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=6, state='unmod') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 6, state = 'unmod')\n Necrosome2 = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=5, state='unmod') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 5, state = 'unmod')\n\n Rule('RIP3_binding1', Ripto1_Flip_S + RIP3(bRHIM= None, state = 'unmod') <> Necrosome1, Parameter('k15', 1e-6), Parameter('k16', 1e-3))\n Rule('RIP3_binding2', Ripto2_Flip_S + RIP3(bRHIM= None, state = 'unmod') <> Necrosome2, Parameter('k17', 1e-6), Parameter('k18', 1e-3))\n \n #RIP3 Truncation\n catalyze_state(C8(bf = None, state = 'A'), 'bf', RIP3(), 'bRHIM', 'state', 'unmod', 'trunc', [1e-6, 1e-3, 1e-1])\n\n #-------------Bid Interactions--------------------------\n # Bid Phosphorylation and Truncation\n catalyze_state(BidK(), 'bf', Bid(), 'bf', 'state', 'U', 'po4', [1e-6, 1e-3, 1e-1])\n catalyze_state(C8(bf = None, state = 'A'), 'bf', Bid(), 'bf', 'state', 'U', 'T', [1.04e-5, 0.005, 0.1])\n\n # Bid-PO4 competing with RIP1 for binding to Complex II\n bind(TRADD(bDD2 = None, state = 'active'),'bDD2', Bid(bf = None, state = 'po4'), 'bf', [1e-6, 1e-3])\n # Bid-PO4 sequestering RIP1\n bind(RIP1(bDD = None, bRHIM = None, state = 'unmod'), 'bRHIM', Bid(bf = None, state = 'po4'), 'bf', [1e-6, 1e-3])", "def make_bispectra(self, stokes='postbisp', 
maxturns=0):\n\n bisp = lambda d: d[:,:,0] * d[:,:,1] * n.conj(d[:,:,2]) # bispectrum for data referenced by triple (data[:,triples])\n\n # set up triples and arrays for bispectrum considering flagged baselines (only having zeros).\n triples = self.make_triples()\n meanbl = self.data.mean(axis=2).mean(axis=0) # find bls with no zeros in either pol to ignore in triples\n self.triples = triples[n.all(meanbl[triples][:,0] != 0j, axis=1) & n.all(meanbl[triples][:,1] != 0j, axis=1) & n.all(meanbl[triples][:,2] != 0j, axis=1) == True] # only take triples if both pols are good. may be smaller than set for an individual pol\n\n # need to select path based on how polarization is handled. assumes only dual-pol data.\n print 'Bispectrum made for stokes =', stokes\n if ( (stokes == 'postbisp') | (stokes == 'prebisp') | (stokes == 'noavg') ): # case of combining two stokes\n bispectra = n.zeros((len(self.dmarr), len(self.data), len(self.triples)), dtype='complex')\n elif isinstance(stokes, types.IntType): # case of using single pol\n if stokes >= self.npol:\n raise IndexError, 'Stokes parameter larger than number of pols in data.'\n bispectra = n.zeros((len(self.dmarr), len(self.data), len(self.triples)), dtype='complex')\n elif stokes == 'noavg':\n bispectra = n.zeros((len(self.dmarr), len(self.data), len(self.triples), self.npol), dtype='complex')\n\n # iterate over dm trials\n for dmbin in xrange(len(self.dmarr)):\n\n if maxturns == 0:\n dddata = self.dedisperse(dmbin).mean(axis=2) # average over channels\n elif maxturns > 0:\n dddata = self.spectralInterpolate(self.dedisperse(dmbin), axis=2, maxturns=maxturns) # interpolate over channels using fft\n\n if stokes == 'prebisp':\n dddata = dddata.mean(axis=2)\n bispectra[dmbin] = bisp(dddata[:, self.triples])\n elif stokes == 'postbisp':\n bispectra[dmbin] = bisp(dddata[:, self.triples]).mean(axis=2)\n elif stokes == 'noavg':\n bispectra[dmbin] = bisp(dddata[:, self.triples])\n elif isinstance(stokes, types.IntType): # case of using single pol\n bispectra[dmbin] = bisp(dddata[:, self.triples, stokes])\n\n print 'dedispersed for ', self.dmarr[dmbin]\n self.bispectra = n.ma.masked_array(bispectra, bispectra == 0j)", "def create_B_words(path_to_pairs,\n path_to_librispeech_text,\n path_to_phonemes,\n path_save,\n freq_sim,\n len_sim,\n edit_sim):\n for i in range(len(path_to_pairs)):\n \n pairs = []\n dic_cl_eq = {} # Classe d'equivalence pour le sens des mots\n \n with open(path_to_pairs[i]) as f:\n for line in f:\n line = line.replace('\\n', '').split(' ')\n pairs.append(line)\n if line[0] in dic_cl_eq:\n dic_cl_eq[line[0]].add(line[1])\n else:\n dic_cl_eq[line[0]] = {line[1]}\n if line[1] in dic_cl_eq:\n dic_cl_eq[line[1]].add(line[0])\n else:\n dic_cl_eq[line[1]] = {line[0]}\n \n dic_cl_eq_prev = {}\n while dic_cl_eq_prev != dic_cl_eq:\n dic_cl_eq_prev = copy.deepcopy(dic_cl_eq)\n for word in dic_cl_eq:\n for syn in dic_cl_eq[word]:\n dic_cl_eq[word] = set.union(dic_cl_eq[word], dic_cl_eq[syn])\n \n with open(path_to_librispeech_text) as f:\n text_librispeech = f.read()\n text_librispeech_split = text_librispeech.replace('\\n', ' ').split(' ')\n freq_libri = {}\n for word in text_librispeech_split:\n if word in dic_cl_eq:\n if word in freq_libri:\n freq_libri[word] += 1\n else:\n freq_libri[word] = 1\n \n phonemes = []\n with open(path_to_phonemes[i]) as f:\n for line in f:\n line = line.replace('\\n', '').split(' ')\n phonemes.append(line)\n \n dic_word_phonemes = {}\n for j in range(len(pairs)):\n dic_word_phonemes[pairs[j][0]] = phonemes[j][0]\n 
dic_word_phonemes[pairs[j][1]] = phonemes[j][1]\n \n file = open(path_save[i], 'w+')\n file.truncate(0)\n \n for j in range(len(pairs)):\n A, X = pairs[j]\n B_0 = []\n for word in dic_cl_eq:\n if word not in dic_cl_eq[A]:\n if np.abs(np.log(freq_libri[word])/np.log(freq_sim) \\\n - np.log(freq_libri[A])/np.log(freq_sim)) <= 1:\n if (len(word) > (1-len_sim)*len(A)) and \\\n (len(word) < (1+len_sim)*len(A)):\n p_A = dic_word_phonemes[A]\n p_X = dic_word_phonemes[X]\n p_word = dic_word_phonemes[word]\n if np.abs(dist(p_A, p_X) - dist(p_X, p_word)) < edit_sim:\n B_0.append(word)\n line_0 = ' '.join([A, X] + B_0)\n \n X, A = pairs[j]\n B_1 = []\n for word in dic_cl_eq:\n if word not in dic_cl_eq[A]:\n if np.abs(np.log(freq_libri[word])/np.log(freq_sim) \\\n - np.log(freq_libri[A])/np.log(freq_sim)) <= 1:\n if (len(word) > np.around((1-len_sim)*len(A), decimals=2)) and \\\n (len(word) < np.around((1+len_sim)*len(A), decimals=2)):\n p_A = dic_word_phonemes[A]\n p_X = dic_word_phonemes[X]\n p_word = dic_word_phonemes[word]\n if np.abs(dist(p_A, p_X) - dist(p_X, p_word)) < edit_sim:\n B_1.append(word)\n line_1 = ' '.join([A, X] + B_1)\n \n if max(len(B_0), len(B_1)) == 0:\n print(X, A)\n \n line = line_0 if len(line_0) > len(line_1) else line_1\n if j < len(pairs) - 1:\n line += '\\n'\n file.write(line)\n \n file.close()", "def genChains(self):\n self.numMonomer = 0\n self.numBonds = 0\n self.numMols = 0\n self.numCations = 0\n self.numAnions = 0\n\n self.atomsCoords = []\n self.atomsType = []\n self.atomsCharge = []\n self.molId = []\n self.bondList = []\n \n for i in range(self.numPa + self.numPc):\n\n if i < self.numPc:\n # polycation chains, charge in LJ units of LAMMPS\n # electron charge would be 10.54 using bare LAMMPS LJ units\n # the dielectric constans of solvent is effectively taken as 111 when assign 1 to +e\n # just need to set dielectric as 0.72 in LAMMPS ot mimic water with dielectric constant 80\n self.beadCharge = 1\n self.beadType = 1 # atomic type for neutral beads in polycation chains\n self.chain = self.lenPc\n else:\n self.beadCharge = -1 # polyanion chains\n self.beadType = 3 # atomic type for neutral beads in polyanion chains\n self.chain = self.lenPa\n\n self.numMols += 1\n\n # generate the first bead of each chain randomly\n self.numMonomer += 1\n self.cxyz = np.random.rand(3) * self.box + self.lxyz\n\n self.atomsCoords.append(self.cxyz)\n #self.atomsType.append(self.beadType)\n\n # decide if the first bead is charged or not\n if self.chargeRepeat == 1:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsType.append(self.beadType)\n self.atomsCharge.append(0)\n\n self.molId.append(self.numMols)\n\n self.currpxyz = self.cxyz\n\n # follow random walk to generate the chain\n # generate the seconb bead of the chain\n self.theta, self.phi = np.random.rand(2) * np.array([np.pi, 2 * np.pi])\n self.ds = np.array([np.cos(self.theta), np.sin(self.theta) * np.cos(self.phi),\\\n np.sin(self.theta) * np.sin(self.phi)]) * self.segment\n\n self.cxyz = self.currpxyz + self.ds\n\n self.numMonomer += 1\n self.atomsCoords.append(self.cxyz)\n\n # decide if the second bead is charged or not\n if 2%self.chargeRepeat == 0:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsCharge.append(0)\n self.atomsType.append(self.beadType)\n\n 
self.molId.append(self.numMols)\n \n self.numBonds += 1\n self.bondList.append([self.numMonomer - 1, self.numMonomer])\n\n self.currpxyz = self.cxyz\n\n self.currtheta = self.theta\n self.currphi = self.phi\n\n self.dstot += self.ds\n\n # generating the rest beads of the chain\n\n for k in range(3, self.chain+1):\n # only accept atoms that are beyong certain distance\n # from the atom precding the current atom in the chain\n self.theta, self.phi = np.random.rand() * np.array([np.pi - self.stiffangle, \\\n 2 * np.pi])\n self.ds1 = np.array([np.cos(self.theta), np.sin(self.theta) * np.cos(self.phi),\\\n np.sin(self.theta) * np.sin(self.phi)]) * self.segment\n\n self.reverseXZrotation()\n self.cxyz = self.currpxyz + self.ds\n\n self.numMonomer += 1\n self.atomsCoords.append(self.cxyz)\n\n if k % self.chargeRepeat == 0:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsCharge.append(0)\n self.atomsType.append(self.beadType)\n\n self.molId.append(self.numMols)\n self.numBonds += 1\n self.bondList.append([self.numMonomer - 1, self.numMonomer])\n\n self.currpxyz = self.cxyz\n\n self.currtheta = np.arccos(self.ds[0]/self.segment)\n if self.ds[2] > 0:\n self.currphi = np.arccos(self.ds[1]/self.segment/np.sin(self.currtheta))\n else:\n self.currphi = 2*np.pi - np.arccos(self.ds[1]/self.segment/np.sin(self.currtheta))\n\n self.dstot += self.ds\n\n print \"%d beads are generated.\\n\" % self.numMonomer \n assert self.numMonomer == self.numPc * self.lenPc + self.numPa * self.lenPa, \\\n \"The number of monomers in chains is wrong!\\n\"\n assert self.numCations == int(np.floor(self.lenPc * self.chargeFraction)*self.numPc), \\\n \"The number of positively charged beads is wrong!\\n\"\n assert self.numAnions == int(np.floor(self.lenPa * self.chargeFraction)*self.numPa), \\\n \"The number of negatively charged beads is wrong!\\n\"", "def build_basis(self):\n if self.debug:\n print('sps_basis: rebuilding basis')\n # Setup the internal component basis arrays\n inwave = self.ssp.wavelengths\n nbasis = len(np.atleast_1d(self.params['mass']))\n self.nbasis = nbasis\n # nbasis = ( len(np.atleast_1d(self.params['zmet'])) *\n # len(np.atleast_1d(self.params['tage'])) )\n self.basis_spec = np.zeros([nbasis, len(inwave)])\n self.basis_mass = np.zeros(nbasis)\n\n i = 0\n tesc = self.params['dust_tesc']\n dust1, dust2 = self.params['dust1'], self.params['dust2']\n for j, zmet in enumerate(self.params['zmet']):\n for k, tage in enumerate(self.params['tage']):\n # get the intrinsic spectrum at this metallicity and age\n if self.safe:\n # do it using compsp\n if self.ssp._zcontinuous > 0:\n self.ssp.params['logzsol'] = zmet\n else:\n self.ssp.params['zmet'] = zmet\n w, spec = self.ssp.get_spectrum(tage=tage, peraa=True)\n mass = self.ssp.stellar_mass\n else:\n # do it by hand. 
Faster but dangerous\n spec, mass, lbol = self.ssp.ztinterp(zmet, tage, peraa=True)\n self.basis_spec[i, :] = spec\n self.basis_mass[i] = mass\n i += 1\n self.basis_dirty = False", "def create_model():\n # Get list of all syllables: [\"<s>\", \"AH\", \"</s>\", \"<s>\", \"T\", ...]\n syllabifier = Syllabifier()\n all_syllables = syllabifier.all_syllables()\n\n # Count conditional probabilties of phoneme tuples\n tcf = TrigramCollocationFinder.from_words(all_syllables)\n bcf = BigramCollocationFinder.from_words(all_syllables)\n tri_dict = dict(sorted(tcf.ngram_fd.items(), key=lambda t: (-t[1], t[0])))\n bi_dict = dict(sorted(bcf.ngram_fd.items(), key=lambda t: (-t[1], t[0])))\n\n # Create dictionary to count cond prob all phoneme tuples\n accepted_phonemes = [i[0] for i in cmudict.phones()]\n accepted_phonemes.append('<s>')\n accepted_phonemes.append('</s>')\n phoneme_tups = [p for p in itertools.product(accepted_phonemes, repeat=3)]\n cond_probs_dict = dict([(char, 0) for char in phoneme_tups])\n\n for t in tri_dict:\n p1, p2, p3 = t[0], t[1], t[2]\n tri_count = tri_dict[t]\n bi_count = bi_dict[(p1, p2)]\n if bi_count > 1:\n cond_prob = tri_count * 1.0 / bi_count\n else:\n cond_prob = 0.0\n cond_probs_dict[(p1, p2, p3)] = cond_prob\n\n pickle.dump(cond_probs_dict, open(COND_PROBS_PATH, \"wb\"))\n return", "def schreier_sims_random(self, base=None, gens=None, consec_succ=10,\n _random_prec=None):\n if base is None:\n base = []\n if gens is None:\n gens = self.generators\n base_len = len(base)\n n = self.degree\n # make sure no generator fixes all base points\n for gen in gens:\n if all(gen(x) == x for x in base):\n new = 0\n while gen._array_form[new] == new:\n new += 1\n base.append(new)\n base_len += 1\n # distribute generators according to basic stabilizers\n strong_gens_distr = _distribute_gens_by_base(base, gens)\n # initialize the basic stabilizers, basic transversals and basic orbits\n transversals = {}\n orbs = {}\n for i in range(base_len):\n transversals[i] = dict(_orbit_transversal(n, strong_gens_distr[i],\n base[i], pairs=True))\n orbs[i] = list(transversals[i].keys())\n # initialize the number of consecutive elements sifted\n c = 0\n # start sifting random elements while the number of consecutive sifts\n # is less than consec_succ\n while c < consec_succ:\n if _random_prec is None:\n g = self.random_pr()\n else:\n g = _random_prec['g'].pop()\n h, j = _strip(g, base, orbs, transversals)\n y = True\n # determine whether a new base point is needed\n if j <= base_len:\n y = False\n elif not h.is_Identity:\n y = False\n moved = 0\n while h(moved) == moved:\n moved += 1\n base.append(moved)\n base_len += 1\n strong_gens_distr.append([])\n # if the element doesn't sift, amend the strong generators and\n # associated stabilizers and orbits\n if y is False:\n for l in range(1, j):\n strong_gens_distr[l].append(h)\n transversals[l] = dict(_orbit_transversal(n,\n strong_gens_distr[l], base[l], pairs=True))\n orbs[l] = list(transversals[l].keys())\n c = 0\n else:\n c += 1\n # build the strong generating set\n strong_gens = strong_gens_distr[0][:]\n for gen in strong_gens_distr[1]:\n if gen not in strong_gens:\n strong_gens.append(gen)\n return base, strong_gens", "def generate_burst_train( base, z, x_0, dist, xi_p, mass, radius,\n bean, full_model=False, debug=False):\n\n forward, backward = True, True # go in both directions at the start\n\n mdot_max = -1\n\n # Now to go ahead and try to simulate the bursts train with the resulting\n # best set of parameters\n # Start from the *second* 
(observed) burst in the train\n # Originally this would have simulated four bursts following the reference,\n # and three preceding. However, the last burst in the train (the 8th) for\n # runs test17 were wildly variable, so now restrict the extent by one\n\n if bean.bstart is not None:\n sbt = bean.bstart[bean.ref_ind]\n else:\n # In the absence of any bursts, set the reference time to ref_ind (can be\n # any time within the outburst)\n # sbt = 0.0\n sbt = bean.ref_ind\n\n salpha = -1\n flag = 1 # Initially OK\n\n stime = [] # initialise array to store simulated times\n earliest = sbt # this is the earliest burst in the train\n latest = sbt # this is the time of the latest burst in the train\n # for i in range (0,2*(1+double)+1): # Do the 5th burst also, forward only\n for i in range(0, bean.numburstssim): # Do the 5th burst also, forward only\n\n # Here we adopted recurrence time corrections for SAX\n\t# J1808.4--3658 ,since the accretion rate is not constant over the\n\t# extrapolated time, resulting in the recurrence time being\n\t# underestimated by settle. Correction factors are from Zac\n\t# Johnston, calculated using KEPLER\n\n\t# if i == 0: # This is observed burst at 1.89 cfac1 = 1.02041\n # cfac2 = 1.02041\n # if (\n # i == 1\n # ): # to the right this is 3rd observed burst, to left it is predicted burst\n # cfac1 = 1.00\n # cfac2 = 1.1905\n # if (\n # i == 2\n # ): # to the right this is 4th observed burst, to left is predicted burst\n # cfac1 = 1.00\n # cfac2 = 1.2346\n # if (\n # i == 3\n # ): # to the right this is final predicted burst, to the left is first observed burst (note that cfac = 1.25 is estimated interpolation)\n # cfac1 = 1.00\n # cfac2 = 1.25\n # if i == 4: # to the right this is final predicted burst, to the left is first observed burst (note that cfac = 1.25 is estimated interpolation)\n # cfac1 = 0.98\n # cfac2 = 1.27\n\n if backward:\n # Find the time for the *previous* burst in the train\n result2 = next_burst( base, z, x_0, earliest, bean,\n dist, xi_p, 1.0, mass, radius, direction=-1, debug=debug)\n\n if forward:\n # Also find the time for the *next* burst in the train\n result3 = next_burst( base, z, x_0, latest, bean,\n dist, xi_p, 1.0, mass, radius, direction=1, debug=debug)\n\n if result2 is not None:\n # we have a result from the next_burst call going backward, so add its properties to the arrays\n t2 = result2.t2[0]\n _alpha = result2.alpha[0]\n _e_b = result2.e_b[0]\n _mdot = result2.mdot\n if salpha == -1:\n # create the arrays with which to accumulate the results\n stime = [t2, sbt]\n iref = 1 # index for reference burst\n salpha = [_alpha]\n se_b = [_e_b]\n smdot = [_mdot]\n else:\n stime.insert(0, t2)\n iref += 1\n salpha.insert(0, _alpha)\n se_b.insert(0, _e_b)\n smdot.insert(0, _mdot)\n earliest = t2\n else:\n # if the earlier burst has failed, we don't need to pursue any further\n backward = False\n\n if result3 is not None:\n # we have a result from the next_burst call going forward, so add its properties to the arrays\n t3 = result3.t2[0]\n _alpha2 = result3.alpha[0]\n _e_b2 = result3.e_b[0]\n _mdot2 = result3.mdot\n if salpha == -1:\n # This shouldn't happen, as we should be able to get at least one earlier burst\n stime = [sbt, t3]\n iref = 0\n salpha = [_alpha2]\n se_b = [_e_b2]\n smdot = [_mdot2]\n else:\n salpha.append(_alpha2)\n se_b.append(_e_b2)\n smdot.append(_mdot2)\n stime.append(t3)\n latest = t3\n\n # Check the results here\n\n # I don't think t2 or t3 are ever set to these \"dummy\" values anymore\n # if abs(t2) == 99.99 or 
abs(t3) == 99.99:\n if not (forward or backward):\n break\n\n if (mdot_max == -1) & (len(stime) > 0):\n\n mdot_max = max(smdot)\n\n result = dict()\n\n if full_model:\n # model parameters are redundant for the model returned\n result[\"base\"] = [base]\n result[\"z\"] = [z]\n result[\"x_0\"] = [x_0]\n result[\"dist\"] = [dist]\n result[\"xi_p\"] = [xi_p]\n\n result[\"mdot_max\"] = [mdot_max]\n\n result[\"mass\"] = [mass]\n result[\"radius\"] = [radius]\n\n result[\"forward\"] = forward # to keep track of the outcome of each direction\n result[\"backward\"] = backward\n\n # now the actual predictions\n\n result[\"time\"] = stime\n if len(stime) > 0:\n # The simulation might fail to generate any bursts, so only add the arrays if they exist\n result[\"mdot\"] = smdot\n # this is redundant, can be worked out from the times\n # result[\"iref\"] = iref\n result[\"alpha\"] = salpha\n result[\"e_b\"] = se_b\n #print(f\"In burstrain fluence is {se_b}\")\n\n\n return result", "def genPrimerPairs_5Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 5\\' extension half-asstemers')\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[10:12]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_f10 = forwPrimer5_3[:10]\n print(f\"First 10 Nucleotides of forward primer: {forwPrimer_f10}\")\n\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_f10)):\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n\n revPrimer5_3 = revPrimer_f10 + forwPrimer_f10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def FASTBlade2SID(ed_file=None, Imodes_bld=[0,1,2], method='ShapeFunctions', startAtRoot=True, int_method='OpenFAST', consistency='OpenFAST', AdjBlMs=None):\r\n import welib.weio as weio\r\n from welib.fast.elastodyn import bladeParameters\r\n from welib.fast.elastodyn import bladeDerivedParameters\r\n\r\n\r\n if method=='FEM':\r\n\r\n parentDir=os.path.dirname(ed_file)\r\n ed = weio.read(ed_file)\r\n TipRad = ed['TipRad']\r\n HubRad = ed['HubRad']\r\n BldLen= TipRad-HubRad;\r\n BldFile = ed['BldFile(1)'].replace('\"','')\r\n BldFile=os.path.join(parentDir,BldFile)\r\n bld = weio.FASTInputFile(BldFile).toDataFrame()\r\n z = bld['BlFract_[-]']*BldLen + HubRad\r\n m = bld['BMassDen_[kg/m]'] # mu\r\n EIy = bld['FlpStff_[Nm^2]']\r\n EIz = bld['EdgStff_[Nm^2]'] # TODO actually EIx, but FEM beams along x\r\n phi = bld['PitchAxis_[-]']/(180)*np.pi \r\n # --- Derived parameters\r\n phi= np.concatenate(([0], np.diff(phi))) #% add phi_abs(1) to pitch angle\r\n A = m*0 + 100 # Area\r\n Kv = m*0 + 100 # Saint Venant torsion\r\n E = 214e9 # Young modulus \r\n Iy = EIy/E\r\n Iz = EIz/E\r\n xNodes = np.zeros((3,len(m)))\r\n xNodes[2,:]=z\r\n sid = Beam2SID(xNodes, Imodes_bld, m, Iy, Iz, Kv=Kv, A=A, E=E, phi=phi)\r\n\r\n elif method=='ShapeIntegral':\r\n # Extract relevant blade data\r\n p = bladeParameters(ed_file, AdjBlMs=AdjBlMs)\r\n if not startAtRoot:\r\n p['s_G0'][2,:] += p['HubRad']\r\n # TODO downselect 
modes\r\n # Compute SID based on twisted shape functions\r\n sid = BeamShapes2SID(p['s_G0'], p['s_span'], p['m'], p['EI'], p['Ut'][Imodes_bld], p['Vt'][Imodes_bld], p['Kt'][Imodes_bld], int_method=int_method, damping=None, consistency=consistency, shapeIntegrals=True)\r\n\r\n\r\n elif method=='ShapeFunctions':\r\n p = bladeParameters(ed_file, AdjBlMs=AdjBlMs)\r\n #p = bladeDerivedParameters(p, inertiaAtBladeRoot=startAtRoot)\r\n if not startAtRoot:\r\n p['s_G0'][2,:] += p['HubRad']\r\n # TODO downselect modes\r\n # Compute SID based on twisted shape functions\r\n sid = BeamShapes2SID(p['s_G0'], p['s_span'], p['m'], p['EI'], p['Ut'][Imodes_bld], p['Vt'][Imodes_bld], p['Kt'][Imodes_bld], int_method=int_method, damping=None, consistency=consistency, shapeIntegrals=False)\r\n\r\n else:\r\n raise NotImplementedError(method)\r\n\r\n return sid", "def part3e_1():\n # Run on a few examples and see that the constraints are being met.\n xs = \"Werner & Co entered court today . Werner maintained that they were not guilty .\".split()\n ys = \"-ORG- -ORG- -ORG- -O- -O- -O- -O- -ORG- -O- -O- -O- -O- -O- -O- -O-\".split()\n assert len(xs) == len(ys)\n\n N = 50000\n ys_ = submission.computeGibbsBestSequence(\n englishCRF,\n submission.getLongRangeCRFBlocks,\n submission.chooseGibbsLongRangeCRF,\n xs, \n N)\n grader.requireIsEqual( ys, ys_ )", "def make_bangbang_model(d):\n icdict = {'x': 35, 'y': 0}\n # d < 0 => under-damped\n # d > 0 => over-damped\n # d = +/- 0.025 is a good choice\n pardict = {'a': 0.1, 'x0': 35,\n 'S': 0, 'd': d}\n\n DSargs = args()\n DSargs.name = 'saccade_bangbang'\n DSargs.ics = icdict\n DSargs.pars = pardict\n DSargs.tdata = [0, 50]\n DSargs.varspecs = {'x': 'y',\n 'y': 'S -(2*a+d)*y + a*a*(x0-x)'}\n DSargs.fnspecs = {'Jacobian': (['t', 'x', 'y'],\n \"\"\"[[0, 1],\n [-a*a, -(2*a+d)]]\n \"\"\")}\n return Generator.Vode_ODEsystem(DSargs)", "def generate_DOS(B, tau_q, **kwargs):\n \n # Read in the keyword arguments\n eps = kwargs.get('eps') # default to None\n LL_energies = kwargs.get('LL_energies')\n T_low = kwargs.get('T_low', 0.1)\n T_high = kwargs.get('T_high', 1)\n n_e = kwargs.get('n_e', 3e15)\n factor = kwargs.get('factor', 10)\n tau_q_dep = kwargs.get('tau_q_dep', lambda B: 1) # not used yet!\n broadening = kwargs.get('broadening', 'Gaussian')\n E_spin = kwargs.get('E_spin', lambda B: 0) # spin gap is zero\n \n \n # calculate cyclotron frequency, convert into energy in units of Kelvin\n E_c = omega_c(B) * hbar / k_b # in K\n\n \n if broadening == 'Gaussian':\n broaden = lambda eps, eps_0, gamma: gauss(eps, eps_0, gamma)\n eps_width = 6\n elif broadening == 'Lorentzian':\n broaden = lambda eps, eps_0, gamma: lorentz(eps, eps_0, gamma)\n eps_width = 30\n \n # by default, take spinless Landau levels with gaps of E_c\n # I'm not sure about the added 0.5, which is not included in Zhang but is\n # in other references such as Kobayakawa\n\n if eps is None:\n eps = generate_eps(T_low, T_high, n_e, factor)\n\n # precalculate sigma squared for the Gaussian\n #sigma2 = 0.5 * E_c * hbar / (np.pi * tau_q * k_b) # sigma squared\n #sigma = sqrt(sigma2)\n gamma = 0.5 * hbar/(k_b * tau_q)\n sigma = gamma/sqrt(2)\n \n ### we could also intelligently choose Landau levels to sum over\n ### let's commit first before modifying this...\n \n if LL_energies is None:\n # choose LLs only in a range such that their broadening reaches\n # all the way to the fermi level.\n \n E_min = max (np.amin (eps) - gamma * eps_width, E_c)\n E_max = np.amax(eps) + gamma * eps_width\n LL_max = np.ceil(E_max/E_c - 
0.5)\n LL_min = np.floor(E_min/E_c - 0.5)\n LL_energies = E_c * (np.arange(LL_min, LL_max+1, 1) + 0.5)\n \n\n # the prefactor normalizes the height of the Gaussian, accounting for\n # the broadening given by sigma2\n #prefactor = np.sqrt(omega_c(B) * tau_q)\n\n # Sum over Gaussians centred at E_c *N. This could be done more\n # pythonically or more efficiently\n # Should also make it so you can pass in your own Landau level spacings,\n # so that you can use spin-split LLs\n return_value = np.zeros(len(eps))\n for eps_0 in LL_energies:\n #return_value += exp(-(eps - eps_0)**2 / (2 * sigma**2))\n \n ## broaden should return a gaussian with area = 1. However, each \n ## gaussian accounts for an area \n return_value += 0.5 * E_c * broaden(eps, eps_0 - E_spin(B)/2, sigma)\n return_value += 0.5 * E_c * broaden(eps, eps_0 + E_spin(B)/2, sigma)\n #print eps_0-E_spin(B), eps_0+E_spin(B)\n \n #return [eps, prefactor * return_value]\n return [eps, return_value]", "def generate_testsystem(smiles = 'CCCC',\n forcefield_files = ['amber14/protein.ff14SB.xml', 'amber14/tip3p.xml'],\n forcefield_kwargs = {'removeCMMotion': False, 'ewaldErrorTolerance': 1e-4, 'constraints' : None, 'hydrogenMass' : 4 * unit.amus},\n nonperiodic_forcefield_kwargs = {'nonbondedMethod': app.NoCutoff},\n periodic_forcefield_kwargs = {'nonbondedMethod': app.PME},\n small_molecule_forcefield = 'gaff-2.11',\n padding=9*unit.angstroms,\n ionicStrength=0.0*unit.molar,\n water_model = 'tip3p',\n pressure = 1.0 * unit.atmosphere,\n temperature = 300 * unit.kelvin,\n barostat_period = 50,\n **kwargs\n ):\n from openforcefield.topology import Molecule\n from perses.utils.openeye import smiles_to_oemol\n from openmmforcefields.generators.system_generators import SystemGenerator\n from perses.utils.openeye import OEMol_to_omm_ff\n from simtk import openmm\n from qmlify.utils import pull_force_by_name\n\n oemol = smiles_to_oemol(smiles)\n off_molecules = [Molecule.from_openeye(oemol)]\n vac_system_generator = SystemGenerator(forcefields=forcefield_files,\n small_molecule_forcefield=small_molecule_forcefield,\n forcefield_kwargs=forcefield_kwargs,\n nonperiodic_forcefield_kwargs = nonperiodic_forcefield_kwargs, molecules = off_molecules)\n barostat = openmm.MonteCarloBarostat(pressure, temperature, barostat_period)\n sol_system_generator = SystemGenerator(forcefields=forcefield_files,\n small_molecule_forcefield=small_molecule_forcefield,\n forcefield_kwargs=forcefield_kwargs,\n periodic_forcefield_kwargs = periodic_forcefield_kwargs,\n molecules = off_molecules,\n barostat = barostat)\n\n\n vac_system, vac_positions, vac_topology = OEMol_to_omm_ff(oemol, vac_system_generator)\n\n #now i can attempt to solvate\n modeller = app.Modeller(vac_topology, vac_positions)\n modeller.addSolvent(sol_system_generator.forcefield, model=water_model, padding=padding, ionicStrength=ionicStrength)\n sol_positions, sol_topology = modeller.getPositions(), modeller.getTopology()\n sol_positions = unit.quantity.Quantity(value = np.array([list(atom_pos) for atom_pos in sol_positions.value_in_unit_system(unit.md_unit_system)]), unit = unit.nanometers)\n sol_system = sol_system_generator.create_system(sol_topology)\n\n vac_sys_pos_top = (vac_system, vac_positions, vac_topology)\n sol_sys_pos_top = (sol_system, sol_positions, sol_topology)\n\n #a quick assertion to make sure the nonbonded forces are being treated properly\n vac_nbf, sol_nbf = pull_force_by_name(vac_system, 'NonbondedForce'), pull_force_by_name(sol_system, 'NonbondedForce')\n assert not 
vac_nbf.usesPeriodicBoundaryConditions()\n assert sol_nbf.usesPeriodicBoundaryConditions()\n\n return vac_sys_pos_top, sol_sys_pos_top", "def create_CGfiles_using_martinizepy(Ctermini_type, set_charge, name):\n\n os.system('cp %s/%s ./'%(this_path,martini_itp))\n\n os.system('python2 %s/martinize.py -f %s_aa.pdb \\\n -o %s.top -x %s.pdb -name %s -ff martini22 \\\n -nt \\\n -ss CCCCCCCCCCCC '%(this_path,name,name,name,name))\n\n\n # Collect lines defining atoms\n lines_atoms = []\n break1,break2 = None,None\n with open('%s.itp'%name, 'r') as f:\n data = f.readlines()\n start = False\n for i,line in enumerate(data):\n if '[ atoms ]' in line:\n start = True\n break1 = i+1\n continue\n if start:\n if line.split()==[]:\n start = False\n break2 = i\n break\n lines_atoms = lines_atoms + [line]\n \n \n\n # Modify lines_atoms as per Ctermini\n charged_thusfar = 0\n if Ctermini_type.upper() == 'OH':\n for i in range(len(lines_atoms))[::-1]:\n if 'BB' in lines_atoms[i]:\n lines_atoms[i] = lines_atoms[i].replace(' 0.0', '-1.0')\n lines_atoms[i] = lines_atoms[i].replace('P5', 'Qa') \n charged_thusfar += -1\n break\n\n\n # modify charge of side chains,\n # CURRENTLY only neutralizes if Qd SC is found (deprotonation)\n neutralize_ahead = False\n if set_charge < 0: # deprotonation\n for i in range(len(lines_atoms))[::-1]:\n if charged_thusfar == set_charge:\n neutralize_ahead = True\n \n if ('SC' in lines_atoms[i]) and ('-1.0' in lines_atoms[i]):\n if 'Qa' not in lines_atoms[i]:\n raise RuntimeError('-1.0 charge without Qa bead is found')\n if neutralize_ahead:\n lines_atoms[i] = lines_atoms[i].replace('-1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qa', 'P1')\n else:\n charged_thusfar += -1\n\n if ('SC' in lines_atoms[i]) and (' 1.0' in lines_atoms[i]):\n if 'Qd' not in lines_atoms[i]:\n raise RuntimeError('1.0 charge without Qd bead is found')\n lines_atoms[i] = lines_atoms[i].replace('1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qd', 'P1')\n\n if charged_thusfar != set_charge:\n raise ValueError('Peptide sequence could not be used to achieve set_charge')\n\n elif set_charge == 0: # protonation-deprotonation\n if Ctermini_type == 'OH':\n raise ValueError('Protonation after deprotonation does not make sense')\n \n for i in range(len(lines_atoms))[::-1]:\n if ('SC' in lines_atoms[i]) and ('-1.0' in lines_atoms[i]):\n if 'Qa' not in lines_atoms[i]:\n raise RuntimeError('-1.0 charge without Qa bead is found')\n lines_atoms[i] = lines_atoms[i].replace('-1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qa', 'P1')\n\n if ('SC' in lines_atoms[i]) and (' 1.0' in lines_atoms[i]):\n if 'Qd' not in lines_atoms[i]:\n raise RuntimeError('1.0 charge without Qd bead is found')\n lines_atoms[i] = lines_atoms[i].replace('1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qd', 'P1')\n \n elif set_charge > 0: # protonation\n if Ctermini_type == 'OH':\n raise ValueError('Protonation after deprotonation does not make sense')\n\n for i in range(len(lines_atoms))[::-1]:\n if charged_thusfar == set_charge:\n neutralize_ahead = True\n\n if ('SC' in lines_atoms[i]) and ('-1.0' in lines_atoms[i]):\n if 'Qa' not in lines_atoms[i]:\n raise RuntimeError('-1.0 charge without Qa bead is found')\n lines_atoms[i] = lines_atoms[i].replace('-1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qa', 'P1')\n \n if ('SC' in lines_atoms[i]) and (' 1.0' in lines_atoms[i]):\n if 'Qd' not in lines_atoms[i]:\n raise RuntimeError('1.0 charge without Qd bead is found')\n if neutralize_ahead:\n lines_atoms[i] 
= lines_atoms[i].replace('1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qd', 'P1')\n else:\n charged_thusfar += 1\n \n if charged_thusfar != set_charge:\n raise ValueError('Peptide sequence could not be used to achieve set_charge')\n\n\n data_new = ''\n for line in data[:break1]:\n data_new += line\n for line in lines_atoms:\n data_new += line\n for line in data[break2:]:\n data_new += line\n \n \n with open('%s.itp'%name, 'w') as f:\n f.write(data_new)", "def casdetude_genetics_sk():\n file_path = PROJECT_PATH + \"/geographycal_data/Monterusciello/MontEdo_buildings\"\n router = Router(building_file=file_path)\n\n router.design_aqueduct(0)\n\n router.write2epanet(router.acqueduct, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\",\n diam=False)\n\n read_epanet = graphIO.graph_reader(router.acqueduct)\n read_epanet.read_epanet(PROJECT_PATH + \"/geographycal_data/SolvedNet/MonteSolution\")\n\n minimal = router.design_minimal_aqueduct(router.acqueduct, \"Q*H\")\n router.write2epanet(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\", diam=False)\n\n read_epanet = graphIO.graph_reader(router.acqueduct)\n read_epanet.read_epanet(PROJECT_PATH + \"/geographycal_data/SolvedNet/prova\")\n kpi_calculator(router.acqueduct)", "def generate_schreier_sims(self, af=False):\n\n n = self._degree\n u = self.basic_transversals\n basic_orbits = self._basic_orbits\n if len(u) == 0:\n for x in self.generators:\n if af:\n yield x._array_form\n else:\n yield x\n return\n if len(u) == 1:\n for i in basic_orbits[0]:\n if af:\n yield u[0][i]._array_form\n else:\n yield u[0][i]\n return\n\n u = list(reversed(u))\n basic_orbits = basic_orbits[::-1]\n # stg stack of group elements\n stg = [list(range(n))]\n posmax = [len(x) for x in u]\n n1 = len(posmax) - 1\n pos = [0]*n1\n h = 0\n while 1:\n # backtrack when finished iterating over coset\n if pos[h] >= posmax[h]:\n if h == 0:\n return\n pos[h] = 0\n h -= 1\n stg.pop()\n continue\n p = _af_rmul(u[h][basic_orbits[h][pos[h]]]._array_form, stg[-1])\n pos[h] += 1\n stg.append(p)\n h += 1\n if h == n1:\n if af:\n for i in basic_orbits[-1]:\n p = _af_rmul(u[-1][i]._array_form, stg[-1])\n yield p\n else:\n for i in basic_orbits[-1]:\n p = _af_rmul(u[-1][i]._array_form, stg[-1])\n p1 = _af_new(p)\n yield p1\n stg.pop()\n h -= 1", "def subprogram3(modulusBone,modulusImplant,bodyWeight,outerDia,canalDiameter):\n force = 30 * bodyWeight\n compressive_stress = -4 * force / (math.pi * (outerDia ** 2 - canalDiameter ** 2)) # assume bone is hollow cylinder\n stressReduc = compressive_stress * math.sqrt((2 * modulusBone) / (modulusBone + modulusImplant))\n Eratio = math.sqrt(modulusImplant/modulusBone)\n years = 0 # initializing years\n y = 0.001 * (years ** 2) - 3.437 * Eratio * years + 181.72 # relationship between compressive strength of bone and\n # number of years since implantation\n\n bone_stress = abs(stressReduc) # reduced stress being applied is negative but we need the absolute value\n\n while bone_stress < y: # checking to see when the reduced stress is greater than the bone strength\n years += 1 # adding 1 to years each time the bone strength is bigger than the reduced stress\n y = 0.001 * (years ** 2) - 3.437 * Eratio * years + 181.72\n\n yrsFail = years - 1 # we want exactly one year before the implant fails\n stressFail = 0.001 * (yrsFail ** 2) - 3.437 * Eratio * yrsFail + 181.72\n\n if yrsFail < 0: # cannot have negative years before failure\n print(\"\")\n print(\"Output:\")\n print(\"Number of Years before 
Fracture Risk = \", 0, \"Years\")\n print(\"Compressive Stress on Bone corresponding to Failure= \", round(stressFail, 2), \"MPa\")\n print(\"----------------\")\n print(\" \")\n print(\" \")\n return\n\n print(\"\")\n print(\"Output:\")\n print(\"Number of Years before Fracture Risk = \", yrsFail, \"Years\")\n print(\"Compressive stress on Bone corresponding to Failure= \", round(stressFail, 2), \"MPa\")\n print(\"----------------\")\n print(\" \")\n print(\" \")", "def change_input_rs(filename, force=50, restart_time=50_000, seed=None):\r\n\r\n def get_n_polymers(filename):\r\n \"\"\"Gets the number of membrane lipids from the dmpcis file\"\"\"\r\n \r\n dmpcis_name = re.sub('dmpci', 'dmpcis', filename)\r\n\r\n with open(dmpcis_name, 'rt') as f:\r\n for line in f:\r\n if line.startswith(' # of type 1'):\r\n n_polymers = int(line.strip().split()[-1])\r\n\r\n return n_polymers\r\n\r\n\r\n n_polymers = get_n_polymers(filename)\r\n\r\n commands = {'ToggleBeadDisplay': f\"1\\t\\tW\",\r\n 'SetCurrentStateCamera': f\"1\\t\\t0.5 -0.5 -0.5\\t0.5 0.5 0.5\",\r\n 'SetCurrentStateDefaultFormat': f\"1\\t\\tParaview\\n\",\r\n\r\n 'SelectPolymerTypeInSimBox': f\"1\\t\\tsingleLipid\\tSingleLipid\",\r\n 'SelectBeadTypeInSimBox': f\"1\\t\\tsingleLipidHead\\tHS\",\r\n 'SelectBeadTypeInSlice': f\"1\\t\\tlowerLipidHeads\\tH\\t0 0 1\\t0.5 0.5 0.46875\\t0.5 0.5 0.03125\\n\",\r\n\r\n # 'MSDOfPolymerTarget':\t f\"{1}\\t\\tsingleLipid\\tlipidDisplacement\\t1\\t5000\",\r\n # 'CylinderLinearForceOnTarget': f\"{2}\\t\\tsingleLipid\\tf_anchor\\t0 0 1\\t0.5 0.5 0.53125\\t2\",\r\n # 'FreezeBeadsInSlice': f\"1000\\t\\t0 0 1\\t0\\t4\",\r\n # 'ConstantForceOnTarget':\t [f\"1000\\t\\tsingleLipidHead\\tf_s\\t0 0 1\\t{force /3:.6f}\",\r\n # f\"1000\\t\\tlowerHeads\\tf_l\\t0 0 1\\t{force /n_polymers /6:.6f}\\n\"],\r\n\r\n # 'RemoveCommandTargetActivity': [f\"1000\\t\\tf_anchor\", f\"2000\\t\\tf_s\", f\"2000\\t\\tf_l\"]\r\n\r\n 'MSDOfPolymerTarget':\t f\"1\\t\\tsingleLipid\\tlipidDisplacement\\t1\\t5000\",\r\n 'FreezeBeadsInSlice': f\"1000\\t\\t0 0 1\\t0\\t4\",\r\n 'ConstantForceOnTarget':\t [f\"1000\\t\\tsingleLipidHead\\tf_s\\t0 0 1\\t{force /3:.6f}\",\r\n f\"1000\\t\\tlowerHeads\\tf_l\\t0 0 1\\t{force /n_polymers /6:.6f}\\n\"],\r\n\r\n 'RemoveCommandTargetActivity': [f\"2000\\t\\tf_s\", f\"2000\\t\\tf_l\"]\r\n }\r\n\r\n \r\n run_id = filename.split('.')[-1]\r\n\r\n with open(filename+'_rs', 'wt') as wf:\r\n with open(filename, 'rt') as rf:\r\n\r\n for line in rf:\r\n if line.strip().startswith(\"State\"):\r\n wf.write(f\"State\\trestart\\nRunId\\t{run_id:s}\\nStateId\\t{restart_time:d}\\n\\n\")\r\n\r\n while not line.strip().startswith(\"Bead\"):\r\n line = next(rf)\r\n\r\n if line.startswith('Time'):\r\n wf.write(f\"Time\\t\\t5000\\n\")\r\n continue\r\n\r\n if line.startswith('Step'):\r\n wf.write(f\"Step\\t\\t0.02\\n\")\r\n continue\r\n\r\n if line.startswith('DensityPeriod'):\r\n wf.write(f\"DensityPeriod\\t5000\\n\")\r\n continue\r\n\r\n if line.startswith('RestartPeriod'):\r\n wf.write(f\"RestartPeriod\\t5000\\n\")\r\n continue\r\n\r\n if re.match('\\t+Times', line):\r\n wf.write(f\"\\tTimes\\t{0}\\t5000\\n\")\r\n continue\r\n\r\n if line.startswith('Command'):\r\n continue\r\n \r\n wf.write(line)\r\n\r\n for command, value in commands.items():\r\n if isinstance(value, str):\r\n wf.write(f\"Command {command}\\t\\t{value}\\n\")\r\n\r\n elif isinstance(value, list):\r\n for val in value:\r\n wf.write(f\"Command {command}\\t\\t{val}\\n\")\r\n\r\n else:\r\n print(\"I have a bad feeling about this\")\r\n\r\n return run_id", 
"def build_tweaks(mitralsclub, nospineinh, nosingles,\n nojoints, nomultis, nopgs, onlytwomits, \n includeProjections=[], twomitrals=(0,2), nolateral=False):\n excludePopulations = []\n excludeProjections = ['SA']\n ## In odor_pulses, odor_morphs, scaled_pulses, I have not specified to include \n ## file-based inputs to 2nd order cells as below. If not specified, force include:\n if 'granule_baseline' not in includeProjections: includeProjections.append('granule_baseline')\n if 'ORN_PG' not in includeProjections: includeProjections.append('ORN_PG')\n if not mitralsclub:\n excludeProjections.append('mitral_granule_extra_exc')\n if nospineinh:\n excludeProjections.append('_spinesingles')\n excludeProjections.append('_spinejoints')\n excludeProjections.append('_spinemultis')\n if nosingles:\n excludePopulations.append('singles')\n excludeProjections.append('_singles') # _ to avoid deleting spinesingles\n if nojoints:\n excludePopulations.append('joints')\n excludeProjections.append('_joints') # _ to avoid deleting spinejoints\n if nomultis:\n excludePopulations.append('multis')\n excludeProjections.append('_multis') # _ to avoid deleting spinemultis\n if nopgs:\n excludePopulations.append('PGs')\n excludeProjections.append('PG')\n if onlytwomits:\n onlyInclude = {'includePopulation':('mitrals',[str(twomitrals[0]),str(twomitrals[1])]),\n 'includeProjections':includeProjections}\n return {'excludePopulations':excludePopulations,\n 'excludeProjections':excludeProjections,'onlyInclude':onlyInclude}\n else:\n if nolateral:\n ## remove other mitrals so that there is no lateral inhibition\n ## differs from nojoints, in keeping the joints self-inhibition\n print \"EXCLUDING OTHER MITS, KEEPING ONLY mits 0 and 1\"\n onlyInclude = {'includePopulation':('mitrals',['0','1']),\n 'includeProjections':includeProjections}\n return {'excludePopulations':excludePopulations,\n 'excludeProjections':excludeProjections,'onlyInclude':onlyInclude}\n else:\n return {'excludePopulations':excludePopulations,\\\n 'excludeProjections':excludeProjections}", "def test_f2_circuit_maker(self):\n fho = tfho.test_file_handle_object()\n W = 5\n G = 20\n fg = .9\n X = 10\n fx = .85\n gate_maker = g.TYPE_TO_GATE_GEN[g.TEST_TYPES.RANDOM]\n # family 2 files:\n t_circuit_file_name = \"circuit_file_trimming\"\n t_circuit_file = fho.get_file_object(t_circuit_file_name, 'w')\n t_input_file_name = \"input_file_trimming\"\n t_input_file = fho.get_file_object(t_input_file_name, 'w')\n t_output_file_name = \"output_file_trimming\"\n t_output_file = fho.get_file_object(t_output_file_name, 'w')\n nt_circuit_file_name = \"circuit_file_no_trimming\"\n nt_circuit_file = fho.get_file_object(nt_circuit_file_name, 'w')\n nt_input_file_name = \"input_file_no_trimming\"\n nt_input_file = fho.get_file_object(nt_input_file_name, 'w')\n nt_output_file_name = \"output_file_no_trimming\"\n nt_output_file = fho.get_file_object(nt_output_file_name, 'w')\n level_type_array = [g.LEVEL_TYPES.XOR, g.LEVEL_TYPES.RANDOM,\n g.LEVEL_TYPES.XOR]\n F = 2\n # make a family 1 circuit with trimming:\n sr.seed(self.rand_seed)\n t_gen = g.f1f2_circuit_maker_with_trimming_switch(W, G, fg,\n t_circuit_file,\n t_input_file,\n t_output_file,\n X, fx, gate_maker,\n level_type_array, True)\n t_gen.generate()\n # make a family 1 circuit without trimming, with the same randomness:\n sr.seed(self.rand_seed)\n nt_gen = g.f1f2_circuit_maker_with_trimming_switch(W, G, fg,\n nt_circuit_file,\n nt_input_file,\n nt_output_file,\n X, fx, gate_maker,\n level_type_array, False)\n 
nt_gen.generate()\n # obtain strings representing the contents of all the resulting files:\n t_circuit_string = fho.get_file(t_circuit_file_name).getvalue()\n t_input_string = fho.get_file(t_input_file_name).getvalue()\n t_output_string = fho.get_file(t_output_file_name).getvalue()\n nt_circuit_string = fho.get_file(nt_circuit_file_name).getvalue()\n nt_input_string = fho.get_file(nt_input_file_name).getvalue()\n nt_output_string = fho.get_file(nt_output_file_name).getvalue()\n # make sure that the inputs and outputs produced by the trimming and\n # no trimming algorithms are the same:\n self.assertEqual(t_input_string, nt_input_string)\n self.assertEqual(t_output_string, nt_output_string)\n # make sure that the input begins and ends with a bracket:\n self.assertEqual(\"[\", t_input_string[0])\n self.assertEqual(\"]\", t_input_string[-1])\n # make sure that each input element is a bit:\n for bit in t_input_string[1:-1]:\n self.assertTrue((bit == '0') or (bit == '1'))\n # make sure that the output is a bit:\n self.assertTrue((t_output_string == '0') or (t_output_string == '1'))\n # make sure that the two circuit headers are the same, and that they\n # contain the correct values:\n t_circuit_header = t_circuit_string.split(\"\\n\")[0]\n nt_circuit_header = nt_circuit_string.split(\"\\n\")[0]\n self.assertEqual(t_circuit_header, nt_circuit_header)\n (W_string, G_string, X_string, F_string) = t_circuit_header.split(\",\")\n W_value = int(W_string.split(\"=\")[-1])\n G_value = int(G_string.split(\"=\")[-1])\n X_value = int(X_string.split(\"=\")[-1])\n F_value = int(F_string.split(\"=\")[-1])\n self.assertEqual(W, W_value)\n self.assertEqual(G, G_value)\n self.assertEqual(F, F_value)\n # note that we cannot test that the circuits themselves are the same,\n # because the trimming algorithm produces a circuit with gates listed\n # in a different order.", "def make_rat():\n rats_path = os.path.dirname(__file__)\n models_path = os.path.join(rats_path, '..', 'models')\n recon22_path = os.path.join(models_path, 'recon_2.2.xml')\n rambo_path = os.path.join(rats_path, 'rambo.xml')\n sbml = read_sbml(recon22_path)\n\n# v_sol, f_opt = optimize_cobra_model(sbml, 1000)\n# print 'optimal growth rate:\\t%g\\n'%(f_opt)\n\n # map hgnc to rat\n hgnc_map = {}\n hgnc_map_file = os.path.join(rats_path, 'ensembl_hgnc_to_rat.txt')\n with open(hgnc_map_file, 'rU') as csvfile:\n reader = csv.reader(csvfile, delimiter='\\t')\n for row in reader:\n # ensembl = row[0].strip()\n hgnc = row[1].strip()\n hgnc = hgnc.replace('HGNC:','')\n if len(row) > 2:\n rat = row[2].strip()\n else:\n rat = ''\n\n if hgnc not in hgnc_map:\n hgnc_map[hgnc] = ''\n if not hgnc_map[hgnc]:\n hgnc_map[hgnc] = rat\n\n model = sbml.getModel()\n to_remove = []\n for reaction in model.getListOfReactions().clone():\n rID = reaction.getId()\n ga_human = get_notes_field(rID, 'GENE_ASSOCIATION', sbml)\n if ga_human:\n ga_human = ga_human.replace('HGNC:', '') \n ga_rat = ga_human\n for hgnc in re.findall(r'\\b\\S+\\b', ga_human):\n if (hgnc not in ['and', 'or', 'AND', 'OR']):\n if hgnc in hgnc_map:\n rat = hgnc_map[hgnc]\n else:\n print 'HGNC:%s\\tnot found'%hgnc\n rat = ''\n if not rat:\n rat = 'False'\n ga_rat = re.sub(r'\\b' + hgnc + r'\\b', rat, ga_rat)\n ga_rat = to_dnf(ga_rat)\n set_notes_field(rID, 'GENE_ASSOCIATION', ga_rat, sbml)\n\n# if not ga_rat:\n# to_remove.append(rID)\n# print '%s\\t[%s]\\n%s\\n'%(rID, reaction.getName(), ga_human)\n# print '%s of %s reactions removed\\n'%(len(to_remove), model.getNumReactions())\n\n for rID in 
to_remove:\n model.removeReaction(rID)\n\n model.setName('rambo')\n model.setId('rambo')\n model.setMetaId('meta_' + model.getId())\n model.setAnnotation(model.getAnnotation())\n\n write_sbml(sbml, rambo_path)\n\n v_sol, f_opt = optimize_cobra_model(sbml, 1000)\n print 'optimal growth rate:\\t%g\\n'%(f_opt)", "def stereogenic_bond_keys(gra):\n gra = without_bond_orders(gra)\n gra = explicit(gra) # for simplicity, add the explicit hydrogens back in\n bnd_keys = dict_.keys_by_value(\n resonance_dominant_bond_orders(gra), lambda x: 2 in x)\n\n # make sure both ends are sp^2 (excludes cumulenes)\n atm_hyb_dct = resonance_dominant_atom_hybridizations(gra)\n sp2_atm_keys = dict_.keys_by_value(atm_hyb_dct, lambda x: x == 2)\n bnd_keys = frozenset({bnd_key for bnd_key in bnd_keys\n if bnd_key <= sp2_atm_keys})\n\n bnd_keys -= bond_stereo_keys(gra)\n bnd_keys -= functools.reduce( # remove double bonds in small rings\n frozenset.union,\n filter(lambda x: len(x) < 8, rings_bond_keys(gra)), frozenset())\n\n atm_ngb_keys_dct = atom_neighbor_keys(gra)\n\n def _is_stereogenic(bnd_key):\n atm1_key, atm2_key = bnd_key\n\n def _is_symmetric_on_bond(atm_key, atm_ngb_key):\n atm_ngb_keys = list(atm_ngb_keys_dct[atm_key] - {atm_ngb_key})\n\n if not atm_ngb_keys: # C=:O:\n ret = True\n elif len(atm_ngb_keys) == 1: # C=N:-X\n ret = False\n else:\n assert len(atm_ngb_keys) == 2 # C=C(-X)-Y\n ret = (stereo_priority_vector(gra, atm_key, atm_ngb_keys[0]) ==\n stereo_priority_vector(gra, atm_key, atm_ngb_keys[1]))\n\n return ret\n\n return not (_is_symmetric_on_bond(atm1_key, atm2_key) or\n _is_symmetric_on_bond(atm2_key, atm1_key))\n\n ste_gen_bnd_keys = frozenset(filter(_is_stereogenic, bnd_keys))\n return ste_gen_bnd_keys", "def preparehspiceidvgGEO1v2(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,HFINparam,TFIN_TOPparam,TFIN_BASEparam,EOTparam,NBODYparam,NFINparam,PHIGparam,RSHSparam,RSHDparam):\n #make an aux copy of hspice file to simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'HFINparam',HFINparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_TOPparam', TFIN_TOPparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_BASEparam',TFIN_BASEparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'EOTparam', EOTparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NBODYparam',NBODYparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam', NFINparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'PHIGparam', PHIGparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'RSHSparam', RSHSparam) \n 
sf.inplace_change(wheretosimpath+'idvgaux.sp', 'RSHDparam', RSHDparam)", "def test_get_all_smirks():\n gen = SmirksGenerator()\n gen.target_smirks = [SmirksType.Vdw, SmirksType.Bonds, SmirksType.Angles, SmirksType.ProperTorsions]\n\n mol = Molecule.from_smiles(\"CO\")\n\n all_smirks = gen._get_all_smirks(molecule=mol, forcefield_editor=ForceFieldEditor(\"openff_unconstrained-1.3.0.offxml\"))\n # this is a list of all of the smirks from the forcefield\n all_matches = []\n for smirk in all_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n assert compare_matches(atoms, smirk.atoms) is True\n all_matches.extend(atoms)\n\n assert all_covered(all_matches, mol) is True", "def generate_symbole(figure_name = \"canon\"):\n if figure_name == \"planeur\": #PLANNEUR\n planneur = np.zeros((3, 3))\n planneur[1, 0] = 1\n planneur[0, 1] = 1\n planneur[0, 2] = 1\n planneur[1, 2] = 1\n planneur[2, 2] = 1\n return planneur\n\n elif figure_name == \"canon\": #CANON\n canon = np.zeros((36,9))\n canon[0:2,5:7] = 1\n canon[11,4:7] = 1\n canon[15:17,4:7] = 1\n canon[12,3] = 1\n canon[14,3] = 1\n canon[13,2] = 1\n canon[12,7] = 1\n canon[14,7] = 1\n canon[13,8] = 1\n canon[25,0:2] = 1\n canon[22:25,1:3] = 1\n canon[21,2:5] = 1\n canon[24,3] = 1\n canon[22:25,4:6] = 1\n canon[25,5:7] = 1\n canon[30,1:3] = 1\n canon[34:36,3:5] = 1\n return canon\n\n elif figure_name == \"blinker\": #BLINKER\n blinker = np.ones((3,1))\n return blinker\n\n elif figure_name == \"oscillator_alone\":\n osc = np.zeros((11,11))\n osc[2,2:9] = 1\n osc[8,2:9] = 1\n osc[2:9,2] = 1\n osc[2:9,8] = 1\n osc[5,2] = 0\n osc[5,8] = 0\n osc[2,5] = 0\n osc[8,5] = 0\n osc[0,5] = 1\n osc[10,5] = 1\n osc[5,0] = 1\n osc[5,10] = 1\n osc[1,4:7] = 1\n osc[9,4:7] = 1\n osc[4:7,1] = 1\n osc[4:7,9] = 1\n return osc\n\n elif figure_name == \"oscillator_one_block\":\n osc = generate_symbole(\"oscillator_alone\")\n osc[0:2,-2:] = 1\n return osc\n\n elif figure_name == \"oscillator_four_blocks\":\n osc = generate_symbole(\"oscillator_alone\")\n osc[0:2, -2:] = 1\n osc[0:2,0:2] = 1\n osc[-2:,0:2] = 1\n osc[-2:,-2:] = 1\n return osc\n\n elif figure_name == \"croix\":\n return osc\n\n elif figure_name == \"diag\":\n return osc\n\n elif figure_name == \"octogone\":\n return osc\n\n else:\n return 0", "def preparehspiceidvgGEO4(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,Ach_UFCMparam,Cins_UFCMparam,W_UFCMparam,NBODYparam,NFINparam):\n#L=Lparam Ach_UFCM=Ach_UFCMparam Cins_UFCM=Cins_UFCMparam W_UFCM=W_UFCMparam NBODY=NBODYparam NFIN=NFINparam\n #make an aux copy of hspice file to simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n 
sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Ach_UFCMparam',Ach_UFCMparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Cins_UFCMparam', Cins_UFCMparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'W_UFCMparam',W_UFCMparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NBODYparam',NBODYparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam', NFINparam)", "def SecondaryComplex_to_Bid():\n Parameter('RIP3_0' , 2.0e4) # molecules per cell\n Parameter('Bid_0' , 2.0e4) # molecules per cell\n Parameter('BidK_0' , 5.0e3) # molecules per cell\n \n Initial(RIP3(bRHIM = None, state = 'unmod'), RIP3_0) # RIP3\n Initial(Bid(bf = None, state = 'unmod'), Bid_0) # Bid\n Initial(BidK(bf = None), BidK_0)\n # ==============================================================\n # Assembly of Complex II, Riptosome and Necrosome\n # --------------------------------------------------------------\n # FADD + TRADD[active] <-> FADD:TRADD[active]\n # FADD + RIP1 <-> FADD:RIP1\n # TRADD + RIP1 <-> TRADD:RIP1\n \n # CD95_to_secondary complex contains the rules for recruitment of proC8 to FADD.\n # (RIP1 or TRADD):FADD + proC8 <-> (RIP1 or TRADD):FADD:proC8\n # (RIP1 or TRADD):FADD:proC8 + proC8 <-> (RIP1 or TRADD):FADD:proC8:proC8\n # (RIP1 or TRADD):FADD:proC8 + flip_L <-> (RIP1 or TRADD):FADD:proC8:flip_L\n # (RIP1 or TRADD):FADD:proC8 + flip_S <-> (RIP1 or TRADD):proC8:flip_S\n \n # RIP1%ProC8%ProC8(in a complex) >> RIP1[trunc] + C8 + (remains of the complex)\n # RIP1%ProC8%cFlip[L](in a complex) >> RIP1[trunc] + remains of the complex)\n # RIP1%cFlip[S](in a complex) + RIP3 >> RIP1:RIP3(in a complex, i.e. necrosome)\n \n # RIP1 + C8 <-> RIP1:C8 >> RIP1[trunc] + C8\n # RIP3 + C8 <-> RIP3:C8 >> RIP3[trunc] + C8\n # Bid + C8 <-> Bid:C8 >> Bid[trunc] + C8\n \n # -------------Assembling Complex II-----------------\n bind(FADD(bDD = None, bDED1 = None, bDED2 = None), 'bDD', TRADD(bDD1=None, state = 'active'), 'bDD1', [KF, KR])\n bind(FADD(bDD = None, bDED1 = None, bDED2 = None), 'bDD', RIP1(bDD=None, bRHIM = None, state = 'unmod'), 'bDD', [Ka_RIP1_FADD, Kd_RIP1_FADD])\n bind(TRADD(bDD2 = None, state = 'active'),'bDD2', RIP1(bDD = None, bRHIM = None, state = 'unmod'), 'bDD', [KF, KR])\n # For simplicity, I am neglecting the binary intereaction that occurs between proC8 and RIP1.\n # Binding of proC8 and c-flip to FADD is accomplished in CD95_to_Secondary complex.", "def clean_junctions(seqlst, proteins, escores, models, mutate_cutoff=0.38,\n mutate_gap=0, primer=\"GTCTTGATTCGCTTGACGCTGCTG\",\n max_mutate_count=2):\n # loop through each sequence in seqlst\n for i in range(len(seqlst)):\n seq = seqlst[i]\n # if there is not sequence, don't do anything\n if seq == \"\":\n continue\n es_preds = {}\n model_preds = {}\n es_preds_primer = {}\n model_preds_primer = {}\n es_preds_primer = {}\n model_preds_primer = {}\n d = {\"sequence\": [seq]}\n df = pd.DataFrame.from_dict(d)\n d_primer = {\"sequence\": [seq + primer]}\n df_primer = pd.DataFrame.from_dict(d_primer)\n # check for forward orientation\n for protein in proteins:\n # predict sequence without primer\n es_preds[protein] = escores[protein].predict_sequences(df)[\"sequence1\"]\n model_preds[protein] = models[protein].predict_sequences(df)[\"sequence1\"]\n # predict sequence with primer\n es_preds_primer[protein] = escores[protein].predict_sequences(df_primer)[\"sequence1\"]\n model_preds_primer[protein] = models[protein].predict_sequences(df_primer)[\"sequence1\"]\n seq_bs = Sequence(es_preds, model_preds, proteins, escores, mutate_cutoff, 
mutate_gap)\n seq_primer_bs = Sequence(es_preds_primer, model_preds_primer, proteins,\n escores, mutate_cutoff, mutate_gap)\n # theres a sequence with less bsites with primer. why?\n if seq_bs.site_count_all() > seq_primer_bs.site_count_all():\n print(\"Number of bsites with primer should not be less than without\")\n seqlst[i] = \"\"\n if seq_bs.site_count_all() < seq_primer_bs.site_count_all():\n new_seq = clean_junction_seq(ori_seq=seq,\n proteins=proteins,\n mutate_cutoff=mutate_cutoff,\n escores=escores,\n max_mutate_count=2)\n if new_seq == seq:\n seqlst[i] = \"\"\n\n # check reverse complement\n for i in range(len(seqlst)):\n # get the sequence\n seq = seqlst[i]\n # get the reverse complement of the primer\n primer_obj = Seq(primer)\n rev_primer = str(primer_obj.reverse_complement())\n # if there is not sequence, don't do anything\n if seq == \"\":\n continue\n\n es_preds = {}\n model_preds = {}\n es_preds_primer = {}\n model_preds_primer = {}\n\n d = {\"sequence\": [seq]}\n df = pd.DataFrame.from_dict(d)\n d_primer = {\"sequence\": [rev_primer + seq]}\n df_primer = pd.DataFrame.from_dict(d_primer)\n\n # check for reverse complement\n for protein in proteins:\n # predict sequence without primer\n es_preds[protein] = escores[protein].predict_sequences(df)[\"sequence1\"]\n model_preds[protein] = models[protein].predict_sequences(df)[\"sequence1\"]\n # predict reverse complement of sequence with primer\n es_preds_primer[protein] = escores[protein].predict_sequences(df_primer)[\"sequence1\"]\n model_preds_primer[protein] = models[protein].predict_sequences(df_primer)[\"sequence1\"]\n seq_bs = Sequence(es_preds, model_preds, proteins, escores, mutate_cutoff, mutate_gap)\n seq_primer_bs = Sequence(es_preds_primer, model_preds_primer, proteins,\n escores, mutate_cutoff, mutate_gap)\n if seq_bs.site_count_all() > seq_primer_bs.site_count_all():\n print(\"Number of bsites with primer should not be less than without\")\n seqlst[i] = \"\"\n if seq_bs.site_count_all() < seq_primer_bs.site_count_all():\n new_seq = clean_junction_rev_seq(ori_seq=seq,\n proteins=proteins,\n mutate_cutoff=mutate_cutoff,\n escores=escores,\n max_mutate_count=2)\n if new_seq == seq:\n seqlst[i] = \"\"\n\n else:\n seqlst[i] = seqlst[i] + primer\n\n # if wild type is empty, return false\n if seqlst[0] == \"\":\n return [], False\n\n # otherwise, return the new seqlist\n return seqlst, True", "def __init__(self, n): # this is equivalent to starting a random one\n self.n = n\n # From table S1 in the supplemental materials\n # each c parameters is [body,limb]\n self.cv0 = [0.3, 0.0]\n self.cv1 = [0.2, 0.2]\n self.cR0 = [0.196,0.131]\n self.cR1 = [0.065,0.131]\n #[[dbodylow,dbodyhigh],[dlimblow,dlimbhigh]]\n self.d_params = [[1,5],[1,3]]\n # which oscillators are limb oscillators and which ones are body oscillators is pretty constant\n n_body = n - 4\n self.osc_class = [0 if i < n_body else 1 for i in range(self.n)] # 0 for body oscillator, 1 for limb oscillator\n # list of keys that can be mutated during evolution\n self.evolvables = ['w', 'phi', 'a', 'gsl', 'gsh', 'gb1', 'gb2', 'theta', 'ampl', 'ampl_dot']\n self.scalars = set(['gsl', 'gsh', 'gb1', 'gb2'])\n self.nonzeros = set([int(i) for i in \"8 160 29 181 50 202 71 223 92 244 113 265 134 286 155 307 1 20 22 41 43 62 64 83 85 104 106 125 127 146 169 188 190 209 211 230 232 251 253 272 274 293 295 314 320 321 322 323 364 365 366 367 348 349 350 351 392 393 394 395 338 376 337 356 359 397 379 398\".split(\" \")])\n self.shapes = {'w':n*n,\n 'phi':n*n,\n 'a':n,\n 
'theta':n,\n 'ampl':n,\n 'ampl_dot':n}\n self.sizes = {'w':n*n,\n 'phi':n*n,\n 'a':n,\n 'theta':n,\n 'ampl':n,\n 'ampl_dot':n}", "def get_TEB_prerequisites(maps=None,\n Imap_label='353',Pmap_label='353',\n Imap_name=None,Pmap_name=None,\n lmax=100,masktype='PowerSpectra',\n rewrite=False,\n iso=True):\n\n if iso:\n isolabel = '_iso'\n filename = 'bispectrum{}__lmax{}_mask_{}_I{}_P{}.npy'.format(isolabel,\n lmax,\n masktype,\n Imap_label,\n Pmap_label)\n \n \n \n #if os.path.exists(filename) and not rewrite:\n # bispectrum = np.load(filename)\n # return bispectrum\n\n\n Tlm, Elm, Blm = get_alms(maps=maps,mask=None,masktype=masktype,\n maplabel=Imap_label,\n showI=False,rewrite=False,\n pol=True,intensity=True,\n writemap=False,savealms=True,\n lmax=lmax)\n\n if Imap_label != Pmap_label:\n Elm, Blm = get_alms(maps=maps,mask=None,masktype=masktype,\n maplabel=Pmap_label,\n showI=False,rewrite=False,\n pol=True,intensity=False,\n writemap=False,savealms=True,\n lmax=lmax)\n \n ls, ms = hp.sphtfunc.Alm.getlm(lmax,np.arange(len(Tlm)))\n lmin = ls.min()\n\n\n hs = get_hs(lmin=lmin, lmax=lmax)\n return Tlm,Elm,Blm,ls,ms,hs\n\n #print 'calculating bispectrum ...'\n \n #if iso:\n # bispectrum = calc_b_iso(Tlm.real, Elm.real, Blm.real, ls, ms,\n # hs, lmax=lmax)\n #else:\n # bispectrum = calc_b(Tlm, Elm, Blm, ls, ms,\n # hs, lmax=lmax)\n\n #np.save(filename, bispectrum)\n #return bispectrum", "def generate_modelSED_photo_fit(sp=None,sfh_form=4,filters=None,add_igm_absorption=0,igm_type=0,params_fsps=None,DL_Gpc=0.0,cosmo='flat_LCDM',\n\tH0=70.0,Om0=0.3,params_val=None,interp_filters_waves=[],interp_filters_trans=[]):\n\n\tdef_params_fsps, params_assoc_fsps, status_log = list_params_fsps()\n\n\tformed_mass = pow(10.0,params_val['log_mass'])\n\n\t# input model parameters to FSPS:\n\tfor pp in range(len(params_fsps)):\n\t\tstr_temp = params_assoc_fsps[params_fsps[pp]]\n\t\tif status_log[params_fsps[pp]] == 0:\n\t\t\tsp.params[str_temp] = params_val[params_fsps[pp]]\n\t\telif status_log[params_fsps[pp]] == 1:\n\t\t\tsp.params[str_temp] = pow(10.0,params_val[params_fsps[pp]])\n\n\t# generate the SED:\n\tif sfh_form==0 or sfh_form==1:\n\t\tage = pow(10.0,params_val['log_age'])\n\t\twave, extnc_spec = sp.get_spectrum(peraa=True,tage=age) ## spectrum in L_sun/AA\n\t\tmass = sp.stellar_mass\n\t\tdust_mass0 = sp.dust_mass ## in solar mass/norm\n\telif sfh_form==2 or sfh_form==3 or sfh_form==4:\n\t\tt0 = pow(10.0,params_val['log_t0'])\n\t\ttau = pow(10.0,params_val['log_tau'])\n\t\tage = pow(10.0,params_val['log_age'])\n\t\talpha = pow(10.0,params_val['log_alpha'])\n\t\tbeta = pow(10.0,params_val['log_beta'])\n\t\tSFR_fSM,mass,wave,extnc_spec,dust_mass0 = csp_spec_restframe_fit(sp=sp,sfh_form=sfh_form,formed_mass=formed_mass,age=age,tau=tau,t0=t0,alpha=alpha,beta=beta)\n\n\t# redshifting\n\tredsh_wave,redsh_spec0 = cosmo_redshifting(DL_Gpc=DL_Gpc,cosmo=cosmo,H0=H0,Om0=Om0,z=params_val['z'],wave=wave,spec=extnc_spec)\n\n\t# IGM absorption:\n\tif add_igm_absorption == 1:\n\t\tif igm_type == 0:\n\t\t\ttrans = igm_att_madau(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\t\telif igm_type == 1:\n\t\t\ttrans = igm_att_inoue(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\n\t# normalize:\n\tnorm0 = formed_mass/mass\n\tredsh_spec = redsh_spec0*norm0\n\tdust_mass = dust_mass0*norm0\n\n\t# filtering:\n\tphoto_SED_flux = filtering_interp_filters(redsh_wave,redsh_spec,interp_filters_waves,interp_filters_trans)\n\n\treturn photo_SED_flux", "def 
prepare(info_dict):\n\n logger.info(\"\\n-=# Chain optimization cycle 0 #=- \\n\")\n params, M, engine, result, _ = get_basic_info(info_dict)\n\n logger.info(\"Spring Force: %.2f kcal/mol/Ang^2 \\n\" % params.nebk)\n\n tmpdir = tempfile.mkdtemp()\n\n # Getting the initial chain.\n chain = ElasticBand(M, engine=engine, tmpdir=tmpdir, params=params, plain=params.plain)\n\n trust = params.trust\n chain.ComputeChain(result=result)\n chain.ComputeGuessHessian(blank=isinstance(engine, Blank))\n chain.PrintStatus()\n\n avgg_print, maxg_print = print_forces(chain, params.avgg, params.maxg)\n logger.info(\"-= Chain Properties =- \\n\")\n logger.info(\n \"@\\n%13s %13s %13s %13s %11s %13s %13s \\n\"\n % (\"GAvg(eV/Ang)\", \"GMax(eV/Ang)\", \"Length(Ang)\", \"DeltaE(kcal)\", \"RMSD(Ang)\", \"TrustRad(Ang)\", \"Step Quality\")\n )\n logger.info(\n \"@%13s %13s %13s \\n\"\n % (\n \" %s \" % avgg_print,\n \" %s \" % maxg_print,\n \"% 8.4f \" % sum(chain.calc_spacings()),\n )\n )\n\n GW = chain.get_global_grad(\"total\", \"working\")\n GP = chain.get_global_grad(\"total\", \"plain\")\n HW = chain.guess_hessian_working.copy()\n HP = chain.guess_hessian_plain.copy()\n dy, expect, expectG, ForceRebuild = chain.CalcInternalStep(trust, HW, HP)\n new_chain = chain.TakeStep(dy)\n respaced = new_chain.delete_insert(1.5)\n newcoords = chaintocoords(new_chain)\n attrs_new = check_attr(new_chain)\n attrs_prev = check_attr(chain)\n\n temp = {\"Ys\": [chain.get_internal_all().tolist()], \"GWs\": [GW.tolist()], \"GPs\": [GP.tolist()], \"attrs_new\": attrs_new,\n \"attrs_prev\": attrs_prev, \"trust\": trust, \"expect\": expect, \"expectG\": expectG.tolist(), \"respaced\": respaced,\n \"trustprint\": \"=\", \"frocerebuild\": False,\"lastforce\": 0, \"coord_ang_prev\": chaintocoords(chain, True),\n \"result_prev\": result, \"geometry\": []}\n info_dict.update(temp)\n return newcoords, info_dict", "def brh_cogs2(DB, species, missing_factor=0.0, seed_sp=None, min_score=0):\n def _sort_cogs(cogs1, cogs2):\n seed1, mx1, avg1, ncogs1 = cogs1\n seed2, mx2, avg2, ncogs2 = cogs2\n for i, j in ((mx1, mx2), (avg1, avg2), (ncogs1, ncogs2)):\n v = -1 * cmp(i, j)\n if v != 0:\n break\n return v\n \n log.log(26, \"Searching BRH orthologs\")\n species = set(map(str, species))\n \n min_species = len(species) - round(missing_factor * len(species))\n \n if seed_sp == \"auto\":\n sp_to_test = list(species)\n elif seed_sp == \"largest\":\n cmd = \"\"\"SELECT taxid, size FROM species\"\"\"\n db.seqcursor.execute(cmd)\n sp2size = {}\n for tax, counter in db.seqcursor.fetchall():\n if tax in species: \n sp2size[tax] = counter\n \n sorted_sp = sorted(sp2size.items(), lambda x,y: cmp(x[1],y[1]))\n log.log(24, sorted_sp[:6])\n largest_sp = sorted_sp[-1][0]\n sp_to_test = [largest_sp]\n log.log(28, \"Using %s as search seed. 
Proteome size=%s genes\" %\\\n (largest_sp, sp2size[largest_sp]))\n else:\n sp_to_test = [str(seed_sp)]\n\n analysis_txt = StringIO()\n if sp_to_test:\n log.log(26, \"Finding best COG selection...\")\n seed2size = get_sorted_seeds(seed_sp, species, sp_to_test, min_species, DB)\n size_analysis = []\n for seedname, content in seed2size.iteritems():\n cog_sizes = [size for seq, size in content]\n mx, avg = _max(cog_sizes), round(_mean(cog_sizes))\n size_analysis.append([seedname, mx, avg, len(content)])\n size_analysis.sort(_sort_cogs) \n #print '\\n'.join(map(str, size_analysis))\n seed = size_analysis[0][0]\n print_as_table(size_analysis[:25], stdout=analysis_txt,\n header=[\"Seed\",\"largest COG\", \"avg COG size\", \"total COGs\"])\n if size_analysis[0][1] < len(species)-1:\n print size_analysis[0][1]\n raise ValueError(\"Current COG selection parameters do not permit to cover all species\")\n \n log.log(28, analysis_txt.getvalue())\n # The following loop tests each possible seed if none is\n # specified.\n log.log(28, \"Computing Clusters of Orthologs groups (COGs)\")\n log.log(28, \"Min number of species per COG: %d\" %min_species)\n cogs_selection = []\n log.log(26,\"Using seed species:%s\", seed)\n species_side1 = ','.join(map(quote, [s for s in species if str(s)>str(seed)]))\n species_side2 = ','.join(map(quote, [s for s in species if str(s)<str(seed)]))\n pairs1 = []\n pairs2 = []\n # Select all ids with matches in the target species, and\n # return the total number of species covered by each of\n # such ids.\n if species_side1 != \"\":\n cmd = \"\"\"SELECT seqid1, taxid1, seqid2, taxid2 from ortho_pair WHERE\n taxid1=\"%s\" AND taxid2 IN (%s) \"\"\" % (seed, species_side1)\n DB.orthocursor.execute(cmd)\n pairs1 = DB.orthocursor.fetchall()\n\n if species_side2 != \"\":\n cmd = \"\"\"SELECT seqid2, taxid2, seqid1, taxid1 from ortho_pair WHERE\n taxid1 IN (%s) AND taxid2 = \"%s\" \"\"\" % (species_side2, seed)\n DB.orthocursor.execute(cmd)\n pairs2 = DB.orthocursor.fetchall()\n \n cog_candidates = defaultdict(set)\n for seq1, sp1, seq2, sp2 in pairs1 + pairs2:\n s1 = (sp1, seq1)\n s2 = (sp2, seq2)\n cog_candidates[(sp1, seq1)].update([s1, s2])\n\n all_cogs = [cand for cand in cog_candidates.values() if\n len(cand) >= min_species]\n\n # CHECK CONSISTENCY\n seqs = set()\n for cand in all_cogs:\n seqs.update([b for a,b in cand if a == seed])\n pre_selected_seqs = set([v[0] for v in seed2size[seed]])\n if len(seqs & pre_selected_seqs) != len(set(seed2size[seed])) or\\\n len(seqs & pre_selected_seqs) != len(seqs): \n print \"old method seqs\", len(seqs), \"new seqs\", len(set(seed2size[seed])), \"Common\", len(seqs & pre_selected_seqs)\n raise ValueError(\"ooops\")\n \n cog_sizes = [len(cog) for cog in all_cogs]\n cog_spsizes = [len(set([e[0] for e in cog])) for cog in all_cogs]\n\n if [1 for i in xrange(len(cog_sizes)) if cog_sizes[i] != cog_spsizes[i]]:\n raise ValueError(\"Inconsistent COG found\")\n \n if cog_sizes: \n cogs_selection.append([seed, all_cogs])\n log.log(26, \"Found %d COGs\" % len(all_cogs))\n \n recoded_cogs = []\n for cog in all_cogs:\n named_cog = map(lambda x: \"%s%s%s\" %(x[0], GLOBALS[\"spname_delimiter\"],x[1]), cog)\n recoded_cogs.append(named_cog)\n\n return recoded_cogs, analysis_txt.getvalue()", "def test_grounded_modified_enzyme():\n mek_s202 = Agent('MEK1', mods=[ModCondition('phosphorylation', 'S', '202')],\n db_refs={'HGNC': '6840'})\n mek_phos = Agent('MEK1', mods=[ModCondition('phosphorylation', None, None)],\n db_refs={'HGNC': '6840'})\n erk = 
Agent('ERK2', db_refs={'HGNC': '6871'})\n stmt_to_model = Phosphorylation(mek_s202, erk, None, None)\n stmt_to_check = Phosphorylation(mek_phos, erk, None, None)\n pa = PysbAssembler()\n pa.add_statements([stmt_to_model])\n pa.make_model(policies='one_step')\n mc = ModelChecker(pa.model, [stmt_to_check])\n results = mc.check_model()\n assert len(results) == 1\n assert results[0][0] == stmt_to_check\n assert results[0][1].paths == \\\n [[('MEK1_phosphoS202_phosphorylation_ERK2_phospho', 1),\n ('ERK2_phospho_p_obs', 1)]]", "def generateFamily(self):\n #\n symms = self.crystalStructure.symmetries\n\n ss_family = set() # will not preserve order\n\n plane = self.planeIdc\n dir = self.dirIdc\n\n if self.crystalStructure.name == 'hexagonal':\n # Transformation from crystal to orthonormal coords\n lMatrix = CrystalStructure.lMatrix(\n 1, 1, self.cOverA, np.pi / 2, np.pi / 2, np.pi * 2 / 3\n )\n # Q matrix for transforming planes\n qMatrix = CrystalStructure.qMatrix(lMatrix)\n\n # Transform into orthonormal basis\n plane = np.matmul(qMatrix, convertIdc('mb', plane=plane))\n dir = np.matmul(lMatrix, convertIdc('mb', dir=dir))\n\n for i, symm in enumerate(symms):\n symm = symm.conjugate\n\n plane_symm = symm.transformVector(plane)\n dir_symm = symm.transformVector(dir)\n\n if self.crystalStructure.name == 'hexagonal':\n # qMatrix inverse is equal to lMatrix transposed and vice-versa\n plane_symm = reduceIdc(convertIdc(\n 'm', plane=safeIntCast(np.matmul(lMatrix.T, plane_symm))\n ))\n dir_symm = reduceIdc(convertIdc(\n 'm', dir=safeIntCast(np.matmul(qMatrix.T, dir_symm))\n ))\n\n ss_family.add(SlipSystem(\n posIdc(safeIntCast(plane_symm)),\n posIdc(safeIntCast(dir_symm)),\n self.crystalStructure, cOverA=self.cOverA\n ))\n\n return ss_family", "def construction_loop(genome : Genome, building_blocks, config_path, xyz_file_path):\n\tdef determine_coupling_index(genome: Genome, index:int, building_blocks=building_blocks):\n\t\t\"\"\"\n\t\tdetermines coupling index (atom and corresponding line in xyz file of building block refered in genome[index]) and coupling angle\n\n\t\tArgs:\n\t\t\tparam1 (Genome): Genome to build\n\t\t\tparam2 (int): index which block is processed and used as coupling point. Must be even -> odd indices are couplings\n\n\t\tReturns:\n\t\t\t(int,float): corresponding line in xyz file of building block refered in genome[index], coupling angle\n\n\t\t\"\"\"\n\n\t\tif(index > len(genome)-2 or index < 0):\n\t\t\traise ValueError(\"index is out of proper range\")\n\n\t\t# coupling after building_block of interest \n\t\ti = index + 1\n\t\t\n\t\t#para\n\t\tif(genome[i]==0):\n\t\t\tcoupling_index = building_blocks[genome[index]].para_pos\n\t\t\tcoupling_angle = building_blocks[genome[index]].para_angle\n\t\t#meta\n\t\telif(genome[i]==1):\n\t\t\tcoupling_index = building_blocks[genome[index]].meta_pos\n\t\t\tcoupling_angle = building_blocks[genome[index]].meta_angle\n\t\t#ortho\n\t\telif(genome[i]==2):\n\t\t\tcoupling_index = building_blocks[genome[index]].ortho_pos\n\t\t\tcoupling_angle = building_blocks[genome[index]].ortho_angle\n\t\telse:\n\t\t\traise ValueError(\"coupling seems to be funny\")\n\t\treturn coupling_index, coupling_angle\n\n\tdef write_file_parts_to_file(xyz_file_parts, path, fixed_beginning, fixed_end,complexity, config_path):\n\t\t\"\"\"\n\t\twrite xyz file parts to proper xyz file and turbomole coord file. 
Complexity is written to file \n\n\t\tArgs:\n\t\t\tparam1 (List of np.ndarray): List of xyz files\n\t\t\tparam2 (String): path\n\t\t\tparam3 (int): fixed_beginning (index of atom in first block which should be fixed)\n\t\t\tparam4 (int): fixed_end (index of atom in last block which should be fixed)\n\t\t\tparam5 (int): complexity of whole molecule\n\t\t\tparam6 (String): path to config file\n\t\tReturns:\n\t\t\t\n\n\t\t\"\"\"\n\t\t#load ang to bohr factor\n\t\tcfg = configparser.ConfigParser()\n\t\tcfg.read(config_path, encoding='utf-8')\n\n\t\t#write complexity to file\n\t\twith open(path+\"/complexity\", \"w\") as file_complexity:\n\t\t\tfile_complexity.write(str(complexity))\n\t\tfile_complexity.close()\n\n\t\tconcat_xyz = np.concatenate(xyz_file_parts, axis=1)\n\t\ttop.write_xyz_file(path+\"/coord.xyz\", concat_xyz)\n\t\tcoord = top.x2t(concat_xyz)\n\t\t#fix right atoms\n\t\tcoord[4,fixed_beginning] = \"f\"\n\t\tfixed_end = sum(np.array([xyz_file_parts[i].shape[1] for i in range(0,len(xyz_file_parts)-1)]))+fixed_end\n\t\tcoord[4, fixed_end] = \"f\"\n\t\ttop.write_coord_file(path+\"/coord\", coord)\n\n\t\tlower_limit = np.min(concat_xyz[3,:]) + 0.1\n\t\tupper_limit = np.max(concat_xyz[3, :]) - 0.1\n\t\twith open(path+\"/limits\", \"w\") as limits:\n\t\t\tlimits.write(str(lower_limit) + \"\\n\")\n\t\t\tlimits.write(str(upper_limit))\n\n\n\tdef determine_nearest_neighbor(datContent, coupling_index, atom_type):\n\t\t\"\"\"\n\t\tdetermines nearest neighbor of atom with index coupling index in dat content of atom type atom_type\n\n\t\tArgs:\n\t\t\tparam1 (List of np.ndarray): List of xyz files\n\t\t\tparam2 (int): coupling_inxex\n\t\t\tparam3 (string): atom_type of nearest neighbour\n\t\tReturns:\n\t\t\tint : index of nearest neighbour\n\t\t\"\"\"\n\t\tintersting_atoms = list()\n\t\tintersting_atoms_distance = list()\n\t\tfor i in range(0, len(datContent[1,:])):\n\t\t\tif(datContent[0,i]==atom_type):\n\t\t\t\tintersting_atoms.append(i)\n\t\t\t\tdistance = (float(datContent[1,i])-float(datContent[1,coupling_index]))**2+(float(datContent[2,i])-float(datContent[2,coupling_index]))**2+(float(datContent[3,i])-float(datContent[3,coupling_index]))**2\t\t\t\n\t\t\t\tintersting_atoms_distance.append(distance)\n\t\tintersting_atoms = [x for _,x in sorted(zip(intersting_atoms_distance,intersting_atoms))]\n\n\t\treturn intersting_atoms[0]\n\n\tdef align_z_along_fixed_ends(xyz_file_parts, fixed_beginning, fixed_end):\n\t\t\"\"\"\n\t\tAlign molecule z axis along fixed ends. 
This is done by rotation about the axis given by curl(vec(fixed_beginning->fixed_end), e_z) by the angle between vec(fixed_beginning-fixed_end) and e_z\n\n\t\tArgs:\n\t\t\tparam1 (List of np.ndarray): List of xyz files\n\t\t\tparam2 (int): index in xyz_file_parts[0] of fixed beginning\n\t\t\tparam3 (int): index in xyz_file_parts[-1] of fixed end\n\t\tReturns:\n\t\t\tint : (List of np.ndarray): List of xyz file\n\t\t\"\"\"\n\n\t\tmolecule_axis = [xyz_file_parts[-1][1,fixed_end],xyz_file_parts[-1][2,fixed_end],xyz_file_parts[-1][3,fixed_end]]\n\n\n\t\tangle = np.arccos(molecule_axis[2]/np.linalg.norm(molecule_axis))\n\t\ttheta = angle\n\n\t\tif(angle != 0):\n\t\t\t#calculate rotation axis\n\t\t\trotation_axis = np.cross(molecule_axis, [0.0,0.0,1.0])\n\t\t\trotation_axis = 1.0/np.linalg.norm(rotation_axis)*rotation_axis\n\t\t\tu = rotation_axis\n\n\t\t\t#calculate rotation_matrix\n\t\t\trotation_matrix = [[np.cos(theta) + u[0]**2 * (1-np.cos(theta)), u[0] * u[1] * (1-np.cos(theta)) - u[2] * np.sin(theta), u[0] * u[2] * (1 - np.cos(theta)) + u[1] * np.sin(theta)],\n\t [u[0] * u[1] * (1-np.cos(theta)) + u[2] * np.sin(theta), np.cos(theta) + u[1]**2 * (1-np.cos(theta)), u[1] * u[2] * (1 - np.cos(theta)) - u[0] * np.sin(theta)],\n\t [u[0] * u[2] * (1-np.cos(theta)) - u[1] * np.sin(theta), u[1] * u[2] * (1-np.cos(theta)) + u[0] * np.sin(theta), np.cos(theta) + u[2]**2 * (1-np.cos(theta))]]\n\n\t\t\tfor j in range(0, len(xyz_file_parts)):\n\t\t\t\tfor i in range(0, len(xyz_file_parts[j][1,:])):\n\t\t\t\t\t \n\t\t\t\t\tvector_to_rotate = [round(float(xyz_file_parts[j][1,i]),5),round(float(xyz_file_parts[j][2,i]),5),round(float(xyz_file_parts[j][3,i]),5)]\n\t\t\t\t\trotated_vector = np.asmatrix(rotation_matrix)*np.asmatrix(vector_to_rotate).T\n\t\t\t\t\txyz_file_parts[j][1,i] = round(rotated_vector[0,0],5)\n\t\t\t\t\txyz_file_parts[j][2,i] = round(rotated_vector[1,0],5)\n\t\t\t\t\txyz_file_parts[j][3,i] = round(rotated_vector[2,0],5)\n\t\t\treturn xyz_file_parts\n\t\telse:\n\t\t\treturn xyz_file_parts\n\n\n\n\t#load properties from config file \n\tcfg = configparser.ConfigParser()\n\tcfg.read(config_path, encoding='utf-8')\n\tcc_bond_length = float(cfg.get('Building Procedure', 'CC_bond_lengt'))\n\tconjugation_angle_from_file = float(cfg.get('Building Procedure', 'conjugation_angle'))\n\tbuilding_block_path = cfg.get('Building Procedure', 'building_block_path')\n\n\t#ensure that genome is not empty\n\tif(len(genome) < 1):\n\t\tprint(\"Genome was emtpy\")\n\t\t# TODO: proper treatment\n\n\n\t#add anchor to end -> couplings are missing \n\t#add left anchor\n\tanchor_left, anchor_right = load_anchors_blocks(building_block_path)\n\tbuilding_blocks.append(anchor_left)\n\t#para coupling\n\tgenome.insert(0, len(building_blocks)-1)\n\t#add right anchor\n\tbuilding_blocks.append(anchor_right)\n\t#para coupling\n\tgenome.append(len(building_blocks)-1)\n\n\t#data content of every part of xyz file is stored in this list\t\n\txyz_file_parts = list()\n\n\t#first block as initialization directly added to list\n\tcoupling_point = Point(x=0.0, y=0.0, z=0.0)\n\tcoupling_angle = 0.0\n\tcoupling_index = -1\n\tconjugation_angle = 0\n\tadditional_angle = 0.0\n\n\t#indices for fixed atoms in beginning and end of chain\n\tfixed_beginning = 0\n\tfixed_end = 0\n\n\t#complexity measure of molecule\n\tcomplexity = 0\n\tfor i in range(0, len(genome)):\n\t\tcomplexity += building_blocks[genome[i]].complexity\n\t\t#odd index -> coupling\n\t\tif(i%2==1):\t\t\n\t\t\t#conclude coupling point\n\t\t\tx_c = 
float(xyz_file_parts[-1][1,coupling_index])\n\t\t\ty_c = float(xyz_file_parts[-1][2,coupling_index])\n\t\t\tz_c = float(xyz_file_parts[-1][3,coupling_index])\n\t\t\tcoupling_point = Point(x=x_c, y=y_c, z=z_c)\t\t\n\n\n\t\t#even index -> building block\n\t\telif(i%2 == 0):\t\t\t\n\n\t\t\t#handle rotation to process consecutive para or ortho couplings\n\t\t\tadditional_angle += (-1)**(i/2+1)*np.pi\n\t\t\tadditional_angle = 0\n\t\t\t\n\t\t\t#first block must not be shifted\t\n\t\t\tif(i == 0):\n\t\t\t\tdatContent = process_block_to_add(coupling_point, coupling_angle, conjugation_angle+additional_angle, 0.0, building_blocks[genome[i]])\n\t\t\t\tfixed_beginning = building_blocks[genome[i]].fixed_left\n\t\t\t\tif(building_blocks[genome[i]].fixed_left == -1):\n\t\t\t\t\tprint(\"Error in first block: fixed atom not properly specified\")\n\t\t\telse:\n\t\t\t\tdatContent = process_block_to_add(coupling_point, coupling_angle, conjugation_angle+additional_angle, cc_bond_length, building_blocks[genome[i]])\n\t\t\t\t#find fix index of last block\n\t\t\t\tif(i == len(genome)-1):\n\t\t\t\t\t#para_pos is assumed to be right coupling point\n\t\t\t\t\tfixed_end = building_blocks[genome[i]].para_pos\n\t\t\t\t\tif(building_blocks[genome[i]].para_pos == -1):\n\t\t\t\t\t\tprint(\"Error in last block: fixed atom not properly specified\")\n\n\n\n\t\t\t#determine index of atom at origin\n\t\t\torigin = building_blocks[genome[i]].origin\n\n\t\t\t#if other block will be added -> hydrogen at c coupling atom must be removed\n\t\t\tif(i != len(genome)-1):\t\t\t\t\n\t\t\t\t#determine coupling index and coupling angle\n\t\t\t\tcoupling_index, coupling_angle_single = determine_coupling_index(genome,i,building_blocks)\n\n\t\t\t\t#handle sign to process consecutive para or ortho couplings\n\t\t\t\t#coupling_angle += (coupling_angle_single*(-1)**(i/2+1))\n\t\t\t\tcoupling_angle += (coupling_angle_single)\n\t\t\t\t\n\n\t\t\t\t#remove hydrogen or other atom bonded to coupling atom\n\t\t\t\tnearest_neighbour = determine_nearest_neighbor(datContent, coupling_index, \"H\")\n\t\t\t\tdatContent = np.delete(datContent,nearest_neighbour,1)\n\t\t\t\t\n\t\t\t\t#update coupling index and fixed beginning\n\t\t\t\tif(coupling_index>nearest_neighbour):\t\t\t\t\n\t\t\t\t\tcoupling_index -= 1\n\t\t\t\t\tif(i == 0 and fixed_beginning>nearest_neighbour):\n\t\t\t\t\t\tfixed_beginning -=1\n\t\t\t\t#update origin\n\t\t\t\tif(origin>nearest_neighbour):\n\t\t\t\t\torigin -=1\n\n\n\t\t\t#hydrogen bonded to C atom at origin must be removed, too (except for first atom)\n\t\t\tif(i != 0):\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t#remove hydrogen or other atom bonded to atom at origin\n\t\t\t\tnearest_neighbour = determine_nearest_neighbor(datContent, origin, \"H\")\n\t\t\t\tdatContent = np.delete(datContent,nearest_neighbour,1)\n\t\t\t\t#update coupling index and fixed ending\n\t\t\t\tif(coupling_index>nearest_neighbour):\t\t\t\t\t\n\t\t\t\t\tcoupling_index = coupling_index -1\n\t\t\t\t\tif(i == len(genome)-1 and fixed_end>nearest_neighbour):\n\t\t\t\t\t\tfixed_end -=1\n\t\t\t\t\tpass\n\n\t\t\txyz_file_parts.append(datContent)\n\n\t\t\t#alternating conjugation\n\t\t\t#conjugation_angle += (-1)**(i/2+1)*conjugation_angle_from_file\n\t\t\tconjugation_angle -= conjugation_angle_from_file\n\n\t#align molecule axis to z\n\txyz_file_parts= align_z_along_fixed_ends(xyz_file_parts, fixed_beginning, fixed_end)\n\n\t#write xyz_file_parts to xyz file\n\twrite_file_parts_to_file(xyz_file_parts, xyz_file_path, fixed_beginning, fixed_end, complexity, config_path)", "def 
assemble_g_and_B(self, residuals, jacobians, dofs, args, g, B):\n # Assemble B\n self._build_B(jacobians, dofs, args, B)\n\n # Assemble holonomic constraint function\n self._build_g(residuals, dofs, args, g)\n\n return g, B", "def generate(self):\n\n minEig = -1\n attempts = 0\n\n while minEig <= 0:\n p = np.zeros((self.n_combi, self.signum))\n sigma_signals = np.zeros((self.n_combi, self.signum))\n\n corr_samples = []\n for j in range(self.corrnum):\n t = random.sample(list(range(self.n_sets)), int(self.tot_corr[j]))\n corr_samples.append(t)\n t1 = ismember(self.x_corrs, t)\n t2 = t1.sum(axis=1) == 2\n\n temp = self.corr_means[j] + self.corr_std[j] * np.random.randn(t2.sum(), 1)\n corr_arr = [0] * len(t2)\n idx = 0\n for k in range(len(t2)):\n if t2[k] == 1:\n p[k, j] = max(min(temp[idx], 1), 0)\n sigma_signals[k, j] = self.sigmad\n idx += 1\n else:\n p[k, j] = 0\n sigma_signals[k, j] = self.sigmaf\n\n if self.corrnum < self.signum:\n sigma_signals[:, self.corrnum: self.signum] = self.sigmaf * np.ones(\n (self.n_combi, self.signum - self.corrnum))\n # minEig = 1\n\n R = self.generateBlockCorrelationMatrix(sigma_signals, p)\n\n attempts += 1\n e, ev = np.linalg.eig(self.R)\n minEig = np.min(e)\n if attempts > self.maxIters and minEig < 0:\n raise Exception(\"A positive definite correlation matrix could not be found with prescribed correlation \"\n \"structure. Try providing a different correlation structure or reducing the standard \"\n \"deviation\")\n\n return p, sigma_signals, R", "def generate_networks(self):\n\n # Defines dictionary of residue interaction types to include as network\n # edges.\n #**N.B.** Might want to provide these interactions as a program input?\n # **N.B.** 'intra' in the interaction names dict refers to interactions\n # between residues in the same chain\n interactions = [['hb', 'hb_pairs', 'hb_pairs_fasta_intra'],\n ['nhb', 'nhb_pairs', 'nhb_pairs_fasta_intra'],\n ['plusminus2', 'minus_2', 'minus_2_fasta'],\n ['plusminus2', 'plus_2', 'plus_2_fasta'],\n ['plusminus1', 'minus_1', 'minus_1_fasta'],\n ['plusminus1', 'plus_1', 'plus_1_fasta'],\n ['vdw', 'van_der_waals', 'van_der_waals_fasta_intra']]\n\n # Initialises MultiGraph (= undirected graph with self loops and\n # parallel edges) network of interacting residues\n G = nx.MultiGraph()\n\n # Adds nodes (= residues) to MultiGraph, labelled with their side-chain\n # identity (initially set to unknown), z-coordinate, buried surface area\n # (sandwiches only) and whether they are edge or central strands\n # (sandwiches only).\n if self.barrel_or_sandwich == '2.40':\n for num in range(self.input_df.shape[0]):\n node = self.input_df['domain_ids'][num] + self.input_df['res_ids'][num]\n aa_id = self.input_df['fasta_seq'][num]\n int_or_ext = self.input_df['int_ext'][num][0:3]\n z_coord = self.input_df['z_coords'][num]\n try:\n phi_psi_class = self.input_df['phi_psi_class'][num]\n except KeyError:\n phi_psi_class = '-'\n if not int_or_ext in ['int', 'ext']:\n raise ValueError('Residue {} has not been assigned to the '\n 'interior or exterior surface of the input'\n ' beta-barrel structure'.format(node))\n G.add_node(node, type='strand', aa_id=aa_id, int_ext=int_or_ext,\n eoc='-', z=z_coord, phipsi=phi_psi_class)\n elif self.barrel_or_sandwich == '2.60':\n for num in range(self.input_df.shape[0]):\n node = self.input_df['domain_ids'][num] + self.input_df['res_ids'][num]\n aa_id = self.input_df['fasta_seq'][num]\n int_or_ext = self.input_df['int_ext'][num][0:3]\n z_sandwich_coord = self.input_df['sandwich_z_coords'][num]\n 
#z_strand_coord = self.input_df['strand_z_coords'][num]\n #buried_surface_area = self.input_df['buried_surface_area'][num]\n edge_or_central = self.input_df['edge_or_central'][num][0:3]\n try:\n phi_psi_class = self.input_df['phi_psi_class'][num]\n except KeyError:\n phi_psi_class = '-'\n if not int_or_ext in ['int', 'ext']:\n raise ValueError('Residue {} has not been assigned to the '\n 'interior or exterior surface of the input'\n ' beta-barrel structure'.format(node))\n G.add_node(node, type='strand', aa_id=aa_id, int_ext=int_or_ext,\n z=z_sandwich_coord,\n #zstrand=z_strand_coord, bsa=buried_surface_area,\n eoc=edge_or_central,\n phipsi=phi_psi_class)\n\n domain_res_ids = list(G.nodes())\n\n # Adds edges (= residue interactions) to MultiGraph, labelled by\n # interaction type. The interactions considered are defined in\n # interactions_dict.\n for int_list in interactions:\n edge_label = int_list[0]\n int_name = int_list[1]\n int_fasta = int_list[2]\n\n for num in range(self.input_df.shape[0]):\n res_1 = self.input_df['domain_ids'][num] + self.input_df['res_ids'][num]\n res_list = self.input_df[int_name][num]\n if type(res_list) != list:\n res_list = [res_list]\n\n for res_index, res_2 in enumerate(res_list):\n res_2 = self.input_df['domain_ids'][num] + res_2\n # Accounts for interactions between residue pairs where one\n # residue is in the beta-barrel/sandwich domain and the\n # other is within a loop region\n aa_id = self.input_df[int_fasta][num][res_index]\n if not res_2 in list(G.nodes()):\n G.add_node(res_2, type='loop', aa_id=aa_id)\n if aa_id != G.nodes()[res_2]['aa_id']:\n print(aa_id, G.nodes()[res_2]['aa_id'])\n raise ValueError(\n 'Identity of node {} is inconsistent according to '\n 'the pairwise interactions listed in {} '\n '{}'.format(res_2, self.input_df_path, edge_label)\n )\n\n # Ensures interactions are only added to the network once\n if G.has_edge(res_1, res_2) is False:\n G.add_edge(res_1, res_2, interaction=edge_label)\n elif G.has_edge(res_1, res_2) is True:\n attributes = [val for label, sub_dict in\n dict(G[res_1][res_2]).items() for key,\n val in sub_dict.items()]\n if not edge_label in attributes:\n G.add_edge(res_1, res_2, interaction=edge_label)\n\n return G", "def make_bispectra(self, bgwindow=4):\n\n bisp = lambda d, ij, jk, ki: d[:,ij] * d[:,jk] * n.conj(d[:,ki]) # bispectrum for pol data\n# bisp = lambda d, ij, jk, ki: n.complex(d[ij] * d[jk] * n.conj(d[ki])) # without pol axis\n\n triples = self.make_triples()\n meanbl = self.data.mean(axis=2).mean(axis=0) # find bls with no zeros in either pol to ignore in triples\n self.triples = triples[n.all(meanbl[triples][:,0] != 0j, axis=1) & n.all(meanbl[triples][:,1] != 0j, axis=1) & n.all(meanbl[triples][:,2] != 0j, axis=1)] # only take triples if both pols are good. 
may be smaller than set for an individual pol\n\n # set up arrays for bispectrum and for weighting data (ignoring zeros)\n bispectra = n.zeros((len(self.dmarr), len(self.data), len(self.triples)), dtype='complex')\n truearr = n.ones( (self.npol, self.nbl, len(self.chans)))\n falsearr = n.zeros( (self.npol, self.nbl, len(self.chans)))\n\n # iterate over dm trials and integrations\n for d in xrange(len(self.dmarr)):\n twidth = n.round(self.twidths[d])\n dmwidth = int(n.round(n.max(self.dmtrack0[d][0]) - n.min(self.dmtrack0[d][0])))\n\n for i in xrange((bgwindow/2)+twidth, len(self.data)-( (bgwindow/2)+2*twidth+dmwidth )): # dmwidth avoided at end, others are split on front and back side of time iteration\n# for i in xrange((bgwindow/2)+twidth, len(self.data)-( (bgwindow/2)+twidth+dmwidth ), max(1,twidth/2)): # can step by twidth/2, but messes up data products\n diff = self.tracksub(d, i, bgwindow=bgwindow)\n\n if len(n.shape(diff)) == 1: # no track\n continue\n\n# **need to redo for self.flags**\n weightarr = n.where(diff != 0j, truearr, falsearr) # ignore zeros in mean across channels # bit of a hack\n try:\n diffmean = n.average(diff, axis=2, weights=weightarr)\n except ZeroDivisionError:\n diffmean = n.mean(diff, axis=2) # if all zeros, just make mean # bit of a hack\n\n for trip in xrange(len(self.triples)):\n ij, jk, ki = self.triples[trip]\n bispectra[d, i, trip] = bisp(diffmean, ij, jk, ki).mean(axis=0) # Stokes I bispectrum. Note we are averaging after forming bispectrum, so not technically a Stokes I bispectrum.\n print 'dedispersed for ', self.dmarr[d]\n self.bispectra = n.ma.masked_array(bispectra, bispectra == 0j)", "def base_gate_sequence_and_macros(model, basic_gates_macros: dict = None):\n prep_fiducials, meas_fiducials, germs = (\n model.prep_fiducials(),\n model.meas_fiducials(),\n model.germs(),\n )\n\n # create minimal generating gate sequence\n base_gate_sequence = list({k.str.split(\"@\")[0] for k in prep_fiducials + germs + meas_fiducials})\n base_gate_sequence.remove(\"{}\")\n base_gate_sequence.sort(key=len, reverse=True)\n\n if basic_gates_macros:\n # create generating gate sequence macros\n base_gate_sequence_macros = [s.split(\"G\") for s in base_gate_sequence]\n for i, s in enumerate(base_gate_sequence_macros):\n s = [basic_gates_macros[k] for k in s if basic_gates_macros.get(k) is not None]\n base_gate_sequence_macros[i] = sequence_macros(s)\n return base_gate_sequence, base_gate_sequence_macros\n else:\n return base_gate_sequence", "def main(template_initial_path, template_grown_path, step, total_steps, hydrogen_to_replace, core_atom_linker,\n tmpl_out_path, null_charges=False, growing_mode=\"SoftcoreLike\"):\n lambda_to_reduce = float(step/(total_steps+1))\n templ_ini = TemplateImpact(template_initial_path)\n \n for bond in templ_ini.list_of_bonds:\n key, bond_cont = bond\n templ_grw = TemplateImpact(template_grown_path)\n fragment_atoms, core_atoms_in, core_atoms_grown = detect_atoms(template_initial=templ_ini, \n template_grown=templ_grw,\n hydrogen_to_replace=hydrogen_to_replace)\n set_fragment_atoms(list_of_fragment_atoms=fragment_atoms)\n set_connecting_atom(template_grown=templ_grw, pdb_atom_name=core_atom_linker)\n fragment_bonds = detect_fragment_bonds(list_of_fragment_atoms=fragment_atoms, template_grown=templ_grw)\n set_fragment_bonds(list_of_fragment_bonds=fragment_bonds)\n set_linker_bond(templ_grw)\n if growing_mode == \"SoftcoreLike\":\n modify_core_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, exp_charges=True,\n 
null_charges=null_charges)\n reduce_fragment_parameters_linearly(templ_grw, lambda_to_reduce, exp_charges=True, \n null_charges=null_charges)\n \n modify_linkers_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, hydrogen_to_replace)\n elif growing_mode == \"AllLinear\":\n modify_core_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, exp_charges=False)\n reduce_fragment_parameters_linearly(templ_grw, lambda_to_reduce, exp_charges=False,\n null_charges=False)\n modify_linkers_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, hydrogen_to_replace)\n elif growing_mode == \"SpreadHcharge\":\n if step > 1:\n reduce_fragment_parameters_originaly(templ_grw, templ_ini, lambda_to_reduce, \n hydrogen=hydrogen_to_replace, n_GS=total_steps)\n modify_linkers_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, hydrogen_to_replace)\n else:\n reduce_fragment_parameters_spreading_H(templ_grw, templ_ini, lambda_to_reduce, \n hydrogen=hydrogen_to_replace, n_GS=total_steps)\n else:\n raise ValueError(\"Growing mode Not valid. Choose between: 'SoftcoreLike', 'SpreadHcharge', 'AllLinear'.\")\n templ_grw.write_template_to_file(template_new_name=tmpl_out_path)\n return [atom.pdb_atom_name for atom in fragment_atoms], \\\n [atom.pdb_atom_name for atom in core_atoms_grown]", "def __generate_atoms__(self, pdb):\n\n atoms = [] # Maybe this can be a set \n # TODO: Here I can use self.structure.header['missing_residues'] to get a list of residues. It will have their seq and I can use this to make a sequential index\n for model in self.structure:\n residues = model.get_residues() # Biopython \n for residue in residues:\n full_id = residue.get_full_id()\n ins_code = full_id[3][2] \n this_model = str(int(full_id[1]) + 1) # BioPython starts at 0 and fr3d-python starts at 1. Add 1 to each model so unit ids match\n this_chain = full_id[2]\n component_number = full_id[3][1]\n if 'H' in full_id[3][0][0]:\n res_group = 'HETATM'\n else:\n res_group = 'ATOM'\n\n res = residue.get_resname().replace(\" \",\"\")\n\n if ins_code == \" \":\n ins_code = None\n\n for atom in residue:\n #drop numbers \n id = atom.id \n id = re.sub(r'\\d+', '',id)\n first = id[0]\n # logic to extract the type of atom from the id\n if 'C' == first: #Carbon\n atom_type = 'C' \n elif 'O' == first: #Ox\n atom_type = 'O'\n elif 'P' == first: #Phosphorus\n atom_type = 'P'\n elif 'N' == first: # nitrogen\n atom_type = 'N'\n else: #Magnesium, other ions\n atom_type = atom.id\n\n x = atom.coord[0]\n y = atom.coord[1]\n z = atom.coord[2]\n \n alt_id = atom.get_altloc()\n if alt_id == \" \":\n alt_id = None\n atoms.append(Atom(x=x, y=y, z=z,\n pdb=self.name,\n model=this_model,\n chain=this_chain,\n component_id=res,\n component_number=component_number,\n component_index=component_number,\n insertion_code=ins_code,\n alt_id= alt_id,\n group=res_group,\n type=atom_type,\n name=atom.get_name(),\n symmetry='1_555', #I haven't figured out how to extract symmetries from pdb files yet. Resort to identity\n polymeric=True)) # Need to find a way to parse this from biopython. 
Important, may be relevent in structures.py\n return atoms", "def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene=init_gene() \n gene['id']=gene_counter\n gene['name']=gene_entry[1]\n gene['chr']=genes_cmpt[gene_entry]['chr']\n gene['source']=genes_cmpt[gene_entry]['source']\n gene['start']=genes_cmpt[gene_entry]['start']\n gene['stop']=genes_cmpt[gene_entry]['stop']\n gene['strand']=genes_cmpt[gene_entry]['strand']\n if not gene['strand'] in ['+', '-']:\n gene['strand']='.' # Strand info not known replaced with a dot symbol instead of None, ?, . etc.\n if len(transcripts_cmpt[gene_entry])>1:\n gene['is_alt_spliced'] = 1\n gene['is_alt'] = 1\n\t gtype=[]\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n\t\tgtype.append(tids['type'])\n exon_cod, utr5_cod, utr3_cod, cds_cod = [], [], [], []\n if (gene['chr'], tids['ID']) in exons_cmpt:\n exon_cod = [[feat_exon['start'], feat_exon['stop']] for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n utr5_cod = [[feat_utr5['start'], feat_utr5['stop']] for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n utr3_cod = [[feat_utr3['start'], feat_utr3['stop']] for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in cds_cmpt:\n cds_cod = [[feat_cds['start'], feat_cds['stop']] for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]]\n if len(exon_cod) == 0: ## build exon coordinates from UTR3, UTR5 and CDS\n if cds_cod != []:\n exon_cod=createExon(gene['strand'], utr5_cod, cds_cod, utr3_cod) \n\n if gene['strand']=='-': ## general order to coordinates\n if len(exon_cod) >1:\n if exon_cod[0][0] > exon_cod[-1][0]:\n exon_cod.reverse()\n if len(cds_cod) >1:\n if cds_cod[0][0] > cds_cod[-1][0]: \n cds_cod.reverse()\n if len(utr3_cod) >1:\n if utr3_cod[0][0] > utr3_cod[-1][0]: \n utr3_cod.reverse()\n if len(utr5_cod) >1:\n if utr5_cod[0][0] > utr5_cod[-1][0]:\n utr5_cod.reverse()\n\n tis, cdsStop, tss, cleave = [], [], [], [] ## speacial sited in the gene region \n if cds_cod != []:\n if gene['strand'] == '+':\n tis = [cds_cod[0][0]]\n cdsStop = [cds_cod[-1][1]-3]\n elif gene['strand'] == '-':\n tis = [cds_cod[-1][1]]\n cdsStop = [cds_cod[0][0]+3]\n if utr5_cod != []:\n if gene['strand'] == '+':\n tss = [utr5_cod[0][0]]\n elif gene['strand'] == '-':\n tss = [utr5_cod[-1][1]]\n if utr3_cod != []:\n if gene['strand'] == '+':\n cleave = [utr3_cod[-1][1]]\n elif gene['strand'] == '-':\n cleave = [utr3_cod[0][0]]\n\n cds_status, exon_status, utr_status = 0, 0, 0 ## status of the complete elements of the gene\n if cds_cod != []: ## adding phase to the CDS region \n cds_cod_phase = addCDSphase(gene['strand'], cds_cod)\n cds_status = 1\n gene['cds_exons'].append(cds_cod_phase)\n\n if exon_cod != []: \n exon_status = 1\n if utr5_cod != [] or utr3_cod != []: \n utr_status = 1\n if cds_status != 0 and exon_status != 0 and utr_status != 0:\n gene['transcript_status'].append(1)\n else:\n gene['transcript_status'].append(0)\n\n if exon_cod: ## final check point for a valid gene model \n gene['exons'].append(exon_cod)\n gene['utr3_exons'].append(utr3_cod)\n gene['utr5_exons'].append(utr5_cod)\n gene['tis'].append(tis)\n gene['cdsStop'].append(cdsStop)\n 
gene['tss'].append(tss)\n gene['cleave'].append(cleave) \n\t \n\t gtype=list(set(gtype)) ## different types \n gene['gene_info']=dict(ID=gene_entry[1],\n\t\t\t\tSource=genes_cmpt[gene_entry]['source'],\n\t\t\t\tType=gtype)\n gene=FeatureValueFormat(gene) ## get prepare for MAT writing \n gene_counter+=1\n gene_models.append(gene)\n return gene_models", "def test_sdg_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.sdg_gate_circuits_deterministic(final_measure=True)\n targets = ref_1q_clifford.sdg_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def casdetude_genetics():\n file_path = PROJECT_PATH + \"/geographycal_data/Monterusciello/MontEdo_buildings\"\n router = Router(building_file=file_path)\n\n router.design_aqueduct(0)\n\n router.write2epanet(router.acqueduct, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\",\n diam=False)\n\n read_epanet = graphIO.graph_reader(router.acqueduct)\n read_epanet.read_epanet(PROJECT_PATH + \"/geographycal_data/SolvedNet/MonteSolution\")\n kpi_calculator(router.acqueduct)\n\n minimal = router.design_minimal_aqueduct(router.acqueduct, \"Q*H\")\n router.write2epanet(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\", diam=False)", "def optimize_beds(\n n_beds: int,\n n_patients: int,\n cost: List[int],\n A=None,\n deterministic=True,\n cambios=1,\n Q=7,\n metrics_func=metrics,\n validation=False,\n) -> dict:\n\n # Sets\n COV = range(3)\n B = [\n {0},\n {0, 1},\n {0, 1, 2},\n {0, 1, 2, 3},\n ]\n T = range(12)\n\n # Parámetros\n C = [\n [0, 0, 0, 38],\n [0, 0, 30, 0],\n [3, 3, 0, 0],\n [0, 0, 12, 0],\n [1, 1, 0, 8],\n [0, 0, 0, 7],\n [9, 9, 0, 0],\n [7, 2, 0, 0],\n ] # matriz\n\n N = range(130)\n\n D = [\n [0, 5, 40, 35, 30, 25, 25, 15],\n [5, 0, 35, 30, 25, 10, 20, 10],\n [40, 35, 0, 5, 20, 30, 15, 25],\n [35, 30, 5, 0, 15, 25, 10, 20],\n [30, 25, 20, 15, 0, 15, 25, 35],\n [25, 10, 30, 25, 15, 0, 15, 20],\n [25, 20, 15, 10, 25, 15, 0, 10],\n [15, 10, 25, 20, 35, 20, 10, 0],\n ] # matriz\n\n P, G, I, E_start, E_end, V, S = gen_patients(\n n_patients, deterministic=deterministic\n )\n if A is None:\n A = [0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 1, 1]\n # Q = 7\n\n # Tipo de cama\n Cama = [index for i in C for index, j in enumerate(i) for _ in range(j)]\n\n # Unidad\n Uni = [index for index, i in enumerate(C) for j in i for _ in range(j)]\n\n Aux = {(p, i): int(bool(Cama[i] in B[G[p]])) for i in N for p in range(n_patients)}\n\n COV = [i for i in N if Uni[i] in COV]\n\n with Env() as env, Model(env=env) as m:\n # Variables\n Y = m.addVars(P, N, T, vtype=GRB.BINARY, name=\"Y\")\n alpha = m.addVars(P, T, vtype=GRB.BINARY, name=\"alpha\")\n Z = m.addVars(P, T, vtype=GRB.BINARY, name=\"Z\")\n\n # Constraints\n\n # R1: Se debe respetar la cantidad de camas f en todo u\n m.addConstrs(\n (quicksum(Y[p, i, t] for p in P) <= n_beds for i in N for t in T),\n name=\"R1\",\n )\n\n # R2: Cambio de cama\n m.addConstrs(\n (\n Y[p, i, t - 1] - Y[p, i, t] <= Z[p, t]\n for p in P\n for i in N\n for t in range(E_start[p] + 1, E_end[p] + 1)\n ),\n name=\"R2.1\",\n )\n m.addConstrs(\n (\n Y[p, i, t] - Y[p, i, t - 1] <= Z[p, t]\n for p in P\n for i in N\n for t in range(E_start[p] + 1, E_end[p] + 1)\n ),\n name=\"R2.2\",\n )\n\n m.addConstrs(\n (Z[p, t] == 0 for p in P for t in range(E_start[p] + 1)), name=\"R2.3\"\n )\n\n 
m.addConstrs(\n (Z[p, t] == 0 for p in P for t in range(E_end[p] + 1, T[-1] + 1)),\n name=\"R2.4\",\n )\n\n # R3: Hay un máximo de cambios por cada 2 horas\n m.addConstrs((quicksum(Z[p, t] for p in P) <= A[t] for t in T), name=\"R3\")\n\n # R4: No se puede trasladar a los pacientes críticos\n m.addConstrs((S[p] * Z[p, t] <= Q for p in P for t in T), name=\"R4\")\n\n # R5: Un paciente puede estar en una cama no ideal\n m.addConstrs(\n (\n alpha[p, t] == 1 - quicksum(Y[p, i, t] * Aux[p, i] for i in N)\n for p in P\n for t in range(E_start[p], E_end[p] + 1)\n ),\n name=\"R5\",\n )\n\n # R6: Mientras esté en el hospital, p siempre tiene asignado 1 cama\n m.addConstrs(\n (\n quicksum(Y[p, i, t] for i in N) == 1\n for p in P\n for t in range(E_start[p], E_end[p] + 1)\n ),\n name=\"R6\",\n )\n\n # R7: Si p es COVID-19 positivo, solo puede ser asignado a una cama COVID-19.\n m.addConstrs(\n (\n quicksum(Y[p, i, t] for i in COV) == V[p]\n for p in P\n for t in range(E_start[p], E_end[p] + 1)\n ),\n name=\"R7\",\n )\n\n # R8: Antes de entrar p no tiene asignada una cama\n m.addConstrs(\n (\n quicksum(Y[p, i, t] for i in N) == 0\n for p in P\n for t in range(E_start[p])\n ),\n name=\"R8\",\n )\n\n # R9: Después de salir, p no tendrá asignada una cama\n m.addConstrs(\n (\n quicksum(Y[p, i, t] for i in N) == 0\n for p in P\n for t in range(E_end[p] + 1, T[-1] + 1)\n ),\n name=\"R9\",\n ) # +1 para ser inclusivo\n\n # R10: p no puede ser trasladado más de 1 vez al el día\n m.addConstrs(\n (\n quicksum(Z[p, t] for t in range(E_start[p], E_end[p] + 1)) <= cambios\n for p in P\n ),\n name=\"R10\",\n )\n\n # R11: un paciente máximo por cama\n m.addConstrs(\n (quicksum(Y[p, i, t] for p in P) <= 1 for i in N for t in T), name=\"R11\"\n )\n\n # Objective\n m.setObjective(\n quicksum(Y[p, i, t] * D[Uni[i]][I[p]] for i in N for p in P for t in T)\n * cost[0]\n - quicksum(Z[p, t] * S[p] for p in P for t in T) * cost[1]\n + quicksum(alpha[p, t] for p in P for t in T) * cost[2]\n + quicksum(Z[p, t] for p in P for t in T) * cost[3],\n GRB.MINIMIZE,\n )\n\n m.update()\n\n # m.computeIIS() -> En caso de ser infactible se escribe en el archivo iis.ilp donde es infactible\n # m.write(\"archivo/iis.ilp\") -> Acá lo escribe, se deben descomentar ambas lineas para visualizarlo\n\n m.optimize()\n # m.update()\n\n if m.status is GRB.OPTIMAL:\n m.write(\"out_cama.sol\")\n return metrics_func(\n m,\n Y,\n alpha,\n Z,\n D,\n I,\n B,\n G,\n Cama,\n Uni,\n Q,\n S,\n N,\n P,\n T,\n A,\n E_start,\n E_end,\n COV,\n V,\n Aux,\n validation,\n )\n general_metrics = defaultdict(lambda: m.status)\n return general_metrics, None", "def test_clashing_atoms():\n benzene_path = examples_paths()['benzene']\n toluene_path = examples_paths()['toluene']\n with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_content = get_template_script(tmp_dir, keep_openeye=True)\n system_id = 'explicit-system'\n system_description = yaml_content['systems'][system_id]\n system_description['pack'] = True\n system_description['solvent'] = utils.CombinatorialLeaf(['vacuum', 'PME'])\n\n # Sanity check: at the beginning molecules clash\n toluene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(toluene_path, molecule_idx=0))\n benzene_pos = utils.get_oe_mol_positions(utils.load_oe_molecules(benzene_path, molecule_idx=0))\n assert pipeline.compute_min_dist(toluene_pos, benzene_pos) < pipeline.SetupDatabase.CLASH_THRESHOLD\n\n exp_builder = ExperimentBuilder(yaml_content)\n\n for sys_id in [system_id + '_vacuum', system_id + '_PME']:\n system_dir = 
os.path.dirname(\n exp_builder._db.get_system(sys_id)[0].position_path)\n\n # Get positions of molecules in the final system\n prmtop = openmm.app.AmberPrmtopFile(os.path.join(system_dir, 'complex.prmtop'))\n inpcrd = openmm.app.AmberInpcrdFile(os.path.join(system_dir, 'complex.inpcrd'))\n positions = inpcrd.getPositions(asNumpy=True).value_in_unit(unit.angstrom)\n topography = Topography(prmtop.topology, ligand_atoms='resname TOL')\n benzene_pos2 = positions.take(topography.receptor_atoms, axis=0)\n toluene_pos2 = positions.take(topography.ligand_atoms, axis=0)\n # atom_indices = pipeline.find_components(prmtop.createSystem(), prmtop.topology, 'resname TOL')\n # benzene_pos2 = positions.take(atom_indices['receptor'], axis=0)\n # toluene_pos2 = positions.take(atom_indices['ligand'], axis=0)\n\n # Test that clashes are resolved in the system\n min_dist, max_dist = pipeline.compute_min_max_dist(toluene_pos2, benzene_pos2)\n assert min_dist >= pipeline.SetupDatabase.CLASH_THRESHOLD\n\n # For solvent we check that molecule is within the box\n if sys_id == system_id + '_PME':\n assert max_dist <= exp_builder._db.solvents['PME']['clearance'].value_in_unit(unit.angstrom)", "def afshari_stewart_ds_2016(magnitude=7.0, distance=10.0, vs30=760.0, mechanism='unknown', \n z1=None, region='california', duration_type='DS575H'):\n\n # mechanism map\n mech_map = {'unknown':0, 'normal': 1, 'reverse': 2, 'strike-slip': 3}\n mech_tag = mech_map.get(mechanism.lower(), None)\n if mech_tag is None:\n print(\"SignificantDurationModel.afshari_stewart_ds_2016: mechanism='unknown','normal','reverse','strike-slip'?\")\n return None, None, None, None\n # region map\n reg_map = {'california':0, 'japan': 1, 'other': 2}\n reg_tag = reg_map.get(region.lower(), None)\n if reg_tag is None:\n print(\"SignificantDurationModel.afshari_stewart_ds_2016: region='california', 'japan', 'other'?\")\n return None, None, None, None\n # duration type map\n dur_map = {'DS575H':0, 'DS595H': 1, 'DS2080H': 2}\n dur_tag = dur_map.get(duration_type.upper(), None)\n if dur_tag is None:\n print(\"SignificantDurationModel.afshari_stewart_ds_2016: duration_type='DS575H','DS595H','DS2080H'?\")\n return None, None, None, None\n \n # source coefficients\n M1 = [5.35, 5.20, 5.20]\n M2 = [7.15, 7.40, 7.40]\n b0 = [[1.2800, 2.1820, 0.8822],\n [1.5550, 2.5410, 1.4090],\n [0.7806, 1.6120, 0.7729],\n [1.2790, 2.3020, 0.8804]]\n b1 = [[5.576, 3.628, 6.182],\n [4.992, 3.170, 4.778],\n [7.061, 4.536, 6.579],\n [5.578, 3.467, 6.188]]\n b2 = [0.9011, 0.9443, 0.7414]\n b3 = [-1.684, -3.911, -3.164]\n Mstar = [6, 6, 6]\n # path coefficients\n c1 = [0.1159, 0.3165, 0.0646]\n RR1 = [10, 10, 10]\n RR2 = [50, 50, 50]\n c2 = [0.1065, 0.2539, 0.0865]\n c3 = [0.0682, 0.0932, 0.0373]\n # site coefficients\n c4 = [-0.2246, -0.3183, -0.4237]\n Vref = [368.2, 369.9, 369.6]\n V1 = [600, 600, 600]\n c5 = [0.0006, 0.0006, 0.0005]\n dz1ref = [200, 200, 200]\n # standard deviation coefficients\n phi1 = [0.54, 0.43, 0.56]\n phi2 = [0.41, 0.35, 0.45]\n tau1 = [0.28, 0.25, 0.30]\n tau2 = [0.25, 0.19, 0.19]\n\n # basin depth\n if reg_tag == 0:\n mu_z1 = np.exp(-7.15/4*np.log((vs30**4+570.94**4)/(1360**4+570.94**4)))\n else:\n mu_z1 = np.exp(-5.23/4*np.log((vs30**4+412.39**4)/(1360**4+412.39**4)))\n # differential basin depth\n if z1 is None or z1 < 0 or reg_tag == 2:\n dz1 = 0\n else:\n dz1 = z1-mu_z1\n\n # source term\n if magnitude < M1[dur_tag]:\n F_E = b0[mech_tag][dur_tag]\n else:\n if magnitude < M2[dur_tag]:\n deltaSigma = 
np.exp(b1[mech_tag][dur_tag]+b2[dur_tag]*(magnitude-Mstar[dur_tag]))\n else:\n deltaSigma = np.exp(b1[mech_tag][dur_tag]+b2[dur_tag]*(M2[dur_tag]-Mstar[dur_tag])+ \\\n b3[dur_tag]*(magnitude-M2[dur_tag]))\n \n M_0 = 10**(1.5*magnitude+16.05)\n f_0 = 4.9e6*3.2*(deltaSigma/M_0)**(1/3)\n F_E = 1/f_0\n # path term\n if distance < RR1[dur_tag]:\n F_P = c1[dur_tag]*distance\n elif distance < RR2[dur_tag]:\n F_P = c1[dur_tag]*RR1[dur_tag]+c2[dur_tag]*(distance-RR1[dur_tag])\n else:\n F_P = c1[dur_tag]*RR1[dur_tag]+c2[dur_tag]*(RR2[dur_tag]-RR1[dur_tag])+c3[dur_tag]*(distance-RR2[dur_tag]) \n # F_deltaz term\n if dz1 <= dz1ref[dur_tag]:\n F_deltaz = c5[dur_tag]*dz1\n else:\n F_deltaz = c5[dur_tag]*dz1ref[dur_tag]\n # site term\n if vs30 < V1[dur_tag]:\n F_S = c4[dur_tag]*np.log(vs30/Vref[dur_tag])+F_deltaz\n else:\n F_S = c4[dur_tag]*np.log(V1[dur_tag]/Vref[dur_tag])+F_deltaz\n\n # median\n ds_median = np.exp(np.log(F_E+F_P)+F_S)\n # standard deviations\n # between event\n if magnitude < 5.5:\n ds_phi = phi1[dur_tag]\n elif magnitude < 5.75:\n ds_phi = phi1[dur_tag]+(phi2[dur_tag]-phi1[dur_tag])*(magnitude-5.5)/(5.75-5.5)\n else:\n ds_phi = phi2[dur_tag]\n # within event\n if magnitude < 6.5:\n ds_tau = tau1[dur_tag]\n elif magnitude < 7:\n ds_tau = tau1[dur_tag]+(tau2[dur_tag]-tau1[dur_tag])*(magnitude-6.5)/(7-6.5)\n else:\n ds_tau = tau2[dur_tag]\n # total\n ds_sigma = np.sqrt(ds_phi**2+ds_tau**2)\n\n # return\n return np.log(ds_median), ds_sigma, ds_tau, ds_phi", "def impose_clash(str1, strs, k, i, num_contact, c, similar_chains, stoichiometry, outdir, max_files, verbose):\n\n global models\n global models2\n global j\n\n if max_files:\n current_file_num = len(list(filter(lambda x: x.startswith(\"complex\"), os.listdir(outdir))))\n if current_file_num >= max_files:\n return\n\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n current_stoich = {}\n stoich_list = [similar_chains[x.id] for x in str1.get_chains()]\n stoich_set = set(stoich_list)\n stoich_list2 = [stoich_list.count(x) for x in stoich_set]\n stoich_list2.sort(reverse = True)\n\n for index, s in enumerate(stoich_list2):\n current_stoich[alphabet[index]] = s\n if alphabet[index] not in stoichiometry:\n return\n elif current_stoich[alphabet[index]] > stoichiometry[alphabet[index]]:\n return\n\n if current_stoich == stoichiometry:\n for ind, ch in enumerate(list(str1.get_chains())):\n ch.id = alphabet[ind]\n models2.append(str1)\n j += 1\n io = PDBIO()\n io.set_structure(str1.copy()[0])\n io.save(outdir + \"/complex\" + str(j) + \".pdb\")\n if verbose:\n sys.stderr.write(\"Saving complex%s.pdb to %s.\\n\" % (str(j), outdir))\n return\n\n elif i >= k:\n return\n\n fails = 0\n chains1 = list(str1.get_chains())\n\n for str2 in strs:\n chains2 = list(str2.get_chains())\n for chain1 in chains1:\n for chain2 in chains2:\n str3 = str1.copy()\n str4 = str2.copy()\n copies3 = dict([(x, y) for x, y in zip(chains1, list(str3[0].get_chains()))])\n copies4 = dict([(x, y) for x, y in zip(chains2, list(str4[0].get_chains()))])\n\n if similar_chains[chain1.id] == similar_chains[chain2.id]:\n common_chain1 = copies3[chain1]\n common_chain2 = copies4[chain2]\n superimposed_chains = Superimpose(common_chain1, common_chain2)\n superimposed_chains_fin = superimposed_chains.SuperimposeStructures()\n superimposed_chains_fin.apply(list(str4[0].get_atoms()))\n c += 1\n chain_diff = [x for x in str4[0].get_chains() if x.id != common_chain2.id]\n chain_diff2 = chain_diff[0].copy()\n chain_diff2.id = id(chain_diff2) + c\n clashes = 
get_interactions(list(chain_diff2.get_atoms()), list(str3[0].get_atoms()), 2)\n\n if len(clashes) >= num_contact:\n fails += 1\n\n else:\n str3[0].add(chain_diff2)\n similar_chains[chain_diff2.id] = similar_chains[str2[0][chain_diff[0].get_id()].id]\n repeated = False\n str5 = str3.copy()\n for model in models:\n superimposed_models = Superimpose(model, str5[0])\n rmsd = superimposed_models.getRMSD()\n if rmsd < 10 and len(list(model.get_chains())) == len(list(str5.get_chains())):\n repeated = True\n if not repeated:\n models.append(str3)\n impose_clash(str3, strs, k, i + 1, num_contact, c, similar_chains, stoichiometry, outdir, max_files, verbose)\n if max_files:\n current_file_num = len(\n list(filter(lambda x: x.startswith(\"complex\"), os.listdir(outdir))))\n if current_file_num >= max_files:\n return\n\n else:\n\n fails += 1\n\n if fails == len(strs) * len(chains1):\n return", "def preparehspiceidvgGEO1(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,HFINparam,TFIN_TOPparam,TFIN_BASEparam,EOTparam,NBODYparam,NFINparam):\n #make an aux copy of hspice file to simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'HFINparam',HFINparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_TOPparam', TFIN_TOPparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_BASEparam',TFIN_BASEparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'EOTparam', EOTparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NBODYparam',NBODYparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam', NFINparam)", "def genPrimerPairs_3Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 3\\' extension half-asstemers')\n\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[8:10]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_L10 = forwPrimer5_3[10:]\n print(f\"Last 10 Nucleotides of forward primer: {forwPrimer_L10}\")\n\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_L10[::-1])):\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n\n \"\"\"First 10 Nuc of rev primer must be 
identical to last 10 Nuc of forward Primer\"\"\"\n revPrimer5_3 = forwPrimer_L10 + revPrimer_L10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def test_bga(pudl_out_eia):\n print(\"\\nInferring complete boiler-generator associations...\")\n bga = pudl_out_eia.bga()\n gens_simple = pudl_out_eia.gens_eia860()[['report_date',\n 'plant_id_eia',\n 'generator_id',\n 'fuel_type_code_pudl']]\n bga_gens = bga[['report_date', 'plant_id_eia',\n 'unit_id_pudl', 'generator_id']].drop_duplicates()\n\n gens_simple = pd.merge(gens_simple, bga_gens,\n on=['report_date', 'plant_id_eia', 'generator_id'],\n validate='one_to_one')\n units_simple = gens_simple.drop('generator_id', axis=1).drop_duplicates()\n units_fuel_count = \\\n units_simple.groupby(\n ['report_date',\n 'plant_id_eia',\n 'unit_id_pudl'])['fuel_type_code_pudl'].count().reset_index()\n units_fuel_count.rename(\n columns={'fuel_type_code_pudl': 'fuel_type_count'}, inplace=True)\n units_simple = pd.merge(units_simple, units_fuel_count,\n on=['report_date', 'plant_id_eia', 'unit_id_pudl'])\n num_multi_fuel_units = len(units_simple[units_simple.fuel_type_count > 1])\n multi_fuel_unit_fraction = num_multi_fuel_units / len(units_simple)\n print(\"\"\" NOTE: {:.0%} of generation units contain generators with\n differing primary fuels.\"\"\".format(multi_fuel_unit_fraction))", "def run(self, verbose=False, build_graph=False):\n from sage.combinat.crystals.letters import CrystalOfLetters\n letters = CrystalOfLetters(self.rigged_con.parent()._cartan_type.classical())\n\n # This is technically bad, but because the first thing we do is append\n # an empty list to ret_crystal_path, we correct this. We do it this\n # way so that we do not have to remove an empty list after the\n # bijection has been performed.\n ret_crystal_path = []\n\n for dim in self.rigged_con.parent().dims:\n ret_crystal_path.append([])\n\n # Check to see if we are a spinor\n if dim[0] == self.n:\n # Perform the spinor bijection by converting to type A_{2n-1}^{(2)}\n # doing the bijection there and pulling back\n\n from sage.combinat.rigged_configurations.bij_type_A2_odd import RCToKRTBijectionTypeA2Odd\n from sage.combinat.rigged_configurations.rigged_configurations import RiggedConfigurations\n from sage.combinat.rigged_configurations.rigged_partition import RiggedPartition, RiggedPartitionTypeB\n \n # Convert to a type A_{2n-1}^{(2)} RC\n RC = RiggedConfigurations(['A', 2*self.n-1, 2], self.cur_dims)\n if verbose:\n print(\"====================\")\n print(repr(RC(*self.cur_partitions, use_vacancy_numbers=True)))\n print(\"--------------------\")\n print(ret_crystal_path)\n print(\"--------------------\\n\")\n print(\"Applying doubling map\\n\")\n # Convert the n-th partition into a regular rigged partition\n self.cur_partitions[-1] = RiggedPartition(self.cur_partitions[-1]._list,\n self.cur_partitions[-1].rigging,\n self.cur_partitions[-1].vacancy_numbers)\n\n bij = RCToKRTBijectionTypeA2Odd(RC(*self.cur_partitions, use_vacancy_numbers=True))\n for i in range(len(self.cur_dims)):\n if bij.cur_dims[i][0] != self.n:\n bij.cur_dims[i][1] *= 2\n for i in range(self.n-1):\n for j in range(len(bij.cur_partitions[i])):\n bij.cur_partitions[i]._list[j] *= 2\n bij.cur_partitions[i].rigging[j] *= 2\n bij.cur_partitions[i].vacancy_numbers[j] *= 2\n\n if build_graph:\n y = self.rigged_con.parent()(*[x._clone() for x in self.cur_partitions], use_vacancy_numbers=True)\n self._graph.append([self._graph[-1][1], (y, len(self._graph)), '2x'])\n 
\n # Perform the type A_{2n-1}^{(2)} bijection\n \n # Iterate over each column\n for dummy_var in range(dim[1]):\n # Split off a new column if necessary\n if bij.cur_dims[0][1] > 1:\n bij.cur_dims[0][1] -= 1\n bij.cur_dims.insert(0, [dim[0], 1])\n \n # Perform the corresponding splitting map on rigged configurations\n # All it does is update the vacancy numbers on the RC side\n for a in range(self.n):\n bij._update_vacancy_numbers(a)\n\n if build_graph:\n y = self.rigged_con.parent()(*[x._clone() for x in self.cur_partitions], use_vacancy_numbers=True)\n self._graph.append([self._graph[-1][1], (y, len(self._graph)), 'ls'])\n \n while bij.cur_dims[0][0] > 0:\n if verbose:\n print(\"====================\")\n print(repr(RC(*bij.cur_partitions, use_vacancy_numbers=True)))\n print(\"--------------------\")\n print(ret_crystal_path)\n print(\"--------------------\\n\")\n\n bij.cur_dims[0][0] -= 1 # This takes care of the indexing\n b = bij.next_state(bij.cur_dims[0][0])\n # Make sure we have a crystal letter\n ret_crystal_path[-1].append(letters(b)) # Append the rank\n\n if build_graph:\n y = self.rigged_con.parent()(*[x._clone() for x in self.cur_partitions], use_vacancy_numbers=True)\n self._graph.append([self._graph[-1][1], (y, len(self._graph)), letters(b)])\n\n bij.cur_dims.pop(0) # Pop off the leading column\n\n self.cur_dims.pop(0) # Pop off the spin rectangle\n\n self.cur_partitions = bij.cur_partitions\n # Convert the n-th partition back into the special type B one\n self.cur_partitions[-1] = RiggedPartitionTypeB(self.cur_partitions[-1])\n\n # Convert back to a type B_n^{(1)}\n if verbose:\n print(\"====================\")\n print(repr(self.rigged_con.parent()(*bij.cur_partitions, use_vacancy_numbers=True)))\n print(\"--------------------\")\n print(ret_crystal_path)\n print(\"--------------------\\n\")\n print(\"Applying halving map\\n\")\n\n for i in range(self.n-1):\n for j in range(len(self.cur_partitions[i])):\n self.cur_partitions[i]._list[j] //= 2\n self.cur_partitions[i].rigging[j] //= 2\n self.cur_partitions[i].vacancy_numbers[j] //= 2\n\n if build_graph:\n y = self.rigged_con.parent()(*[x._clone() for x in self.cur_partitions], use_vacancy_numbers=True)\n self._graph.append([self._graph[-1][1], (y, len(self._graph)), '1/2x'])\n else:\n # Perform the regular type B_n^{(1)} bijection\n\n # Iterate over each column\n for dummy_var in range(dim[1]):\n # Split off a new column if necessary\n if self.cur_dims[0][1] > 1:\n if verbose:\n print(\"====================\")\n print(repr(self.rigged_con.parent()(*self.cur_partitions, use_vacancy_numbers=True)))\n print(\"--------------------\")\n print(ret_crystal_path)\n print(\"--------------------\\n\")\n print(\"Applying column split\")\n\n self.cur_dims[0][1] -= 1\n self.cur_dims.insert(0, [dim[0], 1])\n\n # Perform the corresponding splitting map on rigged configurations\n # All it does is update the vacancy numbers on the RC side\n for a in range(self.n):\n self._update_vacancy_numbers(a)\n\n if build_graph:\n y = self.rigged_con.parent()(*[x._clone() for x in self.cur_partitions], use_vacancy_numbers=True)\n self._graph.append([self._graph[-1][1], (y, len(self._graph)), '2x'])\n\n while self.cur_dims[0][0] > 0:\n if verbose:\n print(\"====================\")\n print(repr(self.rigged_con.parent()(*self.cur_partitions, use_vacancy_numbers=True)))\n print(\"--------------------\")\n print(ret_crystal_path)\n print(\"--------------------\\n\")\n\n self.cur_dims[0][0] -= 1 # This takes care of the indexing\n b = 
self.next_state(self.cur_dims[0][0])\n\n # Make sure we have a crystal letter\n ret_crystal_path[-1].append(letters(b)) # Append the rank\n\n if build_graph:\n y = self.rigged_con.parent()(*[x._clone() for x in self.cur_partitions], use_vacancy_numbers=True)\n self._graph.append([self._graph[-1][1], (y, len(self._graph)), letters(b)])\n\n self.cur_dims.pop(0) # Pop off the leading column\n\n if build_graph:\n self._graph.pop(0) # Remove the dummy at the start\n from sage.graphs.digraph import DiGraph\n from sage.graphs.dot2tex_utils import have_dot2tex\n self._graph = DiGraph(self._graph)\n if have_dot2tex():\n self._graph.set_latex_options(format=\"dot2tex\", edge_labels=True)\n\n return self.KRT(pathlist=ret_crystal_path)", "def test_stdstar(self):\n from ..io.fluxcalibration import read_stdstar_models, write_stdstar_models\n nstd = 5\n nwave = 10\n flux = np.random.uniform(size=(nstd, nwave))\n wave = np.arange(nwave)\n fibers = np.arange(nstd)*2\n data = Table()\n data['BESTMODEL'] = np.arange(nstd)\n data['TEMPLATEID'] = np.arange(nstd)\n data['CHI2DOF'] = np.ones(nstd)\n data['REDSHIFT'] = np.zeros(nstd)\n\n fibermap = Table()\n fibermap['TARGETID'] = np.arange(nstd)\n\n input_frames = Table()\n input_frames['NIGHT'] = np.ones(nstd)*20201220\n input_frames['EXPID'] = np.arange(nstd)\n input_frames['CAMERA'] = 'b0'\n\n #- Write with data as Table, array, and dict\n write_stdstar_models(self.testfile, flux, wave, fibers,\n data, fibermap, input_frames)\n write_stdstar_models(self.testfile, flux, wave, fibers,\n np.asarray(data), fibermap, input_frames)\n\n datadict = dict()\n for colname in data.colnames:\n datadict[colname] = data[colname]\n\n write_stdstar_models(self.testfile, flux, wave, fibers, datadict,\n fibermap, input_frames)\n\n #- Now write with coefficients too\n datadict['COEFF'] = np.zeros((nstd, 3))\n write_stdstar_models(self.testfile, flux, wave, fibers, datadict,\n fibermap, input_frames)\n\n fx, wx, fibx, metadata = read_stdstar_models(self.testfile)\n self.assertTrue(np.all(fx == flux.astype('f4').astype('f8')))\n self.assertTrue(np.all(wx == wave.astype('f4').astype('f8')))\n self.assertTrue(np.all(fibx == fibers))", "def bregmisi_all(mix_stft, spectro_mag, win_length=None, hop_length=None, win_type='hann', src_ref=None, beta=2.,\n grad_step=1e-3 * np.ones((2, 2)), max_iter=20):\n\n # Get the corresponding power spectrogram (for d=2)\n spectro_pow = np.power(spectro_mag, 2)\n\n est_1r, sdr_1r = bregmisi(mix_stft, spectro_mag, src_ref=src_ref, win_length=win_length, hop_length=hop_length,\n win_type=win_type, beta=beta, d=1, grad_step=grad_step[0, 0], direc='right',\n max_iter=max_iter)\n est_2r, sdr_2r = bregmisi(mix_stft, spectro_pow, src_ref=src_ref, win_length=win_length, hop_length=hop_length,\n win_type=win_type, beta=beta, d=2, grad_step=grad_step[1, 0], direc='right',\n max_iter=max_iter)\n est_1l, sdr_1l = bregmisi(mix_stft, spectro_mag, src_ref=src_ref, win_length=win_length, hop_length=hop_length,\n win_type=win_type, beta=beta, d=1, grad_step=grad_step[0, 1], direc='left',\n max_iter=max_iter)\n est_2l, sdr_2l = bregmisi(mix_stft, spectro_pow, src_ref=src_ref, win_length=win_length, hop_length=hop_length,\n win_type=win_type, beta=beta, d=2, grad_step=grad_step[1, 1], direc='left',\n max_iter=max_iter)\n\n # Store estimated sources and SDR over iterations (if any)\n out = {'src_est_1r': est_1r,\n 'src_est_2r': est_2r,\n 'src_est_1l': est_1l,\n 'src_est_2l': est_2l,\n 'sdr_1r': sdr_1r,\n 'sdr_2r': sdr_2r,\n 'sdr_1l': sdr_1l,\n 'sdr_2l': sdr_2l\n }\n\n 
return out", "def generate_radicals(species: Species,\n types: List[str],\n react_aromatic_rings: bool = False,\n ):\n existing_radical_indices, output, aromatic_rings, output_species = list(), list(), list(), list()\n if species is None or len(species.molecule[0].atoms) == 1 \\\n or not any(atom.is_hydrogen() for atom in species.molecule[0].atoms):\n return output\n spc = ARCSpecies(label=species.label, adjlist=species.copy(deep=True).to_adjacency_list(), keep_mol=True)\n res_structures = generate_resonance_structures(spc.mol)\n spc.mol = res_structures[0] if res_structures is not None else spc.mol\n spc.mol.atoms = [a for a in spc.mol.atoms if not a.is_hydrogen()] + [a for a in spc.mol.atoms if a.is_hydrogen()]\n spc.final_xyz = spc.get_xyz(generate=True)\n spc.bdes = list()\n i = 0\n for atom_1 in spc.mol.atoms:\n if not atom_1.is_hydrogen():\n for atom_2, bond_12 in atom_1.edges.items():\n if atom_2.is_hydrogen() and bond_12.is_single():\n # skipping hydrogen bonds\n break\n else:\n continue\n if spc.mol.atoms.index(atom_1) in existing_radical_indices:\n continue\n if not react_aromatic_rings and any(bond.is_benzene() for bond in atom_1.edges.values()):\n continue\n existing_radical_indices.append(spc.mol.atoms.index(atom_1))\n i += 1\n spc.bdes.append((spc.mol.atoms.index(atom_1) + 1, spc.mol.atoms.index(atom_2) + 1))\n\n radicals = [rad for rad in spc.scissors(sort_atom_labels=True) if rad.label != 'H']\n\n rad_i, alkoxyl_i, peroxyl_i = 0, 0, 0\n for i, rad in enumerate(radicals):\n if 'radical' in types:\n if not any(rad.is_isomorphic(spc) for spc in output_species):\n output_species.append(rad)\n output.append((f'{species.label}_radical_{rad_i}', rad.mol.copy(deep=True).to_smiles()))\n rad_i += 1\n if 'alkoxyl' in types:\n alkoxyl = rad.copy()\n alkoxyl.mol_list = None\n oxygen = Atom(element='O', radical_electrons=1, charge=0, lone_pairs=2)\n alkoxyl.mol.add_atom(oxygen)\n alkoxyl.mol.atoms[existing_radical_indices[i]].decrement_radical()\n new_bond = Bond(atom1=alkoxyl.mol.atoms[existing_radical_indices[i]], atom2=oxygen, order=1)\n alkoxyl.mol.add_bond(new_bond)\n if not any(alkoxyl.is_isomorphic(spc) for spc in output_species):\n output_species.append(alkoxyl)\n output.append((f'{species.label}_alkoxyl_{alkoxyl_i}', alkoxyl.mol.to_smiles()))\n alkoxyl_i += 1\n if 'peroxyl' in types:\n peroxyl = rad.copy()\n peroxyl.mol_list = None\n oxygen_1 = Atom(element='O', radical_electrons=0, charge=0, lone_pairs=2)\n oxygen_2 = Atom(element='O', radical_electrons=1, charge=0, lone_pairs=2)\n peroxyl.mol.add_atom(oxygen_1)\n peroxyl.mol.add_atom(oxygen_2)\n peroxyl.mol.atoms[existing_radical_indices[i]].decrement_radical()\n new_bond_1 = Bond(atom1=peroxyl.mol.atoms[existing_radical_indices[i]], atom2=oxygen_1, order=1)\n new_bond_2 = Bond(atom1=oxygen_1, atom2=oxygen_2, order=1)\n peroxyl.mol.add_bond(new_bond_1)\n peroxyl.mol.add_bond(new_bond_2)\n if not any(peroxyl.is_isomorphic(spc) for spc in output_species):\n output_species.append(peroxyl)\n output.append((f'{species.label}_peroxyl_{peroxyl_i}', peroxyl.mol.to_smiles()))\n peroxyl_i += 1\n\n return output", "def gen_species(self, model, options, misc_options):\n Avogadro = model.parameters.get_one(id='Avogadro')\n c = model.compartments.get_one(id='c')\n\n # species types\n\n init_concs = {}\n\n # other basic metabolites\n for species_type in options['basic']:\n species_type_structure = wc_lang.ChemicalStructure(value=species_type['structure_string'], format=eval(species_type['structure_format']))\n 
species_type_structure.empirical_formula = OpenBabelUtils.get_formula(species_type_structure.get_structure())\n species_type_structure.molecular_weight = species_type_structure.empirical_formula.get_molecular_weight()\n species_type_structure.charge = species_type_structure.get_structure().GetTotalCharge()\n model.species_types.create(id=species_type['id'], name=species_type['name'], type=wc_ontology[species_type['type']], structure=species_type_structure)\n init_concs[species_type['id']] = species_type['init_conc'] * Avogadro.value * c.init_volume.mean\n\n # RNA\n mean_gc_frac = options['rna']['mean_gc_frac']\n RNA_BASES = ['A', 'C', 'G', 'U']\n PROB_BASES = [(1 - mean_gc_frac) / 2, mean_gc_frac /2, mean_gc_frac/2, (1-mean_gc_frac)/2]\n\n\n rna_lens = 3 * self.rand(options['rna']['mean_rna_len'], count=options['rna']['num_rna'], min=2)\n for i in range(options['rna']['num_rna']):\n rna_str = 'AUG'\n for j in range(0, rna_lens[i], 3):\n codon = \"\".join(random.choices(RNA_BASES, weights=PROB_BASES, k=3))\n while codon in ['UAA', 'UAG', 'UGA']:\n codon = \"\".join(random.choices(RNA_BASES, weights=PROB_BASES, k=3))\n rna_str += codon\n rna_str += random.choice(['UAA', 'UAG', 'UGA'])\n rna_str_structure = wc_lang.ChemicalStructure(\n value=rna_str,\n format=wc_lang.ChemicalStructureFormat.BpForms,\n alphabet=wc_lang.ChemicalStructureAlphabet.rna)\n rna_str_structure.empirical_formula = rna_str_structure.get_structure().get_formula()\n rna_str_structure.molecular_weight = rna_str_structure.empirical_formula.get_molecular_weight()\n rna_str_structure.charge = rna_str_structure.get_structure().get_charge()\n rna_id = 'rna_'+str(i+1)\n rna = model.species_types.create(id=rna_id,\n name='RNA '+str(i+1),\n type=wc_ontology['WC:RNA'],\n structure=rna_str_structure)\n half_life_rna = model.parameters.create(id='half_life_'+rna_id,\n type=None,\n value=options['rna']['halflife'],\n units=unit_registry.parse_units('s'))\n init_concs[rna_id] = 1\n\n # protein\n codon_translation = misc_options['codon_translation']\n rna_species_types = [species_types for species_types in model.species_types if species_types.type == wc_ontology['WC:RNA']]\n for rna_species_type in rna_species_types:\n rna_str = rna_species_type.structure.value\n prot_str = ''\n for i in range(0, len(rna_str), 3):\n codon = rna_str[i:i+3]\n aa = codon_translation[codon]\n if aa == 'STOP':\n break\n else:\n prot_str += codon_translation[codon]\n prot_str_structure = wc_lang.ChemicalStructure(\n value=prot_str,\n format=wc_lang.ChemicalStructureFormat.BpForms,\n alphabet=wc_lang.ChemicalStructureAlphabet.protein)\n prot_str_structure.empirical_formula = prot_str_structure.get_structure().get_formula()\n prot_str_structure.molecular_weight = prot_str_structure.empirical_formula.get_molecular_weight()\n prot_str_structure.charge = prot_str_structure.get_structure().get_charge()\n prot_id = 'prot_'+rna_species_type.id[4:]\n prot = model.species_types.create(id=prot_id,\n name='Protein '+rna_species_type.id[4:],\n type=wc_ontology['WC:protein'],\n structure=prot_str_structure)\n half_life_prot = model.parameters.create(id='half_life_'+prot_id,\n type=None,\n value=options['protein']['halflife'],\n units=unit_registry.parse_units('s'))\n init_concs[prot_id] = 5\n\n\n\n # enzymes\n for species_type in options['enzymes']:\n enzyme = model.species_types.create(id=species_type['id'],\n name=species_type['name'],\n type=wc_ontology['WC:protein'])\n init_concs[species_type['id']] = species_type['init_conc']\n\n\n # species and initial 
concentrations\n for model_species_type in model.species_types:\n model_species = model.species.get_or_create(species_type=model_species_type, compartment=c)\n model_species.id = model_species.gen_id()\n conc = model.distribution_init_concentrations.create(species=model_species, mean=init_concs[model_species_type.id], units=unit_registry.parse_units('molecule'))\n conc.id = conc.gen_id()", "def createSystem(self, params, nonbondedMethod=ff.NoCutoff,\n nonbondedCutoff=1.0*u.nanometer,\n switchDistance=0.0*u.nanometer,\n constraints=None,\n rigidWater=True,\n implicitSolvent=None,\n implicitSolventKappa=None,\n implicitSolventSaltConc=0.0*u.moles/u.liter,\n temperature=298.15*u.kelvin,\n soluteDielectric=1.0,\n solventDielectric=78.5,\n removeCMMotion=True,\n hydrogenMass=None,\n ewaldErrorTolerance=0.0005,\n flexibleConstraints=True,\n verbose=False,\n gbsaModel=None):\n # Load the parameter set\n self.loadParameters(params)\n hasbox = self.topology.getUnitCellDimensions() is not None\n # Check GB input parameters\n if implicitSolvent is not None and gbsaModel not in ('ACE', None):\n raise ValueError('gbsaModel must be ACE or None')\n # Set the cutoff distance in nanometers\n cutoff = None\n if nonbondedMethod is not ff.NoCutoff:\n cutoff = nonbondedCutoff\n # Remove units from cutoff\n if u.is_quantity(cutoff):\n cutoff = cutoff.value_in_unit(u.nanometers)\n\n if nonbondedMethod not in (ff.NoCutoff, ff.CutoffNonPeriodic,\n ff.CutoffPeriodic, ff.Ewald, ff.PME, ff.LJPME):\n raise ValueError('Illegal value for nonbonded method')\n if not hasbox and nonbondedMethod in (ff.CutoffPeriodic,\n ff.Ewald, ff.PME, ff.LJPME):\n raise ValueError('Illegal nonbonded method for a '\n 'non-periodic system')\n if implicitSolvent not in (HCT, OBC1, OBC2, GBn, GBn2, None):\n raise ValueError('Illegal implicit solvent model choice.')\n if not constraints in (None, ff.HAngles, ff.HBonds, ff.AllBonds):\n raise ValueError('Illegal constraints choice')\n\n # Define conversion factors\n length_conv = u.angstrom.conversion_factor_to(u.nanometer)\n _chmfrc = u.kilocalorie_per_mole/(u.angstrom*u.angstrom)\n _openmmfrc = u.kilojoule_per_mole/(u.nanometer*u.nanometer)\n bond_frc_conv = _chmfrc.conversion_factor_to(_openmmfrc)\n _chmfrc = u.kilocalorie_per_mole/(u.radians*u.radians)\n _openmmfrc = u.kilojoule_per_mole/(u.radians*u.radians)\n angle_frc_conv = _chmfrc.conversion_factor_to(_openmmfrc)\n dihe_frc_conv = u.kilocalorie_per_mole.conversion_factor_to(\n u.kilojoule_per_mole)\n ene_conv = dihe_frc_conv\n\n # Create the system and determine if any of our atoms have NBFIX or NBTHOLE (and\n # therefore requires a CustomNonbondedForce instead)\n typenames = set()\n system = mm.System()\n if verbose: print('Adding particles...')\n for atom in self.atom_list:\n typenames.add(atom.type.name)\n system.addParticle(atom.mass)\n\n ############################################################\n # OPLS use geometric combination rule\n # LJ force will be handled by CustomNonbondedForce\n # Coulomb and 1-4 LJ will be handled by NonbondedForce\n ############################################################\n typenames = list(typenames)\n has_nbthole_terms = False\n try:\n for i, typename in enumerate(typenames):\n typ = params.atom_types_str[typename]\n for j in range(i, len(typenames)):\n if typenames[j] in typ.nbthole:\n has_nbthole_terms = True\n raise StopIteration\n except StopIteration:\n pass\n # test if the system containing the Drude particles\n has_drude_particle = False\n try:\n if self.drudeconsts_list:\n 
has_drude_particle = True\n except AttributeError:\n pass\n\n # Set up the constraints\n def _is_bond_in_water(bond):\n return bond.atom1.residue.resname[:4] in WATNAMES and \\\n tuple(sorted([bond.atom1.type.atomic_number, bond.atom2.type.atomic_number])) == (1, 8)\n\n n_cons_bond = n_cons_angle = 0\n if verbose and (constraints is not None or rigidWater):\n print('Adding constraints...')\n\n for bond in self.bond_list:\n if constraints in (ff.AllBonds, ff.HAngles):\n system.addConstraint(bond.atom1.idx, bond.atom2.idx,\n bond.bond_type.req*length_conv)\n n_cons_bond += 1\n elif constraints is ff.HBonds:\n if bond.atom1.type.atomic_number == 1 or bond.atom2.type.atomic_number == 1:\n system.addConstraint(bond.atom1.idx, bond.atom2.idx,\n bond.bond_type.req*length_conv)\n n_cons_bond += 1\n elif rigidWater:\n if _is_bond_in_water(bond):\n system.addConstraint(bond.atom1.idx, bond.atom2.idx,\n bond.bond_type.req*length_conv)\n n_cons_bond += 1\n\n # Add virtual sites\n if hasattr(self, 'lonepair_list'):\n if verbose: print('Adding lonepairs...')\n for lpsite in self.lonepair_list:\n index=lpsite[0]\n atom1=lpsite[1]\n atom2=lpsite[2]\n atom3=lpsite[3]\n if atom3 >= 0:\n if lpsite[4] > 0 : # relative lonepair type\n r = lpsite[4] /10.0 # in nanometer\n xweights = [-1.0, 0.0, 1.0]\n elif lpsite[4] < 0: # bisector lonepair type\n r = lpsite[4] / (-10.0)\n xweights = [-1.0, 0.5, 0.5]\n theta = lpsite[5] * pi / 180.0\n phi = (180.0 - lpsite[6]) * pi / 180.0\n p = [r*cos(theta), r*sin(theta)*cos(phi), r*sin(theta)*sin(phi)]\n p = [x if abs(x) > 1e-10 else 0 for x in p] # Avoid tiny numbers caused by roundoff error\n system.setVirtualSite(index, mm.LocalCoordinatesSite(atom1, atom3, atom2, mm.Vec3(1.0, 0.0, 0.0), mm.Vec3(xweights[0],xweights[1],xweights[2]), mm.Vec3(0.0, -1.0, 1.0), mm.Vec3(p[0],p[1],p[2])))\n else: # colinear lonepair type\n # find a real atom to be the third one for LocalCoordinatesSite\n for bond in self.bond_list:\n if (bond.atom1.idx == atom2 and bond.atom2.idx != atom1):\n a3=bond.atom2.idx\n elif (bond.atom2.idx == atom2 and bond.atom1.idx != atom1):\n a3=bond.atom1.idx\n r = lpsite[4] / 10.0 # in nanometer\n system.setVirtualSite(index, mm.LocalCoordinatesSite(atom1, atom2, a3, mm.Vec3(1.0, 0.0, 0.0), mm.Vec3(1.0,-1.0, 0.0), mm.Vec3(0.0, -1.0, 1.0), mm.Vec3(r,0.0,0.0)))\n # Add Bond forces\n if verbose: print('Adding bonds...')\n force = mm.HarmonicBondForce()\n #####################################################################\n # Should always use periodic boundary conditions for all forces\n #####################################################################\n force.setUsesPeriodicBoundaryConditions(True)\n force.setForceGroup(self.BOND_FORCE_GROUP)\n # See which, if any, energy terms we omit\n omit_all = not flexibleConstraints and constraints in (ff.AllBonds, ff.HAngles)\n omit_h = not flexibleConstraints and constraints is not None\n omit_h_in_water = not flexibleConstraints and (constraints is not None or rigidWater)\n for bond in self.bond_list:\n if omit_all: continue\n if omit_h and (bond.atom1.type.atomic_number == 1 or bond.atom2.type.atomic_number == 1): continue\n if omit_h_in_water and _is_bond_in_water(bond): continue\n force.addBond(bond.atom1.idx, bond.atom2.idx,\n bond.bond_type.req*length_conv,\n 2*bond.bond_type.k*bond_frc_conv)\n system.addForce(force)\n # Add Angle forces\n if verbose: print('Adding angles...')\n force = mm.HarmonicAngleForce()\n #####################################################################\n # Should always use periodic 
boundary conditions for all forces\n #####################################################################\n force.setUsesPeriodicBoundaryConditions(True)\n force.setForceGroup(self.ANGLE_FORCE_GROUP)\n if constraints is ff.HAngles:\n num_constrained_bonds = system.getNumConstraints()\n atom_constraints = [[]] * system.getNumParticles()\n for i in range(num_constrained_bonds):\n c = system.getConstraintParameters(i)\n dist = c[2].value_in_unit(u.nanometer)\n atom_constraints[c[0]].append((c[1], dist))\n atom_constraints[c[1]].append((c[0], dist))\n for angle in self.angle_list:\n # Only constrain angles including hydrogen here\n if (angle.atom1.type.atomic_number != 1 and angle.atom3.type.atomic_number != 1):\n continue\n a1 = angle.atom1.type.atomic_number\n a2 = angle.atom2.type.atomic_number\n a3 = angle.atom3.type.atomic_number\n nh = int(a1==1) + int(a3==1)\n if constraints is ff.HAngles:\n constrained = (nh == 2 or (nh == 1 and a2 == 8))\n elif rigidWater:\n constrained = (nh == 2 and a2 == 8 and angle.atom1.residue.resname[:4] in WATNAMES)\n else:\n constrained = False # no constraints\n if constrained:\n l1 = l2 = None\n for bond in angle.atom2.bonds:\n if bond.atom1 is angle.atom1 or bond.atom2 is angle.atom1:\n l1 = bond.bond_type.req * length_conv\n elif bond.atom1 is angle.atom3 or bond.atom2 is angle.atom3:\n l2 = bond.bond_type.req * length_conv\n # Compute the distance between the atoms and add a constraint\n length = sqrt(l1*l1 + l2*l2 - 2*l1*l2*\n cos(angle.angle_type.theteq*pi/180))\n system.addConstraint(angle.atom1.idx, angle.atom3.idx, length)\n n_cons_angle += 1\n if flexibleConstraints or not constrained:\n force.addAngle(angle.atom1.idx, angle.atom2.idx,\n angle.atom3.idx, angle.angle_type.theteq*pi/180,\n 2*angle.angle_type.k*angle_frc_conv)\n if verbose and (constraints is not None or rigidWater):\n print(' Number of bond constraints:', n_cons_bond)\n print(' Number of angle constraints:', n_cons_angle)\n for angle in self.angle_list:\n # Already did the angles with hydrogen above. 
So skip those here\n if (angle.atom1.type.atomic_number == 1 or angle.atom3.type.atomic_number == 1):\n continue\n force.addAngle(angle.atom1.idx, angle.atom2.idx,\n angle.atom3.idx, angle.angle_type.theteq*pi/180,\n 2*angle.angle_type.k*angle_frc_conv)\n system.addForce(force)\n\n # Add the urey-bradley terms\n if verbose: print('Adding Urey-Bradley terms')\n force = mm.HarmonicBondForce()\n #####################################################################\n # Should always use periodic boundary conditions for all forces\n #####################################################################\n force.setUsesPeriodicBoundaryConditions(True)\n force.setForceGroup(self.UREY_BRADLEY_FORCE_GROUP)\n for ub in self.urey_bradley_list:\n force.addBond(ub.atom1.idx, ub.atom2.idx,\n ub.ub_type.req*length_conv,\n 2*ub.ub_type.k*bond_frc_conv)\n system.addForce(force)\n\n # Add dihedral forces\n if verbose: print('Adding torsions...')\n force = mm.PeriodicTorsionForce()\n #####################################################################\n # Should always use periodic boundary conditions for all forces\n #####################################################################\n force.setUsesPeriodicBoundaryConditions(True)\n force.setForceGroup(self.DIHEDRAL_FORCE_GROUP)\n for tor in self.dihedral_parameter_list:\n force.addTorsion(tor.atom1.idx, tor.atom2.idx, tor.atom3.idx,\n tor.atom4.idx, tor.dihedral_type.per,\n tor.dihedral_type.phase*pi/180,\n tor.dihedral_type.phi_k*dihe_frc_conv)\n system.addForce(force)\n\n if verbose: print('Adding impropers...')\n # Ick. OpenMM does not have an improper torsion class. Need to\n # construct one from CustomTorsionForce that respects toroidal boundaries\n\n # energy_function = 'k*min(dtheta, 2*pi-dtheta)^2; dtheta = abs(theta-theta0);'\n # energy_function += 'pi = %f;' % pi\n # force = mm.CustomTorsionForce(energy_function)\n # #####################################################################\n # # Should always use periodic boundary conditions for all forces\n # #####################################################################\n # force.setUsesPeriodicBoundaryConditions(True)\n # force.addPerTorsionParameter('k')\n # force.addPerTorsionParameter('theta0')\n # force.setForceGroup(self.IMPROPER_FORCE_GROUP)\n # for imp in self.improper_list:\n # force.addTorsion(imp.atom1.idx, imp.atom2.idx,\n # imp.atom3.idx, imp.atom4.idx,\n # (imp.improper_type.k*dihe_frc_conv,\n # imp.improper_type.phieq*pi/180)\n # )\n # system.addForce(force)\n\n ### OPLS improper #######################################################\n # in OPLS convention, the third atom is the central atom\n iforce = mm.CustomTorsionForce('k*(1-cos(2*theta))')\n iforce.addPerTorsionParameter('k')\n iforce.setUsesPeriodicBoundaryConditions(True)\n iforce.setForceGroup(self.IMPROPER_FORCE_GROUP)\n for imp in self.improper_list:\n iforce.addTorsion(imp.atom2.idx, imp.atom3.idx,\n imp.atom1.idx, imp.atom4.idx,\n [imp.improper_type.k * dihe_frc_conv])\n system.addForce(iforce)\n #########################################################################\n\n if hasattr(self, 'cmap_list'):\n if verbose: print('Adding CMAP coupled torsions...')\n force = mm.CMAPTorsionForce()\n #####################################################################\n # Should always use periodic boundary conditions for all forces\n #####################################################################\n force.setUsesPeriodicBoundaryConditions(True)\n force.setForceGroup(self.CMAP_FORCE_GROUP)\n # First get the list of 
cmap maps we're going to use. Just store the\n # IDs so we have simple integer comparisons to do later\n cmap_type_list = []\n cmap_map = dict()\n for cmap in self.cmap_list:\n if not id(cmap.cmap_type) in cmap_type_list:\n ct = cmap.cmap_type\n cmap_type_list.append(id(ct))\n # Our torsion correction maps need to go from 0 to 360\n # degrees\n grid = ct.grid.switch_range().T\n m = force.addMap(ct.resolution, [x*ene_conv for x in grid])\n cmap_map[id(ct)] = m\n # Now add in all of the cmaps\n for cmap in self.cmap_list:\n if cmap.consecutive:\n id1, id2 = cmap.atom1.idx, cmap.atom2.idx\n id3, id4 = cmap.atom3.idx, cmap.atom4.idx\n id5, id6 = cmap.atom2.idx, cmap.atom3.idx\n id7, id8 = cmap.atom4.idx, cmap.atom5.idx\n else:\n id1, id2 = cmap.atom1.idx, cmap.atom2.idx\n id3, id4 = cmap.atom3.idx, cmap.atom4.idx\n id5, id6 = cmap.atom5.idx, cmap.atom6.idx\n id7, id8 = cmap.atom7.idx, cmap.atom8.idx\n force.addTorsion(cmap_map[id(cmap.cmap_type)],\n id1, id2, id3, id4, id5, id6, id7, id8)\n system.addForce(force)\n # Add nonbonded terms now\n if verbose: print('Adding nonbonded interactions...')\n force = mm.NonbondedForce()\n ############################################################\n # Dispersion is handled by CustomNonbondedForce\n # Do not need dispersion correction here\n ############################################################\n force.setUseDispersionCorrection(False)\n\n #####################################################################\n # Should always use periodic boundary conditions for all forces\n #####################################################################\n try:\n force.setExceptionsUsePeriodicBoundaryConditions(True)\n except:\n print('WARNING: Cannot apply PBC for 1-4 exceptions')\n\n force.setForceGroup(self.NONBONDED_FORCE_GROUP)\n if not hasbox: # non-periodic\n if nonbondedMethod is ff.NoCutoff:\n force.setNonbondedMethod(mm.NonbondedForce.NoCutoff)\n elif nonbondedMethod is ff.CutoffNonPeriodic:\n if cutoff is None:\n raise ValueError('No cutoff value specified')\n force.setNonbondedMethod(mm.NonbondedForce.CutoffNonPeriodic)\n force.setCutoffDistance(cutoff)\n else:\n raise ValueError('Illegal nonbonded method for non-periodic '\n 'system')\n\n # See if we need to use a switching function\n if switchDistance and nonbondedMethod is not ff.NoCutoff:\n # make sure it's legal\n if (_strip_optunit(switchDistance, u.nanometer) >=\n _strip_optunit(nonbondedCutoff, u.nanometer)):\n raise ValueError('switchDistance is too large compared '\n 'to the cutoff!')\n if _strip_optunit(switchDistance, u.nanometer) < 0:\n # Detects negatives for both Quantity and float\n raise ValueError('switchDistance must be non-negative!')\n force.setUseSwitchingFunction(True)\n force.setSwitchingDistance(switchDistance)\n\n else: # periodic\n # Set up box vectors (from inpcrd if available, or fall back to\n # prmtop definitions\n system.setDefaultPeriodicBoxVectors(*self.box_vectors)\n\n # Set cutoff\n if cutoff is None:\n # Compute cutoff automatically\n box = self.boxLengths\n min_box_width = min((box[0]/u.nanometers,\n box[1]/u.nanometers,\n box[2]/u.nanometers))\n CLEARANCE_FACTOR = 0.97\n cutoff = u.Quantity((min_box_width*CLEARANCE_FACTOR)/2.0,\n u.nanometers)\n if nonbondedMethod is not ff.NoCutoff:\n force.setCutoffDistance(cutoff)\n\n # Set nonbonded method.\n if nonbondedMethod is ff.NoCutoff:\n force.setNonbondedMethod(mm.NonbondedForce.NoCutoff)\n elif nonbondedMethod is ff.CutoffNonPeriodic:\n force.setNonbondedMethod(mm.NonbondedForce.CutoffNonPeriodic)\n elif 
nonbondedMethod is ff.CutoffPeriodic:\n force.setNonbondedMethod(mm.NonbondedForce.CutoffPeriodic)\n elif nonbondedMethod is ff.Ewald:\n force.setNonbondedMethod(mm.NonbondedForce.Ewald)\n elif nonbondedMethod is ff.PME:\n force.setNonbondedMethod(mm.NonbondedForce.PME)\n elif nonbondedMethod is ff.LJPME:\n force.setNonbondedMethod(mm.NonbondedForce.LJPME)\n else:\n raise ValueError('Cutoff method is not understood')\n\n # See if we need to use a switching function\n if switchDistance and nonbondedMethod is not ff.NoCutoff:\n # make sure it's legal\n if (_strip_optunit(switchDistance, u.nanometer) >=\n _strip_optunit(nonbondedCutoff, u.nanometer)):\n raise ValueError('switchDistance is too large compared '\n 'to the cutoff!')\n if _strip_optunit(switchDistance, u.nanometer) < 0:\n # Detects negatives for both Quantity and float\n raise ValueError('switchDistance must be non-negative!')\n force.setUseSwitchingFunction(True)\n force.setSwitchingDistance(switchDistance)\n\n if ewaldErrorTolerance is not None:\n force.setEwaldErrorTolerance(ewaldErrorTolerance)\n\n # Add per-particle nonbonded parameters (coulomb params)\n for atm in self.atom_list:\n force.addParticle(atm.charge, 1.0, 0.0)\n # Now add the custom nonbonded force that implements LJ. First\n # thing we need to do is condense our number of types\n lj_idx_list = [0 for atom in self.atom_list]\n lj_radii, lj_depths = [], []\n num_lj_types = 0\n lj_type_list = []\n for i, atom in enumerate(self.atom_list):\n atom = atom.type\n if lj_idx_list[i]: continue # already assigned\n num_lj_types += 1\n lj_idx_list[i] = num_lj_types\n ljtype = (atom.rmin, atom.epsilon)\n lj_type_list.append(atom)\n lj_radii.append(atom.rmin)\n lj_depths.append(atom.epsilon)\n for j in range(i+1, len(self.atom_list)):\n atom2 = self.atom_list[j].type\n if lj_idx_list[j] > 0: continue # already assigned\n if atom2 is atom:\n lj_idx_list[j] = num_lj_types\n elif not atom.nbfix and not atom.nbthole:\n # Only non-NBFIXed and non-NBTholed atom types can be compressed\n ljtype2 = (atom2.rmin, atom2.epsilon)\n if ljtype == ljtype2:\n lj_idx_list[j] = num_lj_types\n # Now everything is assigned. 
Create the A-coefficient and\n # B-coefficient arrays\n acoef = [0 for i in range(num_lj_types*num_lj_types)]\n bcoef = acoef[:]\n for i in range(num_lj_types):\n for j in range(num_lj_types):\n namej = lj_type_list[j].name\n try:\n rij, wdij, rij14, wdij14 = lj_type_list[i].nbfix[namej]\n except KeyError:\n # rij = (lj_radii[i] + lj_radii[j]) * length_conv\n ##################################################################\n # OPLS use geometric combination rule for both epsilon and sigma\n ##################################################################\n rij = sqrt(lj_radii[i] * lj_radii[j]) * 2 * length_conv\n wdij = sqrt(lj_depths[i] * lj_depths[j]) * ene_conv\n else:\n rij *= length_conv\n wdij *= ene_conv\n acoef[i+num_lj_types*j] = sqrt(wdij) * rij**6\n bcoef[i+num_lj_types*j] = 2 * wdij * rij**6\n cforce = mm.CustomNonbondedForce('(a/r6)^2-b/r6; r6=r^6;'\n 'a=acoef(type1, type2);'\n 'b=bcoef(type1, type2)')\n ##################################################################\n # Dispersion correction for LJ force\n ##################################################################\n cforce.setUseLongRangeCorrection(True)\n cforce.addTabulatedFunction('acoef',\n mm.Discrete2DFunction(num_lj_types, num_lj_types, acoef))\n cforce.addTabulatedFunction('bcoef',\n mm.Discrete2DFunction(num_lj_types, num_lj_types, bcoef))\n cforce.addPerParticleParameter('type')\n cforce.setForceGroup(self.NONBONDED_FORCE_GROUP)\n if (nonbondedMethod in (ff.PME, ff.LJPME, ff.Ewald, ff.CutoffPeriodic)):\n cforce.setNonbondedMethod(cforce.CutoffPeriodic)\n cforce.setCutoffDistance(nonbondedCutoff)\n elif nonbondedMethod is ff.NoCutoff:\n cforce.setNonbondedMethod(cforce.NoCutoff)\n elif nonbondedMethod is ff.CutoffNonPeriodic:\n cforce.setNonbondedMethod(cforce.CutoffNonPeriodic)\n cforce.setCutoffDistance(nonbondedCutoff)\n else:\n raise ValueError('Unrecognized nonbonded method')\n if switchDistance and nonbondedMethod is not ff.NoCutoff:\n # make sure it's legal\n if (_strip_optunit(switchDistance, u.nanometer) >=\n _strip_optunit(nonbondedCutoff, u.nanometer)):\n raise ValueError('switchDistance is too large compared '\n 'to the cutoff!')\n if _strip_optunit(switchDistance, u.nanometer) < 0:\n # Detects negatives for both Quantity and float\n raise ValueError('switchDistance must be non-negative!')\n cforce.setUseSwitchingFunction(True)\n cforce.setSwitchingDistance(switchDistance)\n for i in lj_idx_list:\n cforce.addParticle((i - 1,)) # adjust for indexing from 0\n\n # Add NBTHOLE terms\n if has_drude_particle and has_nbthole_terms:\n nbt_idx_list = [0 for atom in self.atom_list]\n nbt_alpha_list = [0 for atom in self.atom_list] # only save alpha for NBThole pairs\n num_nbt_types = 0\n nbt_type_list = []\n nbt_set_list = []\n for i, atom in enumerate(self.atom_list):\n atom = atom.type\n if not atom.nbthole: continue # get them as zero\n if nbt_idx_list[i]: continue # already assigned\n num_nbt_types += 1\n nbt_idx_list[i] = num_nbt_types\n nbt_idx_list[i+1] = num_nbt_types\n nbt_alpha_list[i] = pow(-1*self.drudeconsts_list[i][0],-1./6.)\n nbt_alpha_list[i+1] = pow(-1*self.drudeconsts_list[i][0],-1./6.)\n nbt_type_list.append(atom)\n nbt_set_list.append([i,i+1])\n for j in range(i+1, len(self.atom_list)):\n atom2 = self.atom_list[j].type\n if nbt_idx_list[j] > 0: continue # already assigned\n if atom2 is atom:\n nbt_idx_list[j] = num_nbt_types\n nbt_idx_list[j+1] = num_nbt_types\n nbt_alpha_list[j] = pow(-1*self.drudeconsts_list[j][0],-1./6.)\n nbt_alpha_list[j+1] = 
pow(-1*self.drudeconsts_list[j][0],-1./6.)\n nbt_set_list[num_nbt_types-1].append(j)\n nbt_set_list[num_nbt_types-1].append(j+1)\n num_total_nbt=num_nbt_types+1 # use zero index for all the atoms with no nbthole\n nbt_interset_list=[]\n # need to get all other particles as an independent group, so in total num_nbt_types+1\n coef = [0 for i in range(num_total_nbt*num_total_nbt)]\n for i in range(num_nbt_types):\n for j in range(num_nbt_types):\n namej = nbt_type_list[j].name\n nbt_value = nbt_type_list[i].nbthole.get(namej,0)\n if abs(nbt_value)>TINY and i<j : nbt_interset_list.append([i+1,j+1])\n coef[i+1+num_total_nbt*(j+1)]=nbt_value\n nbtforce = mm.CustomNonbondedForce('-138.935456*charge1*charge2*(1.0+0.5*screen*r)*exp(-1.0*screen*r)/r; screen=coef(type1, type2) * alpha1*alpha2*10.0')\n nbtforce.addTabulatedFunction('coef', mm.Discrete2DFunction(num_total_nbt, num_total_nbt, coef))\n nbtforce.addPerParticleParameter('charge')\n nbtforce.addPerParticleParameter('alpha')\n nbtforce.addPerParticleParameter('type')\n nbtforce.setForceGroup(self.NONBONDED_FORCE_GROUP)\n # go through all the particles to set up per-particle parameters\n for i in range(system.getNumParticles()):\n c=force.getParticleParameters(i)\n cc=c[0]/u.elementary_charge\n aa=nbt_alpha_list[i]\n ti=nbt_idx_list[i]\n nbtforce.addParticle([cc,aa,ti])\n # set interaction group\n for a in nbt_interset_list:\n ai=a[0]\n aj=a[1]\n nbtforce.addInteractionGroup(nbt_set_list[ai-1],nbt_set_list[aj-1])\n nbtforce.setNonbondedMethod(nbtforce.CutoffPeriodic)\n nbtforce.setCutoffDistance(0.5*u.nanometer)\n nbtforce.setUseSwitchingFunction(False)\n # now, add the actual force to the system\n system.addForce(nbtforce)\n\n # build 1-2, 1-3 and 1-4 exclusion list from bond\n if verbose:\n print('Build exclusion list...')\n self._build_exclusion_list()\n if verbose:\n print(' Number of 1-2 exclusion: %i' % len(self.pair_12_list))\n print(' Number of 1-3 exclusion: %i' % len(self.pair_13_list))\n print(' Number of 1-4 exclusion: %i' % len(self.pair_14_list))\n\n # Add 1-4 interactions\n sigma_scale = 2**(-1/6)\n for ia1, ia4 in self.pair_14_list:\n atom1 = self.atom_list[ia1]\n atom4 = self.atom_list[ia4]\n ####################################################################\n # OPLS scale 1-4 interactions by 0.5 for both LJ and Coulomb\n # But 1-4 LJ has already been scaled in prm file\n ####################################################################\n charge_prod = (atom1.charge * atom4.charge) / 2\n epsilon = sqrt(atom1.type.epsilon_14 * atom4.type.epsilon_14) * ene_conv\n sigma = sqrt(atom1.type.rmin_14 * 2 * atom4.type.rmin_14 * 2) * (\n length_conv * sigma_scale)\n force.addException(ia1, ia4, charge_prod, sigma, epsilon)\n\n # Add excluded atoms\n # Drude and lonepairs will be excluded based on their parent atoms\n parent_exclude_list=[]\n for atom in self.atom_list:\n parent_exclude_list.append([])\n for lpsite in self.lonepair_list:\n idx = lpsite[1]\n idxa = lpsite[0]\n parent_exclude_list[idx].append(idxa)\n force.addException(idx, idxa, 0.0, 0.1, 0.0)\n if has_drude_particle:\n for pair in self.drudepair_list:\n idx = pair[0]\n idxa = pair[1]\n parent_exclude_list[idx].append(idxa)\n force.addException(idx, idxa, 0.0, 0.1, 0.0)\n # If lonepairs and Drude particles are bonded to the same parent atom, add exception\n for excludeterm in parent_exclude_list:\n if(len(excludeterm) >= 2):\n for i in range(len(excludeterm)):\n for j in range(i):\n force.addException(excludeterm[j], excludeterm[i], 0.0, 0.1, 0.0)\n # Exclude all 
bonds and angles, as well as the lonepair/Drude attached onto them\n for ia1, ia2 in self.pair_12_list + self.pair_13_list:\n for excludeatom in [ia1]+parent_exclude_list[ia1]:\n for excludeatom2 in [ia2]+parent_exclude_list[ia2]:\n force.addException(excludeatom, excludeatom2, 0.0, 0.1, 0.0)\n #############################################################################\n # 1-4 scaling for lonepair/Drude\n #############################################################################\n for ia1, ia4 in self.pair_14_list:\n for excludeatom in [ia1]+parent_exclude_list[ia1]:\n for excludeatom4 in [ia4]+parent_exclude_list[ia4]:\n # real atom 1-4 pairs have already been processed before\n if excludeatom == ia1 and excludeatom4 == ia4:\n continue\n qq_scaled = (self.atom_list[excludeatom].charge * self.atom_list[excludeatom4].charge) / 2\n force.addException(excludeatom, excludeatom4, qq_scaled, 0.1, 0.0)\n system.addForce(force)\n\n # Add Drude particles (Drude force)\n if has_drude_particle:\n if verbose: print('Adding Drude force and Thole screening...')\n drudeforce = mm.DrudeForce()\n drudeforce.setForceGroup(self.DRUDE_FORCE_GROUP)\n for pair in self.drudepair_list:\n parentatom=pair[0]\n drudeatom=pair[1]\n p=[-1, -1, -1]\n a11 = 0\n a22 = 0\n charge = self.atom_list[drudeatom].charge\n polarizability = self.drudeconsts_list[parentatom][0]/(-1000.0)\n for aniso in self.aniso_list: # not smart to do another loop, but the number of aniso is small anyway\n if (parentatom==aniso[0]):\n p[0]=aniso[1]\n p[1]=aniso[2]\n p[2]=aniso[3]\n k11=aniso[4]\n k22=aniso[5]\n k33=aniso[6]\n # solve out DrudeK, which should equal 500.0\n a = k11+k22+3*k33\n b = 2*k11*k22+4*k11*k33+4*k22*k33+6*k33*k33\n c = 3*k33*(k11+k33)*(k22+k33)\n DrudeK = (sqrt(b*b-4*a*c)-b)/2/a\n a11=round(DrudeK/(k11+k33+DrudeK),5)\n a22=round(DrudeK/(k22+k33+DrudeK),5)\n drudeforce.addParticle(drudeatom, parentatom, p[0], p[1], p[2], charge, polarizability, a11, a22 )\n system.addForce(drudeforce)\n particleMap = {}\n for i in range(drudeforce.getNumParticles()):\n particleMap[drudeforce.getParticleParameters(i)[0]] = i\n\n for ia1, ia2 in self.pair_12_list + self.pair_13_list:\n alpha1 = self.drudeconsts_list[ia1][0]\n alpha2 = self.drudeconsts_list[ia2][0]\n if abs(alpha1) > TINY and abs(alpha2) > TINY: # both are Drude parent atoms\n thole1 = self.drudeconsts_list[ia1][1]\n thole2 = self.drudeconsts_list[ia2][1]\n drude1 = ia1 + 1 # CHARMM psf has hard-coded rule that the Drude is next to its parent\n drude2 = ia2 + 1\n drudeforce.addScreenedPair(particleMap[drude1], particleMap[drude2], thole1 + thole2)\n\n # If we needed a CustomNonbondedForce, map all of the exceptions from\n # the NonbondedForce to the CustomNonbondedForce\n for i in range(force.getNumExceptions()):\n ii, jj, q, eps, sig = force.getExceptionParameters(i)\n cforce.addExclusion(ii, jj)\n system.addForce(cforce)\n\n if has_drude_particle and has_nbthole_terms:\n for i in range(force.getNumExceptions()):\n ii, jj, q, eps, sig = force.getExceptionParameters(i)\n nbtforce.addExclusion(ii, jj)\n\n # Add GB model if we're doing one\n if implicitSolvent is not None:\n if verbose: print('Adding GB parameters...')\n # If implicitSolventKappa is None, compute it from salt\n # concentration\n if implicitSolventKappa is None:\n if u.is_quantity(implicitSolventSaltConc):\n sc = implicitSolventSaltConc.value_in_unit(u.moles/u.liter)\n implicitSolventSaltConc = sc\n if u.is_quantity(temperature):\n temperature = temperature.value_in_unit(u.kelvin)\n # The constant is 1 / 
sqrt( epsilon_0 * kB / (2 * NA * q^2 *\n # 1000) ) where NA is avogadro's number, epsilon_0 is the\n # permittivity of free space, q is the elementary charge (this\n # number matches sander/pmemd's kappa conversion factor)\n implicitSolventKappa = 50.33355 * sqrt(implicitSolventSaltConc /\n solventDielectric / temperature)\n # Multiply by 0.73 to account for ion exclusions, and multiply\n # by 10 to convert to 1/nm from 1/angstroms\n implicitSolventKappa *= 7.3\n elif implicitSolvent is None:\n implicitSolventKappa = 0.0\n\n if u.is_quantity(implicitSolventKappa):\n implicitSolventKappa = implicitSolventKappa.value_in_unit(\n (1.0/u.nanometer).unit)\n if implicitSolvent is HCT:\n gb = GBSAHCTForce(solventDielectric, soluteDielectric, gbsaModel,\n cutoff, kappa=implicitSolventKappa)\n elif implicitSolvent is OBC1:\n gb = GBSAOBC1Force(solventDielectric, soluteDielectric, gbsaModel,\n cutoff, kappa=implicitSolventKappa)\n elif implicitSolvent is OBC2:\n gb = GBSAOBC2Force(solventDielectric, soluteDielectric, gbsaModel,\n cutoff, kappa=implicitSolventKappa)\n elif implicitSolvent is GBn:\n gb = GBSAGBnForce(solventDielectric, soluteDielectric, gbsaModel,\n cutoff, kappa=implicitSolventKappa)\n elif implicitSolvent is GBn2:\n gb = GBSAGBn2Force(solventDielectric, soluteDielectric, gbsaModel,\n cutoff, kappa=implicitSolventKappa)\n gb_parms = gb.getStandardParameters(self.topology)\n for atom, gb_parm in zip(self.atom_list, gb_parms):\n gb.addParticle([atom.charge] + list(gb_parm))\n # Set cutoff method\n if nonbondedMethod is ff.NoCutoff:\n gb.setNonbondedMethod(mm.NonbondedForce.NoCutoff)\n elif nonbondedMethod is ff.CutoffNonPeriodic:\n gb.setNonbondedMethod(mm.NonbondedForce.CutoffNonPeriodic)\n gb.setCutoffDistance(cutoff)\n elif nonbondedMethod is ff.CutoffPeriodic:\n gb.setNonbondedMethod(mm.NonbondedForce.CutoffPeriodic)\n gb.setCutoffDistance(cutoff)\n else:\n raise ValueError('Illegal nonbonded method for use with GBSA')\n gb.setForceGroup(self.GB_FORCE_GROUP)\n gb.finalize()\n system.addForce(gb)\n force.setReactionFieldDielectric(1.0) # applies to NonbondedForce\n\n # See if we repartition the hydrogen masses\n if hydrogenMass is not None:\n for bond in self.bond_list:\n # Only take the ones with at least one hydrogen\n if (bond.atom1.type.atomic_number != 1 and\n bond.atom2.type.atomic_number != 1):\n continue\n atom1, atom2 = bond.atom1, bond.atom2\n if atom1.type.atomic_number == 1:\n atom1, atom2 = atom2, atom1 # now atom2 is hydrogen for sure\n if atom1.type.atomic_number != 1:\n transfer_mass = hydrogenMass - atom2.mass\n new_mass1 = (system.getParticleMass(atom1.idx) -\n transfer_mass)\n system.setParticleMass(atom2.idx, hydrogenMass)\n system.setParticleMass(atom1.idx, new_mass1)\n # See if we want to remove COM motion\n if removeCMMotion:\n system.addForce(mm.CMMotionRemover(10))\n\n # Cache our system for easy access\n self._system = system\n\n return system", "def burstensemble( base, x_0, z, dist, xi_p, mass, radius, bean, full_model=False ):\n\n minmdot = 0.0\n maxmdot = 1.0\n mdot_res = 1e-6\n sbt = bean.bstart\n salpha = []\n stime = []\n smdot = []\n se_b = []\n\n mdot = bean.flux_to_mdot(x_0, dist, xi_p, mass, radius, bean.pflux)\n\n for i in range(0, bean.numburstsobs):\n\n tmp = settle(base, z, x_0, mdot[i], 1.0, mass, radius)\n\n res = np.recarray(\n (1,), dtype=[(\"tdel\", np.float64), (\"e_b\", np.float64), (\"alpha\", np.float64), (\"mdot\", np.float64)]\n )\n # assign elements\n res.tdel = tmp.tdel / 24.0\n res.e_b = tmp.E_b*0.8 # multiply eb by 0.8 to 
account for incomlpete burning of fuel, as in Goodwin et al (2018).\n alpha = tmp.alpha\n alpha = alpha[0]\n res.mdot = mdot[i]\n _e_b = res.e_b\n _e_b = _e_b[0]\n se_b.append(_e_b)\n _mdot = res.mdot\n _mdot = _mdot[0]\n salpha.append(alpha)\n smdot.append(_mdot)\n # stime.append(bstart[i])\n stime.append(tmp.tdel[0])\n mdot_max = max(smdot)\n\n result = dict()\n\n if full_model:\n # model parameters are redundant for the model returned\n result[\"base\"] = [base]\n result[\"z\"] = [z]\n result[\"x_0\"] = [x_0]\n result[\"dist\"] = [dist]\n result[\"xi_p\"] = [xi_p]\n\n result[\"mdot_max\"] = [mdot_max]\n\n result[\"mass\"] = [mass]\n result[\"radius\"] = [radius]\n\n # now the actual predictions\n\n result[\"time\"] = stime\n result[\"mdot\"] = smdot\n result[\"alpha\"] = salpha\n result[\"e_b\"] = se_b\n\n # omit the printing for now, as it prevents assessing the progress\n # print('ensemble')\n # print(f\"In burstrain fluence is {se_b}\")\n\n return result", "def SecondaryComplex_to_Bid_Alternate():\n Parameter('RIP3_0' , 2.0e4) # molecules per cell\n Parameter('BidK_0' , 5.0e3) # molecules per cell\n \n alias_model_components()\n Initial(RIP3(bRHIM = None, state = 'unmod'), RIP3_0) # RIP3\n Initial(BidK(bf = None), BidK_0)\n # ==============================================================\n # Assembly of Complex II, Riptosome and Necrosome\n # --------------------------------------------------------------\n # FADD + TRADD[active] <-> FADD:TRADD[active]\n # FADD + RIP1 <-> FADD:RIP1\n # TRADD + RIP1 <-> TRADD:RIP1\n \n # CD95_to_secondary complex contains the rules for recruitment of proC8 to FADD.\n # (RIP1 or TRADD):FADD + proC8 <-> (RIP1 or TRADD):FADD:proC8\n # (RIP1 or TRADD):FADD:proC8 + proC8 <-> (RIP1 or TRADD):FADD:proC8:proC8\n # (RIP1 or TRADD):FADD:proC8 + flip_L <-> (RIP1 or TRADD):FADD:proC8:flip_L\n # (RIP1 or TRADD):FADD:proC8 + flip_S <-> (RIP1 or TRADD):proC8:flip_S\n \n # RIP1%ProC8%ProC8(in a complex) >> RIP1[trunc] + C8 + (remains of the complex)\n # RIP1%ProC8%cFlip[L](in a complex) >> RIP1[trunc] + remains of the complex)\n # RIP1%cFlip[S](in a complex) + RIP3 >> RIP1:RIP3(in a complex, i.e. 
necrosome)\n \n # RIP1 + C8 <-> RIP1:C8 >> RIP1[trunc] + C8\n # RIP3 + C8 <-> RIP3:C8 >> RIP3[trunc] + C8\n # Bid + C8 <-> Bid:C8 >> Bid[trunc] + C8\n \n # -------------Assembling Complex II-----------------\n Parameter('Ka_RIP1_FADD', 1e-7) # Biochemica et Biophysica Acta 1834(2013) 292-300\n Parameter('Kd_RIP1_FADD', 1e-8) # Biochemica et Biophysica Acta 1834(2013) 292-300\n alias_model_components()\n \n #Assembling TRADD dependent Complex II\n bind(FADD(bDD = None, bDED1 = None, bDED2 = None), 'bDD', TRADD(bDD1=None, state = 'active'), 'bDD1', [1e-6, 1e-3])\n bind(FADD(bDD = None, bDED1 = None, bDED2 = None), 'bDD', RIP1(bDD = None, state = 'deub'), 'bDD', [1e-8, 1e-1])\n \n #Recruiting RIP1 to secondary complex and TRADD dependent Complex II\n bind(FADD(bDD = None, bDED1 = ANY, bDED2 = ANY), 'bDD', RIP1(bDD=None, bRHIM = None, state = 'unmod'), 'bDD', [Ka_RIP1_FADD, Kd_RIP1_FADD])\n bind(FADD(bDD = None, bDED1 = ANY, bDED2 = ANY), 'bDD', RIP1(bDD=None, bRHIM = None, state = 'deub'), 'bDD', [Ka_RIP1_FADD, Kd_RIP1_FADD])\n \n #bind(TRADD(bDD2 = None, state = 'active'),'bDD2', RIP1(bDD = None, bRHIM = None, state = 'unmod'), 'bDD', [1e-6, 1e-1])\n bind(TRADD(bDD2 = None, state = 'active'),'bDD2', RIP1(bDD = None, bRHIM = None, state = 'deub'), 'bDD', [1e-6, 1e-1])\n # For simplicity, I am neglecting the binary intereaction that occurs between proC8 and RIP1.\n # Binding of proC8 and c-flip to FADD is accomplished in CD95_to_Secondary complex.\n \n #--------------RIP1 Truncation reactions-------------\n #---Truncation by C8---------------------------------\n RIP_CIIA_proC8 = RIP1(bDD=ANY, bRHIM = None, state = 'unmod')% TRADD(bDD2 = None, bDD1 = ANY, state = 'active') % FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n RIP_CIIA_proC8_alt = RIP1(bDD=ANY, bRHIM = None, state = 'deub')% TRADD(bDD2 = None, bDD1 = ANY, state = 'active') % FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n \n RIP_CIIB_proC8 = RIP1(bDD=ANY, bRHIM = None, state = 'unmod')% FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n RIP_CIIB_proC8_alt = RIP1(bDD=ANY, bRHIM = None, state = 'deub')% FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n \n CIIA = TRADD(bDD2 = None, bDD1 = ANY, state = 'active') % FADD(bDD=ANY, bDED1=None, bDED2=None)\n \n Rule('RIP1_truncation_CIIA', RIP_CIIA_proC8 >> CIIA + C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k11',1e-1))\n Rule('RIP1_truncation_CIIA_alt', RIP_CIIA_proC8_alt >> CIIA + C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k11a',1e-6))\n \n Rule('RIP1_truncation_CIIB', RIP_CIIB_proC8 >> FADD(bDD=None, bDED1=None, bDED2=None)+ C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k12', 1e-1))\n Rule('RIP1_truncation_CIIB_alt', RIP_CIIB_proC8_alt >> FADD(bDD=None, bDED1=None, bDED2=None)+ C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k12a', 1e-6))\n \n catalyze_state(C8(bf = None, state = 'A'), 'bf', RIP1(bDD=None), 'bRHIM', 'state', 'unmod', 'trunc', [1e-6, 1e-3, 1e-1])\n catalyze_state(C8(bf = None, state = 'A'), 'bf', RIP1(bDD=None), 'bRHIM', 'state', 'deub', 'trunc', [1e-6, 1e-3, 1e-1])\n \n #---Truncation by proC8:cFlip_L---------------------\n Riptosome_FADD = RIP1(bDD=1, bRHIM = None, state = 'unmod')%FADD(bDD=1, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n Riptosome_FADD_alt = RIP1(bDD=1, bRHIM = None, state = 'deub')%FADD(bDD=1, 
bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n \n Riptosome_TRADD = RIP1(bDD=1, bRHIM = None, state = 'unmod')%TRADD(bDD1=ANY, bDD2=1)%FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n Riptosome_TRADD_alt = RIP1(bDD=1, bRHIM = None, state = 'deub')%TRADD(bDD1=ANY, bDD2=1)%FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n \n Rule('RIP1_truncation_FADD', Riptosome_FADD >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k13', 1e-1))\n Rule('RIP1_truncation_FADD_alt', Riptosome_FADD_alt >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k13a', 1e-1))\n Rule('RIP1_truncation_TRADD', Riptosome_TRADD >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k14', 10))\n Rule('RIP1_truncation_TRADD_alt', Riptosome_TRADD_alt >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k14a', 10))\n \n # -------------RIP3 Binding Interactions----------------\n Ripto1_Flip_S = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='unmod') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Ripto2_Flip_S = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='unmod') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Necrosome1 = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=6, state='unmod') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 6, state = 'unmod')\n Necrosome2 = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=5, state='unmod') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 5, state = 'unmod')\n \n Ripto1_Flip_S_alt = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='deub') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Ripto2_Flip_S_alt = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='deub') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Necrosome1_alt = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=6, state='deub') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 6, state = 'unmod')\n Necrosome2_alt = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=5, state='deub') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 5, state = 'unmod')\n \n Rule('RIP3_binding1', Ripto1_Flip_S + RIP3(bRHIM= None, state = 'unmod') <> Necrosome1, Parameter('k15', 1e-6), Parameter('k16', 1e-3))\n Rule('RIP3_binding2', Ripto2_Flip_S + RIP3(bRHIM= None, state = 'unmod') <> Necrosome2, Parameter('k17', 1e-6), Parameter('k18', 1e-3))\n Rule('RIP3_binding1_alt', Ripto1_Flip_S_alt + RIP3(bRHIM= None, state = 'unmod') <> Necrosome1_alt, Parameter('k15a', 1e-6), Parameter('k16a', 1e-3))\n Rule('RIP3_binding2_alt', Ripto2_Flip_S_alt + RIP3(bRHIM= None, state = 'unmod') <> Necrosome2_alt, Parameter('k17a', 1e-6), Parameter('k18a', 1e-3))\n \n #RIP3 Truncation\n catalyze_state(C8(bf = None, state = 'A'), 'bf', RIP3(), 'bRHIM', 'state', 'unmod', 'trunc', [1e-6, 1e-3, 1e-1])\n \n #-------------Bid Interactions--------------------------\n # Bid Phosphorylation and Truncation\n catalyze_state(BidK(), 'bf', Bid(), 'bf', 'state', 'U', 'po4', [1e-6, 1e-3, 1e-1])\n 
catalyze_state(C8(bf = None, state = 'A'), 'bf', Bid(), 'bf', 'state', 'U', 'T', [1.04e-5, 0.005, 0.1])\n \n # Bid-PO4 sequestering RIP1\n bind(RIP1(bDD = None, bRHIM = None, state = 'unmod'), 'bRHIM', Bid(bf = None, state = 'po4'), 'bf', [1e-6, 1e-3])\n bind(RIP1(bDD = None, bRHIM = None, state = 'deub'), 'bRHIM', Bid(bf = None, state = 'po4'), 'bf', [1e-6, 1e-3])", "def make_stehle(self):\n\n temp_k = self.temp * e / k # temperature in K\n dens_cm = self.e_dens * 1.e-6 # electronic density in cm-3\n prefix = 'n_' + str(self.n_upper) + '_' + str(self.n_lower) + '_'\n\n # extract raw tabulated tabulated_data\n tab_temp_k = np.array(pystark.nc.variables[prefix + 'tempe'].data) # tabulated electron temperatures (K)\n olam0 = pystark.nc.variables[prefix + 'olam0'].data # line centre wavelength (A)\n num_tab_dens = pystark.nc.variables[prefix + 'id_max'].data\n fainom = pystark.nc.variables[prefix + 'fainom'].data\n tab_dens_cm = np.array(pystark.nc.variables[prefix + 'dense'].data) # tabulated electron densities (cm ** -3)\n f00 = np.array(pystark.nc.variables[prefix + 'f00'].data) # normal Holtsmark field strength (30 kV / m)\n dl12 = np.array(pystark.nc.variables[prefix + 'dl12'].data)\n dl12s = np.array(pystark.nc.variables[prefix + 'dl12s'].data)\n fainu = pystark.nc.variables[\n prefix + 'fainu'].data # Asymptotic value of iStark * (alpha ** 2.5) (\"wings factor in alfa units\")\n pr0 = np.array(pystark.nc.variables[\n prefix + 'pr0'].data) # Ratio of the mean interelectronic distance to the electronic Debye length\n jtot = np.array(pystark.nc.variables[prefix + 'jtot'].data,\n dtype=np.int) # \"number of wave lengths for the couple (T,Ne)\"\n dom = np.array(pystark.nc.variables[prefix + 'dom'].data) # frequency detunings in units (rad / (s*ues)\n d1om = np.array(pystark.nc.variables[prefix + 'd1om'].data)\n o1line = np.array(pystark.nc.variables[prefix + 'o1line'].data)\n o1lines = np.array(pystark.nc.variables[prefix + 'o1lines'].data)\n\n # ensure given temperature + density falls within tabulated values\n # change sligtly the value of the input density\n # dens_cm in order to remain , as far as possible, inside the tabulation\n # JSA: this first step seems bogus!\n\n if np.abs(dens_cm - tab_dens_cm[0]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[0] * 1.001\n\n for id in np.arange(1, num_tab_dens + 1):\n if np.abs(dens_cm - tab_dens_cm[id]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[id] * 0.999\n\n if dens_cm >= 2.0 * tab_dens_cm[num_tab_dens]:\n raise Exception(\n 'Your input density is higher than the largest tabulated value %f' % tab_dens_cm[num_tab_dens])\n\n if dens_cm <= tab_dens_cm[0]:\n raise Exception('Your input density is smaller than the smallest tabulated value %f' % tab_dens_cm[0])\n\n if temp_k >= tab_temp_k[9]:\n raise Exception('Your input temperature is higher than the largest tabulated value %f' % tab_temp_k[9])\n\n if temp_k <= tab_temp_k[0]:\n raise Exception('Your input temperature is lower than the smallest tabulated value %f' % tab_temp_k[0])\n\n normal_holtsmark_field = 1.25e-9 * (dens_cm ** (2. / 3.)) # normal field value in ues\n\n # calculate line centre wavelength and frequency using Rydberg formula\n # JSA: I have made this step clearer and corrected for deuteron mass in the Rydberg constant (though the effect is small)\n # TODO make sure this matches olam0 parameter above -- why were there two variables in the first place?!\n # rydberg_m = Rydberg / (1. 
+ (electron_mass / physical_constants['deuteron mass'][0]))\n # wl_0_angst = 1e10 * (rydberg_m * (1 / n_lower ** 2 - 1 / n_upper ** 2)) ** -1\n\n wl_centre_angst = self.wl_centre * 1e10\n\n c_angst = c * 1e10 # velocity of light in Ansgtroms / s\n angular_freq_0 = 2 * np.pi * c_angst / wl_centre_angst # rad / s\n\n otrans = -2 * np.pi * c_angst / wl_centre_angst ** 2\n\n olines = o1lines / np.abs(otrans)\n oline = o1line / np.abs(otrans)\n\n # Limit analysis_tools to uncorrelated plasmas.\n # check that mean interelectronic distance is smaller than the electronic Debye length (equ. 10)\n PR0_exp = 0.0898 * (dens_cm ** (1. / 6.)) / np.sqrt(temp_k) # = (r0 / debye)\n if PR0_exp > 1.:\n raise Exception('The plasma is too strongly correlated\\ni.e. r0/debye=0.1\\nthe line cannot be computed.')\n\n # fainom_exp=fainom*(F00_exp**1.5)\n # fainum_exp=fainom_exp/( (OPI*2.)**1.5)\n\n # ========================\n # TABULATION Format CDS\n # si on veut ecrire\n # n -np lambda0 kalpha Ne E0 T R0/Debye Dalpha iDoppler iStark\n\n # IN_cds= N+0.01\n # INP_cds = NP+0.01\n\n # ***********************************************************\n # Don't edit the CDS format...\n # ***********************************************************\n\n # Skipped the code in the IF statement starting at line 470, since it\n # isn't used, if (.FALSE.) ...\n\n # ==============================================\n # define an unique detunings grid - domm - for the tabulated\n # profiles ( various temperatures , densities)\n # calculate all the line shapes for this common grid\n # units used at this points are Domega_new= Delta(omega)/F00\n # in rd/(s-1 ues)\n\n max_num_dens = 30 # Maximum number of densities\n max_num_tab_temp = 10\n max_num_detunings = 60 # Maximum number of detunings\n jtot = jtot.astype(np.int)\n domm = np.zeros(100000)\n dom0 = np.zeros(10000)\n tprof = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n tprofs = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n uprof = np.zeros([max_num_dens, 10000])\n uprofs = np.zeros([max_num_dens, 10000])\n\n inc = 0\n domm[inc] = 0.0\n # ---- Look to replace this loop\n for id in np.arange(num_tab_dens + 1): # loop over tab densities\n for j in np.arange(max_num_tab_temp): # loop over tab temperatures (?)\n for i in np.arange(1, jtot[id, j]):\n inc += 1\n dom0[inc] = dom[id, j, i]\n\n inc = np.count_nonzero(dom)\n npik = inc + 1\n # nut=10000\n\n # Calling numpy sort instead of piksrt\n tmp = np.sort(dom0[0:npik])\n dom0[0:npik] = tmp[0:npik]\n # dom0 seems to agree with the FORTRAN version\n\n inc = 0\n domm[0] = 0.0\n # print 'npik',npik\n # ---- Look to replace this loop\n for i in np.arange(1, npik):\n dif = (dom0[i] - dom0[i - 1])\n if dif <= 1.0E-6:\n continue\n if dif / np.abs(dom0[i]) <= 0.1:\n continue\n inc = inc + 1\n domm[inc] = dom0[i]\n\n jdom = inc + 1 # One line after marker 35\n\n for id in np.arange(num_tab_dens):\n for j in np.arange(10):\n if pr0[id, j] > 1.0:\n continue\n\n tprof[id, j, 0] = oline[id, j, 0]\n tprofs[id, j, 0] = olines[id, j, 0]\n\n if jtot[id, j] == 0:\n continue\n\n for i in np.arange(1, jdom + 1):\n skip1 = False\n skip2 = False\n # print 'i',i\n domeg = domm[i]\n ij_max = jtot[id, j]\n # print 'domeg,ij_max',domeg,ij_max\n for ij in np.arange(1, ij_max - 1):\n # print 'ij',ij\n test = (domeg - dom[id, j, ij]) * (domeg - dom[id, j, ij - 1])\n # print 'test1:',test\n if test <= 0.0:\n # print 'triggered test1'\n x1 = dom[id, j, ij - 1]\n x2 = dom[id, j, ij]\n x3 = dom[id, j, ij + 1]\n y1 = oline[id, j, ij - 1]\n y2 = oline[id, j, 
ij]\n y3 = oline[id, j, ij + 1]\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij - 1]\n y2 = olines[id, j, ij]\n y3 = olines[id, j, ij + 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n skip1 = True\n skip2 = True\n break\n\n if skip1 is False:\n test = (domeg - dom[id, j, ij_max - 2]) * (domeg - dom[id, j, ij_max - 1])\n # print 'test2:',test\n # print 'domeg',domeg\n # print 'dom[id,j,ij_max-1]',dom[id,j,ij_max-2]\n # print 'dom[id,j,ij_max]',dom[id,j,ij_max-1]\n if test <= 0.0:\n # print 'triggered test2'\n x1 = dom[id, j, ij_max - 3]\n x2 = dom[id, j, ij_max - 2]\n x3 = dom[id, j, ij_max - 1]\n y1 = oline[id, j, ij_max - 3]\n y2 = oline[id, j, ij_max - 2]\n y3 = oline[id, j, ij_max - 1]\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij_max - 3]\n y2 = olines[id, j, ij_max - 2]\n y3 = olines[id, j, ij_max - 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n skip2 = True\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n continue\n\n if skip2 is False:\n if domeg > dom[id, j, ij_max]:\n # print 'triggered test3'\n tprof[id, j, i] = fainom / (domeg ** 2.5)\n tprofs[id, j, i] = tprof[id, j, i]\n continue\n\n # We can skip writing the intermediate file\n\n\n for id in np.arange(num_tab_dens):\n otest_dens = (dens_cm - tab_dens_cm[id]) * (dens_cm - tab_dens_cm[id + 1])\n if otest_dens <= 0.0:\n dense1 = tab_dens_cm[id]\n dense2 = tab_dens_cm[id + 1]\n id1 = id\n id2 = id + 1\n break\n\n if dens_cm >= tab_dens_cm[num_tab_dens]:\n dense1 = tab_dens_cm[num_tab_dens - 1]\n dense2 = tab_dens_cm[num_tab_dens]\n id1 = num_tab_dens - 1\n id2 = num_tab_dens\n\n for it in np.arange(10):\n otest = (temp_k - tab_temp_k[it]) * (temp_k - tab_temp_k[it + 1])\n if otest <= 0.0:\n it1 = it\n it2 = it + 1\n # pr01 = pr0[id2,it1] # max value of pr0 for T1,T2,dense1,dense2\n tempe1 = tab_temp_k[it]\n tempe2 = tab_temp_k[it + 1]\n break\n\n # interpolation in temperature\n for id in np.arange(id1, id2 + 1):\n for i in np.arange(jdom):\n uprof[id, i] = tprof[id, it1, i] + (temp_k - tempe1) * (tprof[id, it2, i] - tprof[id, it1, i]) / (\n tempe2 - tempe1)\n uprofs[id, i] = tprofs[id, it1, i] + (temp_k - tempe1) * (tprofs[id, it2, i] - tprofs[id, it1, i]) / (\n tempe2 - tempe1)\n\n delta_lambda = np.zeros(jdom)\n delta_nu = np.zeros(jdom)\n wprof_nu = np.zeros(jdom)\n wprofs_nu = np.zeros(jdom)\n\n for i in np.arange(jdom):\n wprof = uprof[id1, i] + (dens_cm - dense1) * (uprof[id2, i] - uprof[id1, i]) / (dense2 - dense1)\n wprofs = uprofs[id1, i] + (dens_cm - dense1) * (uprofs[id2, i] - uprofs[id1, i]) / (dense2 - dense1)\n delta_omega = domm[i] * normal_holtsmark_field\n delta_nu[i] = delta_omega / (2 * np.pi)\n delta_lambda[i] = wl_centre_angst * delta_omega / (angular_freq_0 + delta_omega)\n # print(delta_lambda[i])\n wprof_nu[i] = (wprof / normal_holtsmark_field) * (2. * np.pi)\n wprofs_nu[i] = (wprofs / normal_holtsmark_field) * (2. 
* np.pi)\n # print '%e %e %e %e' %(delta_lambda[i],delta_nu[i],wprof_nu[i],wprofs_nu[i])\n\n delta_lambda2 = np.concatenate((-delta_lambda[::-1], delta_lambda)) + wl_centre_angst # + olam0\n delta_nu2 = np.concatenate((-delta_nu[::-1], delta_nu))\n wprof_nu2 = np.concatenate((wprof_nu[::-1], wprof_nu))\n wprofs_nu2 = np.concatenate((wprofs_nu[::-1], wprofs_nu))\n\n # for some reason, i only get a good agreement with the other models if i take the pure Stark broadened Stehle\n # output and manually convolve it with the Doppler profile -- not sure why...\n ls_sd = wprofs_nu2\n\n # interpolate onto frequency axis\n ls_sd = np.interp(self.freq_axis, delta_nu2 + self.freq_centre, ls_sd)\n\n return ls_sd", "def canon(models=None, chorale_metas=None, sequence_length=50,\n num_iterations=1000,\n timesteps=16,\n model_base_name='models/raw_dataset/tmp/',\n temperature=1., batch_size_per_voice=16,\n pickled_dataset=BACH_DATASET,\n intervals=[7], delays=[32],\n ):\n # load dataset\n X, X_metadatas, voice_ids, index2notes, note2indexes, metadatas = pickle.load(\n open(pickled_dataset, 'rb'))\n\n # variables\n num_voices = len(voice_ids)\n assert num_voices == 2\n\n num_pitches = list(map(len, index2notes))\n max_delay = max(delays)\n delays = np.array([0] + delays)\n intervals = np.array([0] + intervals)\n\n # compute tables\n diatonic_note_names2indexes = _diatonic_note_names2indexes(index2notes)\n print(diatonic_note_names2indexes)\n # load models if not\n if models is None:\n for expert_index in range(num_voices):\n model_name = model_base_name + str(expert_index)\n\n model = load_model(model_name=model_name, yaml=False)\n models.append(model)\n\n seq = np.zeros(\n shape=(2 * timesteps + max_delay + sequence_length, num_voices))\n for expert_index in range(num_voices):\n # Add start and end symbol + random init\n seq[:timesteps, expert_index] = [note2indexes[expert_index][\n START_SYMBOL]] * timesteps\n seq[timesteps:-timesteps - max_delay,\n expert_index] = np.random.randint(num_pitches[expert_index],\n size=sequence_length)\n\n seq[-timesteps - max_delay:, expert_index] = [note2indexes[\n expert_index][\n END_SYMBOL]] * (\n timesteps + max_delay)\n\n if chorale_metas is not None:\n # chorale_metas is a list\n extended_chorale_metas = [np.concatenate((np.zeros((timesteps,)),\n chorale_meta,\n np.zeros((\n timesteps + max_delay,))),\n axis=0)\n for chorale_meta in chorale_metas]\n\n else:\n raise NotImplementedError\n\n min_temperature = temperature\n temperature = 1.5\n\n # Main loop\n for iteration in tqdm(range(num_iterations)):\n\n temperature = max(min_temperature, temperature * 0.9995) # Recuit\n print(temperature)\n\n time_indexes = {}\n probas = {}\n\n for voice_index in range(num_voices):\n batch_input_features = []\n time_indexes[voice_index] = []\n\n for batch_index in range(batch_size_per_voice):\n # soprano based\n if voice_index == 0:\n time_index = np.random.randint(timesteps,\n sequence_length + timesteps)\n else:\n # time_index = sequence_length + timesteps * 2 - time_indexes[0][batch_index]\n time_index = time_indexes[0][batch_index] + delays[\n voice_index]\n\n time_indexes[voice_index].append(time_index)\n\n (left_feature,\n central_feature,\n right_feature,\n label) = all_features(seq, voice_index, time_index, timesteps,\n num_pitches, num_voices)\n\n left_metas, central_metas, right_metas = all_metadatas(\n chorale_metadatas=extended_chorale_metas,\n metadatas=metadatas,\n time_index=time_index, timesteps=timesteps)\n\n input_features = {'left_features': left_feature[:, :],\n 
'central_features': central_feature[:],\n 'right_features': right_feature[:, :],\n 'left_metas': left_metas,\n 'central_metas': central_metas,\n 'right_metas': right_metas}\n\n # list of dicts: predict need dict of numpy arrays\n batch_input_features.append(input_features)\n\n # convert input_features\n batch_input_features = {key: np.array(\n [input_features[key] for input_features in\n batch_input_features])\n for key in batch_input_features[0].keys()\n }\n # make all estimations\n probas[voice_index] = models[voice_index].predict(\n batch_input_features,\n batch_size=batch_size_per_voice)\n\n # parallel updates\n for batch_index in range(batch_size_per_voice):\n # create list of masks for each note name\n proba_sop = probas[SOP][batch_index]\n proba_bass = probas[BASS][batch_index]\n\n proba_sop_split = _split_proba(proba_sop,\n diatonic_note_names2indexes[SOP])\n proba_bass_split = _split_proba(proba_bass,\n diatonic_note_names2indexes[BASS])\n\n interval = intervals[1]\n\n # multiply probas\n canon_product_probas, index_merge2pitches = _merge_probas_canon(\n proba_sop_split, proba_bass_split,\n interval,\n diatonic_note_names2indexes)\n\n # draw\n # use temperature\n canon_product_probas /= np.sum(canon_product_probas)\n canon_product_probas = np.log(canon_product_probas) / temperature\n canon_product_probas = np.exp(canon_product_probas) / np.sum(\n np.exp(canon_product_probas)) - 1e-7\n\n # pitch can include slur_symbol\n index_drawn_pitches = np.argmax(\n np.random.multinomial(1, canon_product_probas))\n pitches = index_merge2pitches[index_drawn_pitches]\n for voice_index, pitch in enumerate(pitches):\n seq[time_indexes[voice_index][\n batch_index], voice_index] = pitch\n\n return seq[timesteps:-timesteps, :]", "def initialize_antibodies(experiment, canonicals):\n # Store the order in which the CDRs should be refined\n order = ['H3', 'L3', 'H2', 'H1', 'L1', 'L2']\n # Get the molecule name-CDR associations\n associations = molecule_name_association(experiment, [])\n # Do the refinement protocol 2x for each CDR\n for i in range(2):\n # Go through the CDRs in order\n for cdr in order:\n # If this CDR is not included in the experiment, skip it\n if cdr not in canonicals.keys():\n continue\n # Only allow this CDR to move in the energy minimization\n dummy = free_cdr(experiment, associations, cdr)\n # Find the optimal set of rotamers with the additional\n # OptCDR-specific constraints\n IPRO_FUNCTIONS.Optimal_Rotamers(experiment)\n # Run a rigid-body docking protocol\n IPRO_FUNCTIONS.Docking(experiment, experiment[\"Docking Frequency\"])", "def test_s_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.s_gate_circuits_deterministic(final_measure=True)\n targets = ref_1q_clifford.s_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def generate_humanization_cuts(molecule, spots, database = []): \n # Convert amino acids names to number\n positions = []\n indexNameDict = {}\n for i, residue in enumerate(molecule):\n if residue.name in spots:\n positions.append(i)\n indexNameDict[i] = residue.name\n length = len(molecule)\n positionNum = len(positions)\n #print length, positionNum \n allowedNum = 3\n \n if not positionNum-1 in range(allowedNum):\n text = \"The humanization of the sequences only allows no more than 3 residues mutated at one time\"\n raise DeimmunizationError(text)\n 
\n cuts= []\n \n if positionNum == 1:\n position = positions[0]\n #print \"Position in generate_humanization_cuts function\", position\n if position < 0 or position > length:\n text = \" The provided residue position is out of the molecule residues range\"\n raise DeimmunizationError(text)\n begin, end = determine_begin_end(molecule, position)\n seqs = extract_all_sequences(molecule, begin, end)\n #print \"seqs: \", len(seqs)\n #count = count_total_mutations(seqs, database)\n count = count_total_mutations_cpp(seqs)\n #print \"count: \", count\n iteraction = 0\n for aa in aminoAcids[\"PDB\"]:\n molecule_mut = molecule.duplicate()\n molecule_mut[position].kind = aa\n seqs_mut = extract_all_sequences(molecule_mut, begin, end)\n #mutCount = count_total_mutations(seqs_mut, database)\n mutCount = count_total_mutations_cpp(seqs_mut)\n #print \"seqs_mut: \", len(seqs_mut)\n #print \"mutCount: \", mutCount\n #print iteration\n iteration += 1\n if mutCount > count:\n solution = {}\n solution[indexNameDict[position]] = aa\n cuts.append(solution) \n\n elif positionNum == 2:\n position1 = positions[0]\n position2 = positions[1]\n if position1 < 0 or position1 > length or position2 < 0 or position2 > length:\n text = \" The provided residue position is out of the molecule residues range\"\n raise DeimmunizationError(text)\n if position1 > position2 :\n position1, position2 = position2, position1\n begin_all, end1 = determine_begin_end(molecule, position1)\n begin2, end_all = determine_begin_end(molecule, position2)\n seqs = extract_all_sequences(molecule, begin_all, end_all)\n #count = count_total_mutations(seqs, database)\n count = count_total_mutations_cpp(seqs)\n #print \"seqs: \", len(seqs)\n #print \"count: \", count\n iteration = 0\n for aa1 in aminoAcids[\"PDB\"]:\n molecule1 = molecule.duplicate()\n molecule1[position1].kind = aa1\n for aa2 in aminoAcids[\"PDB\"]:\n molecule2 = molecule1.duplicate()\n molecule2[position2].kind = aa2\n begin_mut_all, end_mut1 = determine_begin_end(molecule, position1)\n begin_mut2, end_mut_all = determine_begin_end(molecule, position2)\n seqs_mut = extract_all_sequences(molecule2, begin_mut_all, end_mut_all)\n #mutCount = count_total_mutations(seqs_mut, database)\n mutCount = count_total_mutations_cpp(seqs_mut)\n #print \"seqs_mut: \", len(seqs_mut)\n #print \"mutCount: \", mutCount\n #print iteration\n iteration += 1\n if mutCount > count:\n solution = {}\n solution[indexNameDict[position1]] = aa1\n solution[indexNameDict[position2]] = aa2\n cuts.append(solution) \n \n elif positionNum == 3:\n position1 = positions[0]\n position2 = positions[1]\n position3 = positions[2]\n if position1 < 0 or position1 > length or position2 < 0 or position2 > length or position3 < 0 or position3 > length:\n text = \" The provided residue positions are out of the molecule residues range\"\n raise DeimmunizationError(text)\n # Make sure position1 < position2 < position3\n indexs = [position1, position2, position3]\n indexs.sort()\n position1 = indexs[0]\n position2 = indexs[1]\n position3 = indexs[2]\n begin_all, end1 = determine_begin_end(molecule, position1)\n begin3, end_all = determine_begin_end(molecule, position3)\n seqs = extract_all_sequences(molecule, begin_all, end_all)\n #count = count_total_mutations(seqs, database)\n count = count_total_mutations_cpp(seqs)\n #print \"seqs: \", len(seqs)\n #print \"count: \", count\n iteration = 0\n for aa1 in aminoAcids[\"PDB\"]:\n molecule1 = molecule.duplicate()\n molecule1[position1].kind = aa1\n for aa2 in aminoAcids[\"PDB\"]:\n 
molecule2 = molecule1.duplicate()\n molecule2[position2].kind = aa2\n for aa3 in aminoAcids[\"PDB\"]:\n molecule3 = molecule2.duplicate()\n molecule3 = molecule2.duplicate()\n molecule3[position3].kind = aa3\n begin_mut_all, end_mut1 = determine_begin_end(molecule, position1)\n begin_mut3, end_mut_all = determine_begin_end(molecule, position3)\n seqs_mut = extract_all_sequences(molecule2, begin_mut_all, end_mut_all)\n #mutCount = count_total_mutations(seqs_mut, database)\n mutCount = count_total_mutations_cpp(seqs_mut)\n #print \"seqs_mut: \", len(seqs_mut)\n #print \"mutCount: \", mutCount\n #print iteration\n iteration += 1 \n if mutCount > count:\n solution = {}\n solution[indexNameDict[position1]] = aa1\n solution[indexNameDict[position2]] = aa2\n solution[indexNameDict[position3]] = aa3\n cuts.append(solution) \n \n return cuts", "def _compute_hydrogen_bonds(self, entity):\n\n for (aa1, aa2) in combinations(entity, 2):\n\n # do not consider this pair if the number of atoms of the\n # residues is not sufficient\n if not (validate(aa1) and validate(aa2)):\n continue\n\n # stores both potentials between aa1 and aa2\n potentials = []\n\n segid1 = get_pos(aa1)\n segid2 = get_pos(aa2)\n\n # distance\n dist = np.abs(segid1 - segid2)\n\n # take care of the minimal sequence distance criterion\n # between aa1 and aa2\n if dist < self.min_seq_distance:\n continue\n\n # extract atoms from both amino acids\n atoms = [aa1.get_unpacked_list(),\n aa2.get_unpacked_list()]\n\n for i in range(0, len(atoms)):\n c_carboxyl = np.array(atoms[i][2].get_coord())\n o_carboxyl = np.array(atoms[i][3].get_coord())\n\n nitrogen = np.array(atoms[1-i][0].get_coord())\n hydrogen = None\n for atom in atoms[1-i]:\n if atom.get_name().strip() == 'H':\n hydrogen = np.array(atom.get_coord())\n\n if hydrogen is None:\n potentials.append(0)\n continue\n\n # compute relevant distances\n r_ON = np.linalg.norm(o_carboxyl - nitrogen)\n r_CH = np.linalg.norm(c_carboxyl - hydrogen)\n r_OH = np.linalg.norm(o_carboxyl - hydrogen)\n r_CN = np.linalg.norm(c_carboxyl - nitrogen)\n\n # compute potential\n pot = potential(r_ON, r_CH, r_OH, r_CN)\n\n potentials.append(pot if pot < co.HBOND_THRESHOLD else 0)\n\n # we return this as an result if at least one potential\n # is below the threshold , so they are not both 0\n if sum(potentials) != 0:\n yield (aa1, aa2, potentials[0], potentials[1])", "def build_fully_biconnected_test_graph():\n graph = build_biconnected_test_graph()\n\n # Connect the first and third components to create a ring, converting everything into a single biconnected component\n graph.new_edge(1, 12)\n\n return graph", "def test_assembly_inner_product_1_forms(self):\n func_space_lob = FunctionSpace(self.mesh, '1-lobatto', self.p)\n func_space_gauss = FunctionSpace(self.mesh, '1-gauss', self.p)\n func_space_extgauss = FunctionSpace(self.mesh, '1-ext_gauss', self.p)\n\n basis_lob = BasisForm(func_space_lob)\n basis_lob.quad_grid = 'gauss'\n M_lob = inner(basis_lob, basis_lob)\n\n basis_gauss = BasisForm(func_space_gauss)\n basis_gauss.quad_grid = 'lobatto'\n M_gauss = inner(basis_gauss, basis_gauss)\n\n basis_ext_gauss = BasisForm(func_space_extgauss)\n basis_ext_gauss.quad_grid = 'lobatto'\n M_extgauss = inner(basis_ext_gauss, basis_ext_gauss)\n\n M_lob_ass_ref = assemble_slow(self.mesh, M_lob, func_space_lob.dof_map.dof_map,\n func_space_lob.dof_map.dof_map)\n M_gauss_ass_ref = assemble_slow(self.mesh, M_gauss, func_space_gauss.dof_map.dof_map,\n func_space_gauss.dof_map.dof_map)\n M_extgauss_ass_ref = assemble_slow(\n 
self.mesh, M_extgauss, func_space_extgauss.dof_map.dof_map_internal, func_space_extgauss.dof_map.dof_map_internal)\n\n M_lob_ass = assemble(M_lob, func_space_lob, func_space_lob).toarray()\n M_gauss_ass = assemble(M_gauss, func_space_gauss, func_space_gauss).toarray()\n M_extgauss_ass = assemble(M_extgauss, func_space_extgauss,\n func_space_extgauss).toarray()\n\n npt.assert_array_almost_equal(M_lob_ass_ref, M_lob_ass)\n npt.assert_array_almost_equal(M_gauss_ass_ref, M_gauss_ass)\n npt.assert_array_almost_equal(M_extgauss_ass_ref, M_extgauss_ass)", "def test_assembly_inner_product_2_forms(self):\n func_space_lob = FunctionSpace(self.mesh, '2-lobatto', self.p)\n func_space_gauss = FunctionSpace(self.mesh, '2-gauss', self.p)\n func_space_extgauss = FunctionSpace(self.mesh, '2-ext_gauss', self.p)\n\n basis_lob = BasisForm(func_space_lob)\n basis_lob.quad_grid = 'gauss'\n M_lob = inner(basis_lob, basis_lob)\n\n basis_gauss = BasisForm(func_space_gauss)\n basis_gauss.quad_grid = 'lobatto'\n M_gauss = inner(basis_gauss, basis_gauss)\n\n basis_ext_gauss = BasisForm(func_space_extgauss)\n print(basis_ext_gauss.num_basis)\n basis_ext_gauss.quad_grid = 'lobatto'\n M_extgauss = inner(basis_ext_gauss, basis_ext_gauss)\n\n M_lob_ass_ref = assemble_slow(self.mesh, M_lob, func_space_lob.dof_map.dof_map,\n func_space_lob.dof_map.dof_map)\n M_gauss_ass_ref = assemble_slow(self.mesh, M_gauss, func_space_gauss.dof_map.dof_map,\n func_space_gauss.dof_map.dof_map)\n M_extgauss_ass_ref = assemble_slow(\n self.mesh, M_extgauss, func_space_extgauss.dof_map.dof_map_internal, func_space_extgauss.dof_map.dof_map_internal)\n\n M_lob_ass = assemble(M_lob, func_space_lob, func_space_lob).toarray()\n M_gauss_ass = assemble(M_gauss, func_space_gauss, func_space_gauss).toarray()\n M_extgauss_ass = assemble(M_extgauss, func_space_extgauss,\n func_space_extgauss).toarray()\n\n npt.assert_array_almost_equal(M_lob_ass_ref, M_lob_ass)\n npt.assert_array_almost_equal(M_gauss_ass_ref, M_gauss_ass)\n npt.assert_array_almost_equal(M_extgauss_ass_ref, M_extgauss_ass)", "def test_validation_correct_systems():\n data_paths = examples_paths()\n exp_builder = ExperimentBuilder()\n basic_script = \"\"\"\n ---\n molecules:\n rec: {{filepath: {0}, leap: {{parameters: leaprc.ff14SB}}}}\n rec_reg: {{filepath: {0}, regions: {{receptregion: 'some dsl'}}, leap: {{parameters: leaprc.ff14SB}}}}\n lig: {{name: lig, leap: {{parameters: leaprc.gaff}}}}\n lig_reg: {{name: lig, regions: {{ligregion: [143, 123]}}, leap: {{parameters: leaprc.gaff}}}}\n solvents:\n solv: {{nonbonded_method: NoCutoff}}\n solv2: {{nonbonded_method: NoCutoff, implicit_solvent: OBC2}}\n solv3: {{nonbonded_method: PME, clearance: 10*angstroms}}\n solv4: {{nonbonded_method: PME}}\n \"\"\".format(data_paths['lysozyme'])\n basic_script = yaml.load(textwrap.dedent(basic_script), Loader=yaml.FullLoader)\n\n systems = [\n {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv'},\n {'receptor': 'rec_reg', 'ligand': 'lig_reg', 'solvent': 'solv'},\n {'receptor': 'rec_reg', 'ligand': 'lig', 'solvent': 'solv'},\n {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv', 'pack': True},\n {'receptor': 'rec', 'ligand': 'lig', 'solvent': 'solv3',\n 'leap': {'parameters': ['leaprc.gaff', 'leaprc.ff14SB']}},\n\n {'phase1_path': data_paths['bentol-complex'],\n 'phase2_path': data_paths['bentol-solvent'],\n 'ligand_dsl': 'resname BEN', 'solvent': 'solv'},\n {'phase1_path': data_paths['bentol-complex'],\n 'phase2_path': data_paths['bentol-solvent'],\n 'ligand_dsl': 'resname BEN', 'solvent': 
'solv4'},\n {'phase1_path': data_paths['bentol-complex'],\n 'phase2_path': data_paths['bentol-solvent'],\n 'ligand_dsl': 'resname BEN', 'solvent1': 'solv3',\n 'solvent2': 'solv2'},\n\n {'phase1_path': data_paths['pxylene-complex'],\n 'phase2_path': data_paths['pxylene-solvent'],\n 'ligand_dsl': 'resname p-xylene', 'solvent': 'solv',\n 'gromacs_include_dir': data_paths['pxylene-gro-include']},\n {'phase1_path': data_paths['pxylene-complex'],\n 'phase2_path': data_paths['pxylene-solvent'],\n 'ligand_dsl': 'resname p-xylene', 'solvent': 'solv'},\n\n {'phase1_path': data_paths['toluene-solvent'],\n 'phase2_path': data_paths['toluene-vacuum'],\n 'ligand_dsl': 'resname TOL'},\n {'phase1_path': data_paths['toluene-solvent'],\n 'phase2_path': data_paths['toluene-vacuum'],\n 'ligand_dsl': 'resname TOL', 'solvent_dsl': 'not resname TOL'},\n\n {'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv'},\n {'solute': 'lig_reg', 'solvent1': 'solv', 'solvent2': 'solv'},\n {'solute': 'lig', 'solvent1': 'solv', 'solvent2': 'solv',\n 'leap': {'parameters': 'leaprc.gaff'}}\n ]\n for system in systems:\n modified_script = basic_script.copy()\n modified_script['systems'] = {'sys': system}\n yield exp_builder.parse, modified_script", "def compute_hydration_energies(molecules, parameters):\n\n energies = dict() # energies[index] is the computed solvation energy of molecules[index]\n\n platform = openmm.Platform.getPlatformByName(\"Reference\")\n\n for molecule in molecules:\n # Create OpenMM System.\n system = openmm.System()\n for atom in molecule.GetAtoms():\n mass = OEGetDefaultMass(atom.GetAtomicNum())\n system.addParticle(mass * units.amu)\n\n # Add nonbonded term.\n # nonbonded_force = openmm.NonbondedSoftcoreForce()\n # nonbonded_force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)\n # for atom in molecule.GetAtoms():\n # charge = 0.0 * units.elementary_charge\n # sigma = 1.0 * units.angstrom\n # epsilon = 0.0 * units.kilocalories_per_mole\n # nonbonded_force.addParticle(charge, sigma, epsilon)\n # system.addForce(nonbonded_force)\n\n # Add GBVI term\n # gbvi_force = openmm.GBVISoftcoreForce()\n gbvi_force = openmm.GBVIForce() \n gbvi_force.setNonbondedMethod(openmm.GBVIForce.NoCutoff) # set no cutoff\n gbvi_force.setSoluteDielectric(1)\n gbvi_force.setSolventDielectric(78)\n\n # Use scaling method.\n # gbvi_force.setBornRadiusScalingMethod(openmm.GBVISoftcoreForce.QuinticSpline)\n # gbvi_force.setQuinticLowerLimitFactor(0.75)\n # gbvi_force.setQuinticUpperBornRadiusLimit(50.0*units.nanometers)\n\n # Build indexable list of atoms.\n atoms = [atom for atom in molecule.GetAtoms()] \n \n # Assign GB/VI parameters.\n for atom in molecule.GetAtoms(): \n atomtype = atom.GetStringData(\"gbvi_type\") # GBVI atomtype\n charge = atom.GetPartialCharge() * units.elementary_charge\n radius = parameters['%s_%s' % (atomtype, 'radius')] * units.angstroms\n gamma = parameters['%s_%s' % (atomtype, 'gamma')] * units.kilocalories_per_mole \n # gamma *= -1.0 # DEBUG\n lambda_ = 1.0 # fully interacting\n # gbvi_force.addParticle(charge, radius, gamma, lambda_) # for GBVISoftcoreForce\n gbvi_force.addParticle(charge, radius, gamma) # for GBVIForce\n\n # Add bonds.\n for bond in molecule.GetBonds():\n # Get atom indices.\n iatom = bond.GetBgnIdx()\n jatom = bond.GetEndIdx()\n # Get bond length.\n (xi, yi, zi) = molecule.GetCoords(atoms[iatom])\n (xj, yj, zj) = molecule.GetCoords(atoms[jatom])\n distance = math.sqrt((xi-xj)**2 + (yi-yj)**2 + (zi-zj)**2) * units.angstroms\n # Identify bonded atoms to GBVI.\n 
gbvi_force.addBond(iatom, jatom, distance)\n\n # Add the force to the system.\n system.addForce(gbvi_force)\n \n # Build coordinate array.\n natoms = len(atoms)\n coordinates = units.Quantity(numpy.zeros([natoms, 3]), units.angstroms)\n for (index,atom) in enumerate(atoms):\n (x,y,z) = molecule.GetCoords(atom)\n coordinates[index,:] = units.Quantity(numpy.array([x,y,z]),units.angstroms) \n \n # Create OpenMM Context.\n timestep = 1.0 * units.femtosecond # arbitrary\n integrator = openmm.VerletIntegrator(timestep)\n context = openmm.Context(system, integrator, platform)\n\n # Set the coordinates.\n context.setPositions(coordinates)\n \n # Get the energy\n state = context.getState(getEnergy=True)\n energies[molecule] = state.getPotentialEnergy()\n\n return energies", "def make_albedo(self, x_sol, unc, nbands=7):\n\n n_doys = x_sol.shape[0] / 3\n n_bands = x_sol.shape[1]\n bhr_spectral = np.zeros((n_doys, n_bands))\n bhr_spectral_unc = np.zeros((n_doys, n_bands))\n bhr_spectral_nbar = np.zeros((n_doys, n_bands))\n bhr_spectral_nbar_unc = np.zeros((n_doys, n_bands))\n bhr_bb = np.zeros((n_doys, 3))\n bhr_bb_unc = np.zeros((n_doys, 3))\n to_vis = np.array([0.3265, 0., 0.4364, 0.2366, 0, 0, 0])\n a_to_vis = -0.0019\n to_nir = np.array([0., 0.5447, 0, 0, 0.1363, 0.0469, 0.2536])\n a_to_nir = -0.0068\n to_sw = np.array([0.3973, 0.2382, 0.3489, -0.2655, 0.1604, -0.0138, 0.0682])\n a_to_sw = 0.0036\n for band in xrange(n_bands):\n u1 = np.sqrt(unc[band].diagonal()[:n_doys])\n u2 = np.sqrt(unc[band].diagonal()[n_doys:(n_doys * 2)])\n u3 = np.sqrt(unc[band].diagonal()[2 * n_doys:])\n\n bhr_spectral[:, band] = (x_sol[:(n_doys), band] +\n 0.189184 * x_sol[(n_doys):(2 * n_doys), band] +\n 1.377622 * x_sol[(2 * n_doys):, band])\n bhr_spectral_unc[:, band] = (u1 + 0.189184 * u2 + 1.377622 * u3)\n\n bhr_bb[:, 0] = np.sum(bhr_spectral * to_vis, axis=1) + a_to_vis\n bhr_bb[:, 1] = np.sum(bhr_spectral * to_nir, axis=1) + a_to_nir\n bhr_bb[:, 2] = np.sum(bhr_spectral * to_sw, axis=1) + a_to_sw\n bhr_bb_unc[:, 0] = np.sum(bhr_spectral_unc * to_vis, axis=1) + a_to_vis\n bhr_bb_unc[:, 1] = np.sum(bhr_spectral_unc * to_nir, axis=1) + a_to_nir\n bhr_bb_unc[:, 2] = np.sum(bhr_spectral_unc * to_sw, axis=1) + a_to_sw\n\n kr = Kernels(0, 20, 0, \\\n LiType='Sparse', doIntegrals=False, \\\n normalise=1, RecipFlag=True, RossHS=False, MODISSPARSE=True, \\\n RossType='Thick')\n\n # n_doys = x_sol.shape[0] / 3\n for band in xrange(self.nbands):\n bhr_spectral_nbar[:, band] = (x_sol[:(n_doys), band] +\n kr.Ross[0] * x_sol[(n_doys):(2 * n_doys), band] +\n kr.Li[0] * x_sol[(2 * n_doys):, band])\n\n u1 = np.sqrt(unc[band].diagonal()[:n_doys])\n u2 = np.sqrt(unc[band].diagonal()[n_doys:(n_doys * 2)])\n u3 = np.sqrt(unc[band].diagonal()[2 * n_doys:])\n bhr_spectral_nbar_unc[:, band] = (u1 + kr.Ross[0] * u2 + kr.Li[0] * u3)\n\n return bhr_spectral[self.min_doy:self.max_doy], bhr_spectral_unc[self.min_doy:self.max_doy],\\\n bhr_bb[self.min_doy:self.max_doy], bhr_bb_unc[self.min_doy:self.max_doy],\\\n bhr_spectral_nbar[self.min_doy:self.max_doy], bhr_spectral_nbar_unc[self.min_doy:self.max_doy]", "def test_claim_generation():\n print(\"Executing test_claim_generation:\")\n append_9_theory=[\n (13,9),\n (30,)\n ]\n set_false_and_append_9_theory=[\n (13,0),\n (28,),\n (13,9),\n (30,)\n ]\n\n print(\"append_9_theory:\")\n print(language.program_string(append_9_theory))\n print(\"set_false_and_append_9_theory:\")\n print(language.program_string(set_false_and_append_9_theory))\n\n 
mind=minds.new_mind(theories=[append_9_theory,set_false_and_append_9_theory],claims=[(True,[])])\n\n for i in range(10):\n minds.generate_claims(mind)\n\n print(minds.mind_string(mind))" ]
[ "0.55873483", "0.54603845", "0.54429716", "0.5408802", "0.5403582", "0.5389649", "0.53191894", "0.5310863", "0.52691495", "0.52479964", "0.52202135", "0.5188183", "0.51712984", "0.5130737", "0.512443", "0.5119801", "0.5115056", "0.511297", "0.51073724", "0.51057136", "0.509822", "0.50856316", "0.5078628", "0.5065311", "0.50559086", "0.50515634", "0.50501007", "0.5047573", "0.50426716", "0.50258875", "0.5021562", "0.5006155", "0.4988171", "0.49877492", "0.49859148", "0.4975548", "0.4973135", "0.49720725", "0.4966232", "0.49614897", "0.49596068", "0.49539125", "0.49527526", "0.49449795", "0.49414095", "0.4939558", "0.49336144", "0.4929058", "0.49215972", "0.49201044", "0.4919918", "0.49184242", "0.4917631", "0.49154112", "0.49072564", "0.49061903", "0.48997542", "0.48944822", "0.4893685", "0.48935607", "0.4890988", "0.48848617", "0.48804468", "0.487573", "0.48733908", "0.4871686", "0.4868997", "0.48678514", "0.48666722", "0.48654106", "0.48590666", "0.4848272", "0.4847683", "0.48454654", "0.48454094", "0.4844409", "0.48357207", "0.48345813", "0.48245215", "0.48229137", "0.48196277", "0.481469", "0.48123428", "0.48071128", "0.48058873", "0.479711", "0.47965682", "0.47922125", "0.4788438", "0.47836062", "0.47809458", "0.4777429", "0.47763875", "0.47744498", "0.47738203", "0.477187", "0.47709835", "0.47668773", "0.47658417", "0.4764686" ]
0.5812885
0
send the value to set for the piezo to the controller
def func_piezo(self,piezo): self.write('SOURce:VOLTage:PIEZo '+str(piezo)) self.read()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def value(self, value):\n self.set_data(value)", "def update_controller(self):", "def _set_parameter(self):\n # Get parameter keys\n self.input_parameter = self.parameter_combobox.currentText()\n self.result_parameter = self.result_parameters[self.input_parameter]\n # Adjust axes labels\n self.ax.set_xlabel('{} steunpunt'.format(self.input_parameter))\n self.ax.set_ylabel('{} uitvoerlocatie'.format(self.input_parameter))\n # Set data\n self._set_data()", "def set_value (self):\n raise NotImplementedError", "def Update(self, controller):\n pass", "def value(self, value):\n\n\t\tself.__value = value", "def on_data(self, vp, value):\n eventName = self.name + \"_set_vp\"\n self._hass.bus.fire(eventName, {\"vp\": vp, \"value\": value})", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def post(self,request,format=None):\n option = int(request.data['option'])\n value = int(request.data['value'])\n industry = int(request.data['industry'])\n\n industry_obj = SpecialModeIndustry.objects.get(front_end_id=industry)\n industry_param = SpecialModeParameter.objects.get(user=request.user.info,industry=industry_obj)\n if(option==1):\n industry_param.slider1 = value\n elif(option==2):\n industry_param.slider2 = value\n elif(option==3):\n industry_param.slider3 = value\n elif(option==4):\n industry_param.slider4 = value\n \n industry_param.save()\n\n return Response(data={\"Success\":\"Params Updated\"},status=status.HTTP_200_OK)", "def set_value(self, name, value, force=False):\n par=self.params[name]\n if force or par.value_handler.is_set_allowed(allow_focus=self.change_focused_control):\n return self.display_table.set_value((self.display_table_root,name),value)", "def value(self, value):\n self._update_value(value)", "def change_value(self,val):\n self.val = val", "def set_parameter(self, param, value, stripe = 0, location = 3):\n if param in self.fpga.cabac_top[0].params:\n self.fpga.set_cabac_value(param, value, stripe)\n time.sleep(0.1)\n self.fpga.send_cabac_config(stripe)\n\n elif param in [\"V_SL\", \"V_SH\", \"V_RGL\", \"V_RGH\", \"V_PL\", \"V_PH\"]:\n self.fpga.set_clock_voltages({param: value})\n\n elif param == \"I_OS\":\n self.fpga.set_current_source(value, stripe)\n\n else:\n print(\"Warning: unidentified parameter for the REB: %s\" % param)", "def setValue(self,selected):\n self['input'].setValue(selected)", "def set_setpoint(self, value):\n value = value * self.conf['PSICONV']\n log.debug(\"Set pressure regulator %d to %f\", self.id_, value)\n self.synth.cbox.set_dac(self.id_, value)", "def set_value(self, value):\n self.value = value", "def set_value(self, value):\n self.value = value", "def set_value(self, value):\n self.value = value", "def update_object ( self, event ):\n self.value = self.control.GetValue()", "def value_set(self, model):\n if not model or model is self._model:\n return\n\n self._model = model\n self.part_text_set(\"user_id\", model.uname)\n self.part_text_set(\"text\", model.text)\n self.part_text_set(\"status_info\", model.info)\n \n model.request_thumbnail(self.cb_load_thumbnail)", "def prebaci_dan_nazad(self):\r\n value = int(self.brojDanaCombo.currentText()) #integer broj dana\r\n self.emit(QtCore.SIGNAL('promjeni_datum(PyQt_PyObject)'), -value)\r\n msg = 'request pomak {0} dana unazad'.format(value)\r\n 
logging.info(msg)", "def changeValue(self):\r\n # productive #onUpDnArrow\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n # widget.scrollPointButton.setText('Scroll Point for Needle ' + str(widget.editNeedleTxtBox.value) + ' (pt: ' + str(self.ptNumber) + ')')\r\n self.lockControlPoints(widget.editNeedleTxtBox.value)\r\n self.unlockControlPoints(widget.editNeedleTxtBox.value)\r\n widget.drawValidationNeedlesButton.text = \"Render Manual Needle \" + str(widget.editNeedleTxtBox.value)", "def set_value ( self, object, value ):\n object[ self.index ] = value", "def seleccionar_promocion(request):\n\n registros = Promotion.objects.all()\n for registro in registros:\n if request.method == 'POST':\n try:\n if request.POST['value'] == str(registro.id_promotion):\n registro.promotion_selected = 1\n registro.save()\n else:\n registro.promotion_selected = 0\n registro.save()\n except Exception:\n registro = None\n\n # serializer = PromotionSerializers(registros)\n return JSONResponse(status=201)", "def setValue(self,val):\n self.input.setValues(val)", "def value(self):", "def set_control(self, value):\n self.control = value", "def getvalue(self):\n ...", "def getvalue(self):\n ...", "def updateValue(self):\n self.value = self.var.get()", "def set(self, request, _object):\n\n value = request._get_parameter_value(self)\n value.object = _object", "def set_variable(self, request, context):\n response = SetVariableResponse()\n value = decode(request.value)\n self._delegator.set_variable(request.component, request.variable, value)\n return response", "def set_progress_value(self, value):\r\n\r\n pass", "def prebaci_dan_naprijed(self):\r\n value = int(self.brojDanaCombo.currentText()) #integer broj dana\r\n self.emit(QtCore.SIGNAL('promjeni_datum(PyQt_PyObject)'), value)\r\n msg = 'request pomak {0} dana unaprijed'.format(value)\r\n logging.info(msg)", "def setFactura(self, caja): \n self.caja = caja", "def setValue(self, value):\n self._value = value", "def set_setpoint(self, value):\n act = SetpointAction(self, value)\n return act.invoke()", "def setValue(self, value):\n self.setValues((value, value))", "def set_value(self, value):\n self.value = value\n return self", "def save(self, data):\n\t\tif self.value:\n\t\t\tdata['value'] = self.value", "def constant(fluid,network,propname,value,**params):\n network.set_pore_data(phase=fluid,prop=propname,data=value)", "def __set__(self, obj, value):\n driver = obj.driver\n WebDriverWait(driver, 100).until(\n lambda driver: driver.find_element_by_name(self.locator))\n driver.find_element_by_name(self.locator).send_keys(value)", "def update_proxy(self, instance, value):\n self.value = value", "def get_value(self):", "def controller():\n\n params.update()\n\n form = ControllerForm(\n request.form,\n obj=params\n )\n if request.method == 'POST' and form.validate():\n if form.submit_button.data:\n print(\"mode_heating=\" + form.mode_heating.data)\n print(\"mode_ecs=\" + form.mode_ecs.data)\n flash('Data posted')\n elif form.refresh_button.data:\n # enforce to reload the form by redirect and call 'GET' requests\n return redirect(url_for('controller'))\n else:\n display_form_errors(form)\n\n return render_template('controller.html', form=form, user=current_user)", "def set_pref(self, name, value):\r\n pass", "def value(self, grid):\n pass", "def changeValue(self):\n #productive #onUpDnArrow\n profprint()\n widget = slicer.modules.NeedleFinderWidget\n widget.scrollPointButton.setText('Scroll Point for Needle ' + str(widget.editNeedleTxtBox.value)+ ' 
(pt: '+str(self.ptNumber)+')')", "def Set(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def set_value(self,x):\n self._value = x", "def set_value(self,x):\n self._value = x", "def set_persistent_value(self, value, *args, **kwargs):\n pass", "def output(self):\n return {\n \"device\": self.device.id, \n \"action\": \"SetCurrentSetpoint\", \n \"arguments\": [\n {\n \"name\": \"NewCurrentSetpoint\", \n \"value\": self.value\n }\n ], \n \"service\": \"urn:upnp-org:serviceId:TemperatureSetpoint1\"\n }", "def value(self, value):\n self._value = value\n self.is_dirty = True", "def set_data(self, value):\n self._set_data(value)\n self.data_changed = True\n return", "def update(self, value):\n self.bar.setValue(value)\n self.text_label.setText('{}: {}/{}'.format(self.label, value, self.num_regions))", "def set_value(self, val):\n self.value = val", "def setParameter(self, name, value):", "def set_value(self, value):\n self.value = value\n self._layout.set_markup(self._get_markup(value))", "def handle_view(self, controller):\n \n order = controller.customer.my_order ## make a reference to the order of customer\n \n for i in range(len(order.items)):\n if not order.items[i]:\n continue\n \n label0 = Label(self, text=order.items[i])\n label0.grid(row=i+2, column=0, columnspan=2, padx=10)\n \n label1 = Label(self, text=\"QTY:\")\n label1.grid(row=i+2, column=2)\n \n qty = order.items[i].quantity\n var = IntVar()\n self.vars[i] = var\n self.vars[i].set(qty)\n combobox0 = ttk.Combobox(self, textvariable=self.vars[i], state=\"readonly\", values=[j+1 for j in range(self.max_range)], width='3')\n combobox0.bind(\"<<ComboboxSelected>>\", lambda event, c=controller.customer, p=i:self.onChange(c, p)) ## change pizza quantity\n combobox0.focus_set()\n combobox0.grid(row=i+2, column=3)\n\n button3 = Button(self, text=\"Remove\", command=lambda p=i:self.onRemove(controller, p))\n button3.grid(row=i+2, column=4)\n\n button4 = Button(self, text=\"CHECKOUT\", command=lambda:self.onCheckout(controller))\n button4.grid(row=1, column=4, columnspan=2, sticky='e')\n \n self.showOrderPrice(order)", "def set_Value(self, n_value):\n#Joerg S/Martin W advice\n self.StoredValue=n_value", "def setValue(self, value):\n self.setText(str(value))", "def set_setpoint(self, value):\n self.voltageSetpoint = value/self.gain\n self.widgets['p_setpoint'].setValue(value)\n #Need to reset the PID loop with this new setpoint value\n self._update_PID()", "def value_set(self, model):\n if not model or model is self._model:\n return\n\n self._model = model\n self.part_text_set(\"user_id\", model.uname)\n self.part_text_set(\"text\", model.text)\n \n model.request_thumbnail(self.cb_load_thumbnail)", "def __init__(__self__, *,\n value: str):\n pulumi.set(__self__, \"value\", value)", "def OnUpdatePlantCtrl(self, _):\n self.saveTexts()\n self.updateMarkers()", "def set_value(self, index, mode, value):\n address = self.get_address(index, mode)\n self.program[address] = value", "def _set_param(self, name, value):\n self._frozenjson._data[name] = value", "def setvalue(self,num,name,val):\n self.M.reconfigure(num,{name:float(val)})", "def send_foot_controller(self, value=0, ch=None):\n self.send_control_change(FOOT_CONTROLLER, value, ch=ch)", "def setValue(self, name: unicode, value: object) -> None:\n ...", "def setValue(self,val):\n if val:\n self.input.setValue(val)", "def config_cuerpo(self, cuerpo):\n # type: (Robot_Cuerpo)->None\n self.cuerpo = cuerpo", "def valuechange():\n\n 
tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )", "def plastic(self, value):\n self._custom_setter('plastic', value)", "def set_config_value(self, value, index=None):", "def set_value(self, value):\n self.value = str(value)", "def OnAdd(self, controller):\n pass", "def set():\n #Get values\n dmx = int(request.args.get('dmx'))\n value = int(request.args.get('value', default=\"-1\"))\n color = request.args.get('color', default=\"#000000\").strip(\"#\")\n #Check if in usable range\n #Dismantle colors\n r = int(color[0:2], 16)\n g = int(color[2:4], 16)\n b = int(color[4:7], 16)\n c,m,y,k = rgb_to_cmyk(r, g, b)\n fixture = channels[dmx].split(\" | \")[0]\n print fixture\n \"\"\"\n max_count = len(all_lights[fixture])\n print max_count\n \"\"\"\n prev_fix = fixture\n if r+g+b != 0:\n count = -1\n while(True):\n if count >= 512:\n break\n count += 1\n if channels[count] == \"\":\n continue\n fixture = channels[count].split(\" | \")[0]\n if fixture != prev_fix:\n continue\n name = channels[count].split(\" | \")[1]\n print name\n if name == \"R\":\n adresses[count] = r\n if name == \"G\":\n adresses[count] = g\n if name == \"B\":\n adresses[count] = b\n if name == \"C\":\n adresses[count] = c\n if name == \"M\":\n adresses[count] = m\n if name == \"Y\":\n adresses[count] = y\n if name == \"K\":\n adresses[count] = k\n else:\n if not 0 <= value <= 255:\n return \"Invalid Value\"\n adresses[dmx] = value\n dmxsender.send(adresses)\n #Return Debug information\n return json_back()", "def post_set(self, driver: AbstractHasFeatures, value: Any, i_value: Any,\n response: Any):\n pass", "def value(self, value: float):\n\n self._value = value", "def value(self, value: float):\n\n self._value = value", "def Set(self,value):\n self.Bus.Write_uInt8(self.Address,0x50+self.Pin,value)", "def assign(self, value):\n self.value = value", "def form_valid(self, form):\n self.instance = form.save()[0].prueba\n self.instance.evaluar()\n self.instance.save()\n return super(PresentarView, self).form_valid(form)", "def setValue(self, val):\r\n\t\tif self.connected != 1:\r\n\t\t\tprint(\"error: cannot set value before connect\")\r\n\t\t\treturn -1\r\n\t\t\r\n\t\tif val < 0 or val > 95:\r\n\t\t\tprint(\"error: cannot set value. 
not in range(0-95)\")\r\n\t\t\treturn -1\r\n\t\t\r\n\t\tcmd = str.encode(\"V\" + str(val) + \";\")\r\n\t\ttry:\r\n\t\t\tself.ser.write(cmd)\r\n\t\texcept:\r\n\t\t\tprint(\"error: set value failed\")\r\n\t\t\treturn -1\r\n\t\t\t\r\n\t\tprint(\"set value success\")", "def set_variable(self, name, value):\n self.send_to_backend('set', name=name, value=value)\n self.refresh_variable(name)", "def result(self, value):\n self.set_local(0, value)", "def setColor(self,value):\n\t\tself.politics = value if(type(value) is int)else int(value[1:],16)\n\t\tself.canvas.itemconfig('node_'+self.identifier,fill=self.toRGB())", "def setField(self, data):\n\t\tview = self.view\n\t\tview.sbAbstraccion.setValue(data['sbAbstraccion'])", "def update(self, value):\n log_gui.debug(\"update value of field %s with : %s\", repr(self._name), value)\n wid = self._store_widget\n wid.setProperty(\"python-object\", value)\n wid.emit(self._sig)", "def _set_par(vid, par, value):\n traci.vehicle.setParameter(vid, \"carFollowModel.%s\" % par, str(value))", "def data(value):\n return value", "def output(self):\n return {\n \"device\": self.device.id, \n \"action\": \"SetColor\",\n \"arguments\": [\n {\n \"name\": \"newColorTargetValue\", \n \"value\": self.value\n }\n ], \n \"service\": \"urn:upnp-org:serviceId:RGBController1\"\n }", "def handle_set_radar_vis(self, req):\n self.radar_vis = req.radar_state\n msg = \"Radar state of vehicle #%i successfully set to %s\" % (self.vehicle_id, self.radar_vis)\n return srvs.SetRadarVisResponse(True, msg)" ]
[ "0.593418", "0.5828751", "0.57180893", "0.5538856", "0.5415524", "0.54018885", "0.5385673", "0.5354321", "0.5354321", "0.5354321", "0.5354321", "0.5354321", "0.5354321", "0.5338695", "0.5303437", "0.5277722", "0.5271039", "0.5270866", "0.5265829", "0.52549756", "0.525472", "0.525472", "0.525472", "0.5195756", "0.515856", "0.5140134", "0.5136455", "0.5133309", "0.5112598", "0.5079909", "0.5072006", "0.50652236", "0.50644565", "0.50644565", "0.5062978", "0.5057825", "0.5057307", "0.5049082", "0.50389135", "0.502478", "0.5022014", "0.50192136", "0.5015564", "0.5000597", "0.49902984", "0.4981888", "0.49782538", "0.49678", "0.49613544", "0.49580187", "0.4954676", "0.4954599", "0.49527273", "0.49432388", "0.49414834", "0.49414834", "0.49406266", "0.4940369", "0.4936845", "0.4936012", "0.4923804", "0.49205822", "0.49124667", "0.49074695", "0.4902759", "0.49000007", "0.48940834", "0.4886341", "0.48840764", "0.48816124", "0.48812658", "0.48692217", "0.48680004", "0.4856912", "0.48558694", "0.48555377", "0.4850695", "0.48474225", "0.48350897", "0.48241618", "0.48203892", "0.48091617", "0.48075646", "0.48048645", "0.4802277", "0.4801158", "0.4801158", "0.47994602", "0.47971216", "0.4794616", "0.4793863", "0.47928745", "0.4786384", "0.47862872", "0.47860652", "0.47795662", "0.47733808", "0.47732666", "0.47718602", "0.4770601" ]
0.5316031
14
conv_forward performs forward propagation over a convolutional layer of a neural network.
def conv_forward(A_prev, W, b, activation, padding="same", stride=(1, 1)): m, h_prev, w_prev, c_prev = A_prev.shape kh, kw, _, c_new = W.shape ph = pw = 0 sh, sw = stride if padding == 'same': ph = int(((h_prev - 1) * sh + kh - h_prev) / 2) pw = int(((w_prev - 1) * sw + kw - w_prev) / 2) elif type(padding) == tuple: ph, pw = padding pad = np.pad(A_prev, ((0, 0), (ph, ph), (pw, pw), (0, 0)), 'constant') ch = int((h_prev + 2 * ph - kh) / sh) + 1 cw = int((w_prev + 2 * pw - kw) / sw) + 1 conv_W = np.zeros((m, ch, cw, c_new)) for i in range(ch): for j in range(cw): for k in range(c_new): slide_img = pad[:, i * sh:i * sh + kh, j * sw:j * sw + kw] kernel = W[:, :, :, k] element = np.multiply(slide_img, kernel) conv_W[:, i, j, k] = np.sum(np.sum(np.sum(element, axis=1), axis=1), axis=1) Z = conv_W + b return activation(Z)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward_pass_on_convolutions(self, x):\n conv_output = None\n for module_name, module in self.model._modules.items():\n print(module_name)\n if module_name == 'fc':\n return conv_output, x\n x = module(x) # Forward\n # print(module_name, module)\n if module_name == self.target_layer:\n print('True')\n x.register_hook(self.save_gradient)\n conv_output = x # Save the convolution output on that layer\n return conv_output, x", "def conv_forward(x, w):\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. #\n ###########################################################################\n N,C,H,W = x.shape\n F,C,HH,WW = w.shape\n H1 = H-HH+1\n W1 = W-WW+1\n out = np.zeros([N,F,H1,W1])\n wn = np.tile(w,(N,1,1,1,1))\n all_but_first = tuple(range(out.ndim))[1:]\n for f in range(F):\n for i in range(H1):\n for j in range(W1):\n out[:,f,i,j] = np.sum(x[:,:,i:i+HH,j:j+WW] * wn[:,f], axis=all_but_first)\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w)\n return out, cache", "def conv_forward(x, w):\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. #\n ###########################################################################\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n H_prime = H - (HH - 1)\n W_prime = W - (WW - 1)\n out = np.zeros((N, F, H_prime, W_prime))\n \n for n in range(N):\n for f in range(F):\n for i in range(H_prime):\n for j in range(W_prime):\n out[n, f, i, j] = np.sum(x[n, :, i:i+HH, j:j+WW] * w[f])\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w)\n return out, cache", "def forward(self, x):\n x = self.conv1(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x)\n x = self.conv2(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x) \n x = self.maxpool(x) \n return x", "def forward(self, state):\n '''\n state = F.relu(self.conv1(state))\n state = F.relu(self.conv2(state))\n state = F.relu(self.conv3(state))\n state = F.relu(self.fc1(state))\n \n action = F.relu(self.fc2(state))\n \n return action\n '''\n \n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n \n return x", "def conv_forward_naive(x, w, b, conv_param):\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. 
#\n ###########################################################################\n stride = conv_param['stride']\n pad = conv_param['pad']\n #Use np.pad for zero padding of the input.\n #Save shape of input data and filters.\n N,C,H,W = x.shape\n F,C,HH,WW = w.shape\n x = np.pad(x,[(0,0),(0,0),(1,1),(1,1)],mode = 'constant')\n #Convolve each filter to create the activation maps.\n '''Compute activation maps size.First dimension:number of training examples.\n Second dimension:depth is as the number of filters.\n Width and height will be computed based on the equation that we showed in the lectures.\n The equation :(W - F + 2P)/S + 1 where:\n -W:input size.\n -F:receptive field(number of filters).\n -P: padding size.\n -S: the stride that we use.\n '''\n out_width = int((W - WW + 2 * pad) / (stride) + 1)\n out_height = int((H - HH + 2 * pad) / (stride) + 1)\n out = np.zeros((N,F,out_height,out_width))\n #Compute the activation maps for each one of the N training examples.\n for t in range(N):\n curr_x = x[t,:,:,:]\n #Loop over each filter.\n for k in range(F):\n curr_filter = w[k,:,:,:]\n #Go over all valid spots in current training example.\n out_i = 0\n for i in range(0,x.shape[2] - HH + 1,stride):\n out_j = 0\n for j in range(0,x.shape[3] - WW + 1,stride):\n #Compute dot product in current spot.\n dot_product = np.sum(curr_filter * curr_x[:,i:(i + HH),j:(j + WW)])\n out[t,k,out_i,out_j] = dot_product \\\n + b[k]\n #Increment out_j\n out_j += 1\n out_i += 1\n\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w, b, conv_param)\n return out, cache", "def forward(self, x):\n\n x = self.first_conv_layer(x)\n x = self.second_conv_layer(x)\n x = self.third_conv_layer(x)\n x = self.fourth_conv_layer(x)\n x = self.fifth_conv_layer(x)\n\n '''\n x = x.view(-1, 4 * 4 * 512)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n '''\n\n sigmoid_out = nn.functional.sigmoid(x)\n\n return sigmoid_out", "def forward(self, image):\n first_conv_features = self.pool(F.relu(self.conv1(image)))\n second_conv_features = self.pool(F.relu(self.conv2(\n first_conv_features)))\n third_conv_features = self.pool(F.relu(self.conv3(\n second_conv_features)))\n # flatten all dimensions except batch\n flattened_features = torch.flatten(third_conv_features, 1)\n fully_connected_first_out = F.relu(self.fc1(flattened_features))\n fully_connected_second_out = F.relu(self.fc2(fully_connected_first_out))\n two_way_output = self.fc3(fully_connected_second_out)\n return two_way_output", "def forward(self, x):\n c_out = self.conv_net.forward(x)\n\n c_out_flat = c_out.flatten(start_dim=1)\n \n \n return self.linear.forward(c_out_flat)", "def conv_forward_naive(x, w, b, conv_param):\n out = None\n #############################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, w, b, conv_param)\n return out, cache", "def conv_forward_naive(x, w, b, conv_param):\n out = None\n #############################################################################\n # TODO: Implement the convolutional forward pass. 
#\n # Hint: you can use the function np.pad for padding. #\n #############################################################################\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n\n stride = conv_param['stride']\n pad = conv_param['pad']\n\n Hc = 1 + (H + 2 * pad - HH) / stride\n Wc = 1 + (H + 2 * pad - WW) / stride\n\n ## pad all the images\n xp = np.pad(x,\n ((0, 0), (0, 0), (pad, pad), (pad, pad)),\n mode='constant', constant_values=0)\n\n out = np.random.randn(N, F, Hc, Wc)\n\n hc, wc = (0, 0)\n for i in xrange(N):\n for j in xrange(F):\n for hc in xrange(Hc):\n for wc in xrange(Wc):\n xs = xp[i, :, hc*stride:hc*stride+HH, wc*stride:wc*stride+WW]\n out[i, j, hc, wc] = np.sum(xs * w[j,:,:,:]) + b[j]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, w, b, conv_param)\n return out, cache", "def forward(self, state):\n output = self.conv_layers(state)\n output = output.view(-1, 7*7*64)\n output = self.fc(output)\n return output", "def conv_forward(x, w):\n out = None\n ###########################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. #\n ###########################################################################\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n out = np.zeros((N,F,H-HH+1,W-WW+1))\n \n w = np.flip(w)\n\n for n in range(N):\n for f in range(F):\n out[n][f] = filt3D(x[n],w[f])\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, w)\n return out, cache", "def forward(self, x):\n\n x = self.first_conv_layer(x)\n x = self.second_conv_layer(x)\n x = self.third_conv_layer(x)\n x = self.fourth_conv_layer(x)\n\n #print 'x.shape=', x.shape\n x = x.view(-1, 5 * 5 * 64)\n x = F.relu(self.fc1(x))\n\n sigmoid_out = nn.functional.sigmoid(x)\n\n return sigmoid_out", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward(self, inp: torch.Tensor) -> torch.Tensor:\n x = self.conv1(inp)\n x = self.maxpool(x)\n\n for i in range(self._num_layers):\n x = getattr(self, \"C%d\" % (i + 1))(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x", "def forward(self, x):\n # Convolutional Layers\n ## add pooling layers\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = x.view(-1, 256) # flatten to pass to fully connected layers\n\n # fully connected layers\n ## and dropout layers\n x = F.relu(self.dropout(self.fc1(x)))\n x = F.relu(self.dropout(self.fc2(x)))\n x = self.fc3(x)\n\n return x", "def forward(self, x):\n x = self.pad_tensor(x)\n if 
self.network_controller.is_float_coefficient:\n return self.bn(self.conv(x))\n else:\n res = F.conv2d(x, self.q(self.conv.weight), self.conv.bias, self.stride,\n self.padding_conv, self.dilation, self.group)\n return self.bn(res)", "def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.maxpool(out)\n out = self.avgpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out", "def forward(self, input, padding=0):\n return self.conv(input, weight=self.weight, groups=self.groups, padding=padding)", "def forward(self, input):\n return self.conv(input, weight=self.weight, groups=self.groups)", "def forward(self, input):\n return self.conv(input, weight=self.weight, groups=self.groups)", "def forward(self, x):\n x = self.pool(x)\n x = self.conv(x)\n x = x.reshape(x.shape[0], -1)\n x = self.relu(self.fc1(x))\n x = self.dropout1(x)\n x = self.fc2(x)\n x = self.dropout2(x)\n x = self.fc3(x)\n x = self.dropout3(x)\n x = self.fc4(x)\n\n return x", "def forward(self, x):\n\n\t\t## Conv layers\n\t\tx = self.avgpool(F.tanh(self.conv1(x)))\n\t\tx = self.avgpool(F.tanh(self.conv2(x)))\n\t\tx = F.tanh(self.conv3(x))\n\n\t\t## Flatten\n\t\tx = x.view(x.size(0), -1)\n\n\t\t## Fully connected layers\n\t\tx = F.tanh(self.fc1(x))\n\t\tx = self.fc2(x)\n\n\t\tx = F.softmax(x, dim=1)\n\n\t\treturn x", "def forward(self, input_x) -> Tensor:\n conv_out = self.conv(input_x).view(input_x.size()[0], -1)\n return self.head(conv_out)", "def forward(self, input_x) -> Tensor:\n conv_out = self.conv(input_x).view(input_x.size()[0], -1)\n return self.head(conv_out)", "def forward(self, x):\n\n x = F.max_pool2d(F.relu(self.batch_norm1(self.conv1(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm2(self.conv2(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm3_b(self.conv3_b(F.relu(self.batch_norm3_a(self.conv3_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm4_b(self.conv4_b(F.relu(self.batch_norm4_a(self.conv4_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm5_b(self.conv5_b(F.relu(self.batch_norm5_a(self.conv5_a(x)))))), 3, stride=2, padding=1)\n x = self.avg_pool(x).view(-1,512)\n out = self.linear(x)\n\n return out", "def forward_pass(self, x):\n # Forward pass on the convolutions\n conv_output, x = self.forward_pass_on_convolutions(x)\n x = x.view(x.size(0), -1) # Flatten\n # Forward pass on the classifier\n x = self.model.fc(x)\n return conv_output, x", "def forward(self, x):\n x = self.first_deconv(x)\n x = self.first_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.second_deconv(x)\n x = self.second_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.third_deconv(x)\n x = self.third_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.fourth_deconv(x)\n x = self.fourth_batch_norm(x)\n\n x = self.fifth_deconv(x)\n x = self.fifth_batch_norm(x)\n\n x = self.sixth_deconv(x)\n x = self.sixth_batch_norm(x)\n\n x = self.seventh_deconv(x)\n\n # sigmoid_out = nn.functional.sigmoid(x)\n tanh_out = nn.functional.tanh(x)\n\n out = (tanh_out + 1) * 255 / 2\n\n # print 'out.shape =', out.shape\n\n return out", "def forward(self, conv_input, fc_input, conv_hidden_states=None):\n batch_size = fc_input.shape[0]\n lstm_out, hidden = self.conv_lstm(conv_input, hidden_state=conv_hidden_states)\n fc_in_conv = self.dropout(lstm_out[-1][:,-1,:,:,:]).reshape((batch_size, 
-1))\n fc_in = torch.cat([fc_in_conv, fc_input], dim=1)\n \n return self.fully_connected(fc_in), hidden", "def forward(self, x):\n return self.relu(self.conv(x))", "def forward(self, x):\n \n x = F.relu(self.conv1_bn(self.conv1(self.conv0_bn(x))))\n x = F.relu(self.conv2_bn(self.conv2(x)))\n x = F.relu(self.conv3_bn(self.conv3( self.maxpool2(x))))\n x = F.relu(self.conv4_bn(self.conv4( self.maxpool3(x))))\n x = self.maxpool4(x) \n x = x.view(-1, 1184)\n x = F.relu(self.fc1(x))\n x = self.dense1_bn(x)\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x)", "def forward(self, x):\n conv_output = self.conv1(x)\n\n # The window size of max pooling layer of CNN depends on the dimension of conv1d output.\n # Since padding size is 1 and kernal size is 5, so the output of conv1d is with dimension\n # length_of_input_sequence - 2 + 5 - 1 = length_of_input_sequence - 2\n x_conv = F.max_pool1d(F.relu(conv_output), x.size()[-1] - 2)\n return x_conv", "def conv_forward_naive(x, w, b, conv_param):\n\tout = None\n\tN, C, H, W = x.shape\n\tF, _, HH, WW = w.shape\n\tpad = conv_param['pad']\n\tstride = conv_param['stride']\n\tHp = int(1 + (H + 2 * pad - HH) / stride)\n\tWp = int(1 + (W + 2 * pad - WW) / stride)\n\n\tpad_width = ((0,0), (0,0), (pad,pad), (pad,pad))\n\tpadded = np.pad(x, pad_width=pad_width, mode='constant', constant_values=0)\n\n\tout = np.zeros((N, F, Hp, Wp))\n\t\n\tfor data_ind in range(N):\n\t\tfor filter_ind in range(F):\n\t\t\tfor fw in range(Wp):\n\t\t\t\tws = fw*stride\n\t\t\t\tfor fh in range(Hp):\n\t\t\t\t\ths = fh*stride\n\t\t\t\t\tout[data_ind, filter_ind, fh, fw] += np.sum(padded[data_ind][:, hs:hs+HH,\n\t\t\t\t\t\tws:ws+WW] * w[filter_ind]) + b[filter_ind]\n\tcache = (x, w, b, conv_param)\n\treturn out, cache", "def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out += residual\n\n residual = out\n out = self.conv2(out)\n out = self.bn2(out)\n out += residual\n\n residual = out\n out = self.conv3(out)\n out = self.bn3(out)\n out += residual\n if self.apply_activation: out = self.relu(out)\n return out", "def forward(self, x): \n # Layer 1\n x = F.elu(self.conv1(x)) # bsize x l1_channels x 1 x Nsamples\n x = self.batchnorm1(x)\n x = F.dropout(x, 0.25)\n x = x.permute(0, 2, 1, 3) # bsize x 1 x l1_channels x Nsamples\n\n # Layer 2\n x = self.padding1(x)\n x = F.elu(self.conv2(x)) # bsize x l2_channels x l1_channels x Nsamples\n x = self.batchnorm2(x) \n x = F.dropout(x, 0.25)\n x = self.pooling2(x) # bsize x l2_channels x floor(l1_channels/2) x floor(Nsamples/4)\n\n # Layer 3\n x = self.padding2(x)\n x = F.elu(self.conv3(x)) # bsize x l3_channels x floor(l1_channels/2) x floor(Nsamples/4)\n x = self.batchnorm3(x)\n x = F.dropout(x, 0.25)\n x = self.pooling3(x) # bsize x l3_channels x floor(l1_channels/4) x floor(Nsamples/16)\n\n # Fully-connected Layer\n x = x.view(-1, self.fc1.in_features) # bsize x (l3_channels*floor(l1_channels/4)*floor(Nsamples/16))\n x = F.sigmoid(self.fc1(x)) # bisze x self.fc1.out_features \n \n if self.fc1.out_features == 1:\n x = x.view(-1) # bsize (1D if 1 output unit)\n \n return x", "def forward(self, x):\n\n # 2.2 BUG: Did Bob do anything wrong in the forward method?\n # HINT: Usually a CNN would expect correctly normalized data.\n # Roughly make input to be within -1 to 1 range\n x = (x - 127.5) / 127.5\n\n # Apply conv layers\n x = self.convs(x)\n\n # Global average pooling\n x = x.mean(-1).mean(-1)\n\n # Output layer\n x = self.output(x)\n\n return x", "def forward(self, x):\n 
out_conv1 = self.conv1(x)\n out_conv2 = self.conv2(out_conv1)\n out_conv3 = self.conv3(out_conv2)\n out_conv4 = self.conv4(out_conv3)\n out_conv5 = self.conv5(out_conv4)\n out_conv6 = self.conv6(out_conv5)\n out_conv7 = self.conv7(out_conv6)\n\n out_upconv7 = self.crop_top_left(self.upconv7(out_conv7), out_conv6)\n concat7 = torch.cat((out_upconv7, out_conv6), 1)\n out_iconv7 = self.iconv7(concat7)\n\n out_upconv6 = self.crop_top_left(self.upconv6(out_iconv7), out_conv5)\n concat6 = torch.cat((out_upconv6, out_conv5), 1)\n out_iconv6 = self.iconv6(concat6)\n\n out_upconv5 = self.crop_top_left(self.upconv5(out_iconv6), out_conv4)\n concat5 = torch.cat((out_upconv5, out_conv4), 1)\n out_iconv5 = self.iconv5(concat5)\n\n out_upconv4 = self.crop_top_left(self.upconv4(out_iconv5), out_conv3)\n concat4 = torch.cat((out_upconv4, out_conv3), 1)\n out_iconv4 = self.iconv4(concat4)\n disp4 = self.alpha * self.predict_disp4(out_iconv4) + self.beta\n\n out_upconv3 = self.crop_top_left(self.upconv3(out_iconv4), out_conv2)\n disp4_up = self.crop_top_left(torch.nn.functional.interpolate(disp4,\n scale_factor=2,\n mode='bilinear',\n align_corners=False), out_conv2)\n concat3 = torch.cat((out_upconv3, out_conv2, disp4_up), 1)\n out_iconv3 = self.iconv3(concat3)\n disp3 = self.alpha * self.predict_disp3(out_iconv3) + self.beta\n\n out_upconv2 = self.crop_top_left(self.upconv2(out_iconv3), out_conv1)\n disp3_up = self.crop_top_left(torch.nn.functional.interpolate(disp3,\n scale_factor=2,\n mode='bilinear',\n align_corners=False), out_conv1)\n concat2 = torch.cat((out_upconv2, out_conv1, disp3_up), 1)\n out_iconv2 = self.iconv2(concat2)\n disp2 = self.alpha * self.predict_disp2(out_iconv2) + self.beta\n\n out_upconv1 = self.crop_top_left(self.upconv1(out_iconv2), x)\n disp2_up = self.crop_top_left(torch.nn.functional.interpolate(disp2,\n scale_factor=2,\n mode='bilinear',\n align_corners=False), x)\n concat1 = torch.cat((out_upconv1, disp2_up), 1)\n out_iconv1 = self.iconv1(concat1)\n disp1 = self.alpha * self.predict_disp1(out_iconv1) + self.beta\n\n if self.training:\n return disp1, disp2\n else:\n return disp1", "def forward_pass_on_convolutions(x, target_layer):\n net.features[-1].register_forward_hook(save_target_output)", "def forward(self, x):\n x=T.div(x,255.0)\n \n #print(state[20:,20:,0])\n #print(state[:,0,:,:])\n conv1 = F.relu(self.conv1(x))\n conv2 = F.relu(self.conv2(conv1))\n conv3 = F.relu(self.conv3(conv2))\n ###\n conv_state = conv3.view(conv3.size()[0], -1)\n flat1 = F.relu(self.fc1(conv_state))\n flat2 = F.relu(self.fc2(flat1))\n\n V = self.V(flat2)\n A = self.A(flat2)\n\n return V, A\n return x", "def conv_forward(x, w, b, conv_param):\n stride = conv_param['stride']\n pad = conv_param['pad']\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n H_out = 1 + (H + 2 * pad - HH) / stride\n W_out = 1 + (H + 2 * pad - WW) / stride\n H_out = int(H_out)\n W_out = int(W_out)\n\n out = np.zeros((N, F, H_out, W_out))\n for n in range(N):\n conv_in = np.pad(x[n], ((0, 0), (pad, pad), (pad, pad)), mode='constant')\n for f in range(F):\n conv_w = w[f]\n conv_b = b[f]\n for i in range(H_out):\n for j in range(W_out):\n conv_i = i * stride\n conv_j = j * stride\n conv_area = conv_in[:, conv_i : conv_i + HH, conv_j : conv_j + WW]\n out[n, f, i, j] = np.sum(conv_area * conv_w) + conv_b\n\n cache = (x, w, b, conv_param)\n return out, cache", "def forward(self, x):\n x = self.conv(x)\n return x", "def forward(self, someInputs):\n\n if self.spaceConv is True:\n someInputs = 
self.SpaceConvMatrixTranspose(someInputs)\n if self.outputValues.shape == self.outputShape:\n pass\n else:\n self.outputValues = np.transpose(self.outputValues, (3, 1, 2, 0))\n else:\n someInputs = np.reshape(someInputs, (self.inputShape))\n\n assert someInputs.shape == self.inputShape\n\n # Adds Zero Padding\n if self.zeroPadding is 0: # no padding added\n self.inputs = someInputs\n\n elif self.zeroPadding > 0: # adds padding\n self.inputs = np.zeros((self.inputShape[0], self.inputShape[1], self.inputShape[2] + 2 * self.zeroPadding,\n self.inputShape[\n 3] + 2 * self.zeroPadding)) # creates a zeros vector with the shape of the padded inputs\n\n for n in range(self.inputShape[0]): # does the padding along the W dimension\n for cin in range(self.inputShape[1]):\n for h in range(self.inputShape[2]):\n self.inputs[n, cin, h, :] = np.lib.pad(self.someInputs[n, cin, h, :],\n (self.zeroPadding, self.zeroPadding),\n 'constant', constant_values=(0, 0))\n for n in range(self.inputShape[0]): # does the padding along the H dimmension\n for cin in range(self.inputShape[1]):\n for w in range(self.inputShape[3]):\n self.inputs[n, cin, :, w + self.zeroPadding] = np.lib.pad(self.someInputs[n, cin, :, w],\n (self.zeroPadding, self.zeroPadding),\n 'constant', constant_values=(0, 0))\n\n # Do the convolution\n print \"Performing convolution\"\n timeA = time.time()\n for n in range(self.inputShape[0]):\n for cout in range(self.numberOfFilters):\n for cin in range(self.inputShape[1]):\n nh = 0\n for h in np.arange(0, self.inputShape[2] - self.kernelSize[0] + 1, self.stride[0]):\n nw = 0\n for w in np.arange(0, self.inputShape[3] - self.kernelSize[1] + 1, self.stride[1]):\n activationMap = self.inputs[n, cin, h:h + self.kernelSize[0],\n w:w + self.kernelSize[1]] # Portion of the input feature map convolved\n kernel = self.weights[cout, :, :] # kernel used for the convolution\n self.outputValues[n, cout, nh, nw] = np.sum(activationMap * kernel) + self.bias[\n cout] # convolution\n nw += 1\n nh += 1\n\n timeB = time.time()\n\n if self.spaceConv is True:\n self.outputValues = np.transpose(self.outputValues, (3, 1, 2, 0))\n\n # print \"Convolution took \" + str(timeB - timeA) + \" seconds\"\n\n # Applies the activation function to the resultant matrix\n if self.activationFunction is 'relu':\n self.outcome = self.relu(self.outputValues)\n # Applies reLU function\n if self.__nextLayer is None:\n return self.outcome\n else:\n return self.__nextLayer.forward(self.outcome) # Applies eLU function\n\n elif self.activationFunction is 'elu':\n self.outcome = self.elu(self.outputValues, self.alpha)\n if self.__nextLayer is None:\n return self.outcome\n else:\n return self.__nextLayer.forward(self.outcome)\n\n elif self.activationFunction is 'sigmoid': # Applies sigmoid function\n\n self.outcome = self.sigmoid(self.outputValues)\n if self.__nextLayer is None:\n return self.outcome\n else:\n return self.__nextLayer.forward(self.outcome)", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n model_output = None\n #######################################################################\n # Student code begins\n #######################################################################\n\n (N,C,H,W) = x.shape\n\n conv_features = self.conv_layers(x)\n \n flat_features = conv_features.reshape(-1, 500)\n model_output = self.fc_layers(flat_features)\n\n\n #######################################################################\n # Student code ends\n #######################################################################\n return 
model_output", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def forward(self, x):\n\n def run0(x, dummy):\n lout1 = self.lconv1(x)\n out1 = self.conv1(lout1)\n lout2 = self.lconv2(out1 + lout1)\n out2 = self.conv2(lout2)\n lout3 = self.lconv3(out2 + lout2)\n out3 = self.conv3(lout3)\n lout4 = self.lconv4(out3 + lout3)\n out4 = self.conv4(lout4)\n lout5 = self.lconv5(out4 + lout4)\n out5 = self.conv5(lout5)\n lout6 = self.lconv6(out5 + lout5)\n out6 = self.conv6(lout6)\n lout7 = self.lconv7(out6 + lout6)\n out7 = self.conv7(lout7)\n mat = out7[:, :, :, None] + out7[:, :, None, :]\n cur = mat\n if self.num_1d:\n output1d = self.final_1d(out7)\n return cur, output1d\n else:\n return cur\n\n dummy = torch.Tensor(1)\n dummy.requires_grad = True\n if self.num_1d:\n cur, output1d = checkpoint(run0, x, dummy)\n else:\n cur = checkpoint(run0, x, dummy)\n\n def run1(cur):\n first = True\n for lm, m in zip(self.lconvtwos[:7], self.convtwos[:7]):\n if first:\n cur = lm(cur)\n\n first = False\n else:\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run2(cur):\n for lm, m in zip(self.lconvtwos[7:13], self.convtwos[7:13]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run3(cur):\n for lm, m in zip(self.lconvtwos[13:], self.convtwos[13:]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n\n cur = self.final(cur)\n cur = 0.5 * cur + 0.5 * cur.transpose(2, 3)\n return cur\n\n cur = checkpoint(run1, cur)\n cur = checkpoint(run2, cur)\n cur = checkpoint(run3, cur)\n\n if self.num_1d:\n return cur, output1d\n else:\n return cur", "def keras_functional_conv_net():\n inputs = tf.keras.layers.Input(shape=(28, 28, 3))\n x = tf.keras.layers.Conv2D(4, kernel_size=3, activation=None)(inputs)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation(\"relu\")(x)\n x = tf.keras.layers.Conv2D(16, kernel_size=3, activation=None)(x)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.PReLU()(x)\n x = tf.keras.layers.Conv2D(16, kernel_size=3, activation=None)(x)\n outputs = tf.keras.layers.Conv2D(32, kernel_size=3, activation=\"relu\")(x)\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n\n return model", "def functional_forward(self, x, weights):\n\n for block in [1, 2, 3, 4, 5, 6]:\n x = functional_conv_block(x, weights[f'conv{block}.0.weight'], weights[f'conv{block}.0.bias'],\n weights.get(f'conv{block}.1.weight'), weights.get(f'conv{block}.1.bias'))\n x = x.view(x.size(0), -1)\n x = F.linear(x, weights['logits.weight'], weights['logits.bias'])\n\n return x", "def forward_pass(X,architecture):\n \n architecture['layer1'][0] = X\n kernel_shape1 = architecture['layer1'][7]\n stride1 = architecture['layer1'][8]\n if kernel_shape1 is not None and not isinstance(kernel_shape1,int):\n X_input_1_im2col,imX = im2col(X,kernel_shape1,stride1,im_needed = False, shape_specified = True)\n architecture['layer1'][4] = X_input_1_im2col\n else:\n architecture['layer1'][4] = None\n\n for layer in range(len(architecture)): # Feedforward from the first till the second last layer\n 
X_input,X_output,weightsi,biasi,X_input_1_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imx = architecture['layer{}'.format(layer+1)]\n\n if operationi == 'conv_bn_relu':\n conv_output = relu(BatchNorm(torch.t(X_input_1_im2col).mm(weightsi) + biasi))\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_relu':\n conv_output = relu(torch.t(X_input_1_im2col).mm(weightsi) + biasi)\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_bn_sigmoid':\n conv_output = sigmoid(BatchNorm(torch.t(X_input_1_im2col).mm(weightsi) + biasi))\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_sigmoid':\n conv_output = 
sigmoid(torch.t(X_input_1_im2col).mm(weightsi) + biasi)\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'maxpool':\n maxpool_output = maxpool(X_input,kernel_shapei,stridei)\n\n maxpool_output = torch.reshape(maxpool_output,output_shapei)\n\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = maxpool_output\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n architecture['layer{}'.format(layer+2)][4],imX = im2col(maxpool_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'flatten_dense_relu':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'relu',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'flatten_dense_none':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'none',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'flatten_dense_sigmoid':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'sigmoid',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'softmax':\n Xin = architecture['layer{}'.format(layer+1)][0]\n output = softmax(Xin).squeeze()\n architecture['layer{}'.format(layer+1)][1] = output\n if layer == len(architecture) - 1:\n y_pred = architecture['layer{}'.format(len(architecture))][1]\n \n return y_pred", "def forward(self, x):\r\n\r\n y = self.conv1(x)\r\n y = self.bn1(y)\r\n y = F.relu(y, inplace = True)\r\n y = self.conv2(y)\r\n y = self.bn2(y)\r\n y = F.relu(y, inplace = True)\r\n\r\n return y", "def forward(self, X):\n features = self.get_conv_feats(X)\n prediction = blah\n return (prediction)", "def forward(self, state):\n x = 
self.conv(state).view(-1, self.hid_size)\n x = self.fc1(x)\n x = F.relu(x)\n return self.fc2(x)", "def forward(self, x):\n x1, x2 = x\n y1 = self.conv_net.forward(x1)\n y2 = self.sparse_net.forward(x2)\n return y1, y2", "def conv_relu_forward(x, w, b, conv_param):\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n out, relu_cache = relu_forward(a)\n cache = (conv_cache, relu_cache)\n return out, cache", "def conv_relu_forward(x, w, b, conv_param):\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n out, relu_cache = relu_forward(a)\n cache = (conv_cache, relu_cache)\n return out, cache", "def forward(self, *inputs):\n\n x = functional.relu(functional.max_pool2d(self.conv1(*inputs), 2))\n x = functional.relu(functional.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = functional.relu(functional.max_pool2d(self.conv3(x), 2))\n x = x.view(x.size(0), -1)\n x = functional.relu(self.fc1(x))\n x = functional.dropout(x, training=self.training)\n x = self.fc2(x)\n return functional.log_softmax(x, dim=1)", "def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x", "def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x", "def dynamic_conv_forward(self, features: Tensor, weights: List[Tensor],\n biases: List[Tensor], num_insts: int) -> Tensor:\n n_layers = len(weights)\n x = features\n for i, (w, b) in enumerate(zip(weights, biases)):\n x = F.conv2d(x, w, bias=b, stride=1, padding=0, groups=num_insts)\n if i < n_layers - 1:\n x = F.relu(x)\n return x", "def forward(self, x):\n x = x.view(1, 1, *x.size())\n return self.conv(x, weight=self.weight, padding=self.padding).squeeze()", "def forward(self, x):\n if x.size()[0] != 1 or x.size()[1] != 200 or x.size()[2] != 96:\n return torch.zeros(1,1)\n x = x.view(1,1,x.size()[1],x.size()[2]) #1,1,200,96\n x = nn.MaxPool2d(2)(self.conv1(x))\n x = self.dropout(F.relu(x)) #1,3,96,46\n x = nn.MaxPool2d(2)(self.conv2(x))\n x = self.dropout(F.relu(x)) #1,6,47,21\n x = nn.MaxPool2d(2)(self.conv3(x))\n x = self.dropout(F.relu(x)) #1,12,21,8\n x = nn.MaxPool2d(2)(self.conv4(x))#1,24,8,2\n x = x.view(1,-1)#1,384\n x = self.fc1(F.relu(x))\n x = self.fc2(F.relu(x))\n x = self.fc3(F.relu(x))\n return F.sigmoid(x)", "def test_propagate_forward(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 1, 2, 2)\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n for node in nn.layers[3].nodes:\n node.weights = [1.0, 1.0]\n\n nn.propagate_forward([2, 3], test=True)\n model_output = nn.layers[-1].nodes[0].value\n\n self.assertEqual(round(model_output, 3), 0.823)", "def forward(self, batch):\n # Convolutional layers\n batch = self.conv1(batch)\n batch = F.relu(batch)\n batch = self.pool(batch)\n batch = self.conv2(batch)\n batch = F.relu(batch)\n batch = self.pool(batch)\n # Flatten\n batch = batch.reshape(batch.shape[0], -1)\n # Fully connected layers\n batch = self.fc1(batch)\n batch = self.dropout(batch)\n batch = self.fc2(batch)\n batch = torch.sigmoid(batch)\n return batch", "def conv_forward(A_prev, W, b, hparameters):\n \n # Size and dimension\n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n (f, f, n_C_prev, n_C) = W.shape\n stride = hparameters[\"stride\"]\n pad = hparameters[\"pad\"]\n n_H = int((n_H_prev - f + 2 * pad) / stride) + 1\n n_W = int((n_W_prev - f + 2 * pad) / stride) + 1\n\n Z = np.zeros((m, n_H, n_W, n_C)) # Initialize output\n A_prev_pad = zero_pad(A_prev, pad) # Padding the previous 
layer\n \n for i in range(m):\n a_prev_pad = A_prev_pad[i, :, :, :]\n for h in range(n_H):\n for w in range(n_W):\n for c in range(n_C):\n # Find the current \"slice\"\n vert_start = h * stride\n vert_end = h * stride + f\n horiz_start = w * stride\n horiz_end = w * stride + f\n a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]\n\n # Convolve the slice with current filter W and bias b\n Z[i, h, w, c] = conv_single_step(a_slice_prev, W[:, :, :, c], b[:, :, :, c])\n\n assert(Z.shape == (m, n_H, n_W, n_C))\n cache = (A_prev, W, b, hparameters)\n\n return Z, cache", "def forward(self, x):\n out = self.fc1(x)\n out = out.view(-1, 196, 4, 4)\n out = self.tconv(out)\n\n return out", "def forward(self, images):\n x0 = self.lrelu(self.bn0(self.conv0(images)))\n x1 = self.lrelu(self.bn1(self.conv1(x0)))\n x2 = self.lrelu(self.bn2(self.conv2(x1)))\n x3 = self.lrelu(self.bn3(self.conv3(x2)))\n x4 = self.lrelu(self.bn4(self.conv4(x3)))\n x5 = self.lrelu(self.bn5(self.conv5(x4)))\n\n # x1 = self.lrelu(self.bn1(self.conv1(x0)))\n out = x5\n return out", "def Naive_forwardpass(self):\n\n for filter_k in range(0, self.n_filters):\n filter_col = self.im2col(self.filter_map[filter_k].data_mtx)\n for hgt_indx in range(0, self.Output_Height):\n for wdth_indx in range(0, self.Output_Width):\n wdth_start_index = wdth_indx * self.stride_len\n wdth_end_index= wdth_start_index + self.filter_size\n hgt_start_index = hgt_indx * self.stride_len\n hgt_end_index = hgt_start_index + self.filter_size\n trn_img_area = self.input_vol.padded_mtx[:, wdth_start_index:wdth_end_index,\n hgt_start_index:hgt_end_index]\n trn_img_col = self.im2col(trn_img_area)\n self.output_Tensor.data_mtx[filter_k,wdth_indx , hgt_indx] = self.convolution_op(trn_img_col,\n filter_col) + np.sum(self.bias_vol[filter_k].data_mtx)\n return self.output_Tensor", "def forward(self, x, h, u, time, feat_kernels_enc_conv, feat_bias_enc_conv, feat_kernels_enc_fc, feat_bias_enc_fc, feat_kernels_enc_3dgru, feat_bias_enc_3dgru):\n\n\n conv1a_wt,conv1b_wt,conv2a_wt,conv2b_wt,conv2c_wt,conv3a_wt,conv3b_wt,conv3c_wt,conv4a_wt,conv4b_wt,conv5a_wt,conv5b_wt,conv5c_wt,conv6a_wt,conv6b_wt = feat_kernels_enc_conv\n conv1a_bias,conv1b_bias,conv2a_bias,conv2b_bias,conv2c_bias,conv3a_bias,conv3b_bias,conv3c_bias,conv4a_bias,conv4b_bias,conv5a_bias,conv5b_bias,conv5c_bias,conv6a_bias,conv6b_bias = feat_bias_enc_conv\n t_x_s_update_fc_layer, t_x_s_update_conv3d, t_x_s_reset_fc_layer, t_x_s_reset_conv3d, t_x_rs_fc_layer, t_x_rs_conv3d = feat_kernels_enc_3dgru\n t_x_s_update_bias, t_x_s_reset_bias, t_x_rs_bias = feat_bias_enc_3dgru\n\n conv1a = F.conv2d(x, conv1a_wt, bias=conv1a_bias, padding=3) #self.conv1a(x)\n rect1a = self.leaky_relu(conv1a)\n conv1b = F.conv2d(rect1a, conv1b_wt, bias=conv1b_bias, padding=1) #self.conv1b(rect1a)\n rect1 = self.leaky_relu(conv1b)\n pool1 = self.pool(rect1)\n \n \n conv2a = F.conv2d(pool1, conv2a_wt, bias=conv2a_bias, padding=1) #self.conv2a(pool1)\n rect2a = self.leaky_relu(conv2a)\n conv2b = F.conv2d(rect2a, conv2b_wt, bias=conv2b_bias, padding=1) #self.conv2b(rect2a)\n rect2 = self.leaky_relu(conv2b)\n conv2c = F.conv2d(pool1, conv2c_wt, bias=conv2c_bias) #self.conv2c(pool1)\n res2 = conv2c + rect2\n pool2 = self.pool(res2)\n \n \n conv3a = F.conv2d(pool2, conv3a_wt, bias=conv3a_bias, padding=1) #self.conv3a(pool2)\n rect3a = self.leaky_relu(conv3a)\n conv3b = F.conv2d(rect3a, conv3b_wt, bias=conv3b_bias, padding=1) #self.conv3b(rect3a)\n rect3 = self.leaky_relu(conv3b)\n conv3c = F.conv2d(pool2, conv3c_wt, 
bias=conv3c_bias) #self.conv3c(pool2)\n res3 = conv3c + rect3\n pool3 = self.pool(res3)\n \n conv4a = F.conv2d(pool3, conv4a_wt, bias=conv4a_bias, padding=1) #self.conv4a(pool3)\n rect4a = self.leaky_relu(conv4a)\n conv4b = F.conv2d(rect4a, conv4b_wt, bias=conv4b_bias, padding=1) #self.conv4b(rect4a)\n rect4 = self.leaky_relu(conv4b)\n pool4 = self.pool(rect4)\n \n \n conv5a = F.conv2d(pool4, conv5a_wt, bias=conv5a_bias, padding=1) #self.conv5a(pool4)\n rect5a = self.leaky_relu(conv5a)\n conv5b = F.conv2d(rect5a, conv5b_wt, bias=conv5b_bias, padding=1) #self.conv5b(rect5a)\n rect5 = self.leaky_relu(conv5b)\n conv5c = F.conv2d(pool4, conv5c_wt, bias=conv5c_bias) #self.conv5c(pool4)\n res5 = conv5c + rect5\n pool5 = self.pool(res5)\n \n \n conv6a = F.conv2d(pool5, conv6a_wt, bias=conv6a_bias, padding=1) #self.conv6a(pool5)\n rect6a = self.leaky_relu(conv6a)\n conv6b = F.conv2d(rect6a, conv6b_wt, bias=conv6b_bias, padding=1) #self.conv6b(rect6a)\n rect6 = self.leaky_relu(conv6b)\n res6 = pool5 + rect6\n pool6 = self.pool(res6)\n \n \n pool6 = pool6.view(pool6.size(0), -1)\n \n \n fc7 = F.linear(pool6, feat_kernels_enc_fc[0], bias=feat_bias_enc_fc[0]) #self.fc7(pool6)\n rect7 = self.leaky_relu(fc7)\n \n t_x_s_update = self.t_x_s_update(rect7, h, t_x_s_update_fc_layer, t_x_s_update_conv3d, t_x_s_update_bias)\n t_x_s_reset = self.t_x_s_reset(rect7, h, t_x_s_reset_fc_layer, t_x_s_reset_conv3d, t_x_s_reset_bias)\n \n update_gate = self.sigmoid(t_x_s_update)\n complement_update_gate = 1 - update_gate\n reset_gate = self.sigmoid(t_x_s_reset)\n \n rs = reset_gate * h\n t_x_rs = self.t_x_rs(rect7, rs, t_x_rs_fc_layer, t_x_rs_conv3d, t_x_rs_bias)\n tanh_t_x_rs = self.tanh(t_x_rs)\n \n gru_out = update_gate * h + complement_update_gate * tanh_t_x_rs\n \n return gru_out, update_gate", "def forward(self, input_tensor, cur_state):\n h_cur, c_cur = cur_state\n \n combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis\n \n combined_conv = self.conv(combined)\n cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1) \n i = torch.sigmoid(cc_i)\n f = torch.sigmoid(cc_f)\n o = torch.sigmoid(cc_o)\n g = torch.tanh(cc_g)\n \n c_next = f * c_cur + i * g\n h_next = o * torch.tanh(c_next)\n \n return h_next, c_next", "def forward(self, x):\n sources = list()\n new_sources = list()\n\n # apply lds to the initial image\n x_pool = self.lds(x)\n\n # apply vgg up to conv4_3\n for k in range(22):\n x = self.features[k](x)\n conv4_3_bn = self.ibn1(x)\n x_pool1_skip, x_pool1_icn = self.icn1(x_pool)\n s = self.Norm1(conv4_3_bn * x_pool1_icn)\n\n # apply vgg up to fc7\n for k in range(22, 34):\n x = self.features[k](x)\n conv7_bn = self.ibn2(x)\n x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)\n p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)\n\n x = self.features[34](x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extra):\n x = v(x)\n if k == 0:\n x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)\n w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)\n elif k == 2:\n x_pool4_skip, x_pool4_icn = self.icn4(x_pool3_skip)\n q = self.Norm4(self.dsc3(w) + x * x_pool4_icn)\n elif k == 4:\n o = self.Norm5(self.dsc4(q) + x)\n sources.append(o)\n elif k == 7 or k == 9:\n sources.append(x)\n else:\n pass\n\n # project the forward features into lower dimension.\n tmp1 = self.proj1(p)\n tmp2 = self.proj2(w)\n tmp3 = self.proj3(q)\n tmp4 = self.proj4(o)\n\n # The conv4_3 level\n proj1 = F.upsample(tmp1, scale_factor=2, mode='bilinear')\n proj2 = 
F.upsample(tmp2, scale_factor=4, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=8, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=16, mode='bilinear')\n proj = torch.cat([proj1, proj2, proj3, proj4], dim=1)\n\n agent1 = self.agent1(s)\n\n convert1 = self.convert1(proj)\n pred1 = torch.cat([agent1, convert1], dim=1)\n pred1 = self.merge1(pred1)\n new_sources.append(pred1)\n\n # The fc_7 level\n proj2 = F.upsample(tmp2, scale_factor=2, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=4, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=8, mode='bilinear')\n proj = torch.cat([proj2, proj3, proj4], dim=1)\n\n agent2 = self.agent2(p)\n convert2 = self.convert2(proj)\n pred2 = torch.cat([agent2, convert2], dim=1)\n pred2 = self.merge2(pred2)\n new_sources.append(pred2)\n\n # The conv8 level\n proj3 = F.upsample(tmp3, scale_factor=2, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=4, mode='bilinear')\n proj = torch.cat([proj3, proj4], dim=1)\n\n agent3 = self.agent3(w)\n convert3 = self.convert3(proj)\n pred3 = torch.cat([agent3, convert3], dim=1)\n pred3 = self.merge3(pred3)\n new_sources.append(pred3)\n\n # The conv9 level\n proj4 = F.upsample(tmp4, scale_factor=2, mode='bilinear')\n proj = proj4\n\n agent4 = self.agent4(q)\n convert4 = self.convert4(proj)\n pred4 = torch.cat([agent4, convert4], dim=1)\n pred4 = self.merge4(pred4)\n new_sources.append(pred4)\n\n for prediction in sources:\n new_sources.append(prediction)\n\n return new_sources", "def forward(self, x):\n cnn_out = self.hidden_layers(x) # apply hidden layers (N, n_in_channels, X, Y) -> (N, n_kernels, X, Y)\n pred = self.output_layer(cnn_out) # apply output layer (N, n_kernels, X, Y) -> (N, 1, X, Y)\n return pred", "def forward(self, batch):\n # Apply first convolution, followed by ReLU non-linearity; \n # use batch-normalization on its outputs\n batch = func.relu(self.conv1(self.conv1_normed(batch)))\n batch = func.relu(self.one1(self.one1_normed(batch)))\n \n # Apply conv2 and conv3 similarly\n batch = func.relu(self.conv2(self.conv2_normed(batch)))\n batch = func.relu(self.one2(self.one2_normed(batch)))\n batch = func.relu(self.conv3(self.conv3_normed(batch)))\n batch = func.relu(self.one3(self.one3_normed(batch)))\n \n \n # Pass the output of conv3 to the pooling layer\n batch = self.pool(batch)\n\n # Reshape the output of the conv3 to pass to fully-connected layer\n batch = batch.view(-1, self.num_flat_features(batch))\n \n # Connect the reshaped features of the pooled conv3 to fc1\n batch = func.relu(self.fc1(batch))\n \n # Connect fc1 to fc2 - this layer is slightly different than the rest (why?)\n batch = self.fc2(batch)\n\n\n # Return the class predictions\n #TODO: apply an activition function to 'batch'\n return func.sigmoid(batch)", "def forward(self, input):\n\n x = self.conv(input)\n x = self.bn(x)\n out = self.act(x)\n return out", "def forward(self, inputData):\n weights = self.Weights\n biases = self.Biases\n poolParams = self.poolParams\n cache = [] #zmienna przechowujaca produkty warstw - pomocna do propagacji wstecznej\n #warstwa wejsciowa\n layer0 = np.asarray(inputData)\n cache.append(layer0)\n #pierwsza warstwa konwolucyjna\n layer1 = convolution_forward(np.asarray([layer0]),weights[0],biases[0])\n cache.append(layer1)\n #pierwsza warstwa max poolingu\n layer2 = maxpool_forward(layer1, poolParams[0][0], poolParams[0][1])\n cache.append(layer2)\n #druga warstwa konwolucyjna\n layer3 = convolution_forward(layer2,weights[1],biases[1])\n cache.append(layer3)\n #druga 
warstwa max poolingu\n layer4 = maxpool_forward(layer3, poolParams[1][0], poolParams[1][1])\n cache.append(layer4)\n #pierwsza warstwa fully connected zrealizowana jako warstwa konwolucyjna\n layer5 = convolution_forward( layer4,weights[2] ,biases[2] )\n cache.append(layer5)\n #druga warstwa fully connected z funkcja aktywacji typu ReLU\n layer6 = act.relu(np.dot(weights[3],layer5[:,0]).transpose() + biases[3]).transpose()\n cache.append(layer6)\n #softmax\n layer7 = np.dot( weights[4], layer6[:,0] ).transpose() + biases[4]\n layer7 -= np.max(layer7)\n layer7 = np.exp(layer7)/sum(np.exp(layer7))\n\n return (layer7, cache)", "def forward(self,x):\n embeds = self.embedding(x)\n \n x = torch.unsqueeze(embeds,1)\n # print('x',x.shape)\n xs = []\n for conv in self.convs:\n x2 = torch.tanh(conv(x))\n # print('after filter',x2.shape)\n x2 = torch.squeeze(x2,-1)\n # print('after squeeze',x2.shape)\n x2 = F.max_pool1d(x2,x2.size(2))\n \n xs.append(x2)\n \n x = torch.cat(xs,2)\n x = x.view(x.size(0),-1)\n logits = self.fc(x)\n return torch.sigmoid(logits)", "def forward(self, inputs):\n x = F.relu(self.bn_1(self.conv_1(inputs)))\n\n # If it is the last layer, use sigmoid activation instead of hyperbolic tangent\n if self.last:\n x = F.tanh(self.bn_2(self.conv_2(x)))\n else:\n x = F.relu(self.bn_2(self.conv_2(x)))\n\n # Performing max pooling if needed\n if self.pool:\n x, indices = self.max_pool(x)\n\n return x", "def forward(self, raw_conv_feats):\n\n features = self.linear(raw_conv_feats)\n\n if self.simple:\n # if just getting an embed_size embedding for an image from here\n features = self.avgpool(features).squeeze()\n else:\n features = features.view(features.size(0), features.size(1), -1)\n features = features.permute(2, 0, 1).contiguous()\n\n return features", "def forward(self, x_conv_out: torch.Tensor) -> torch.Tensor:\n x_proj = F.relu(self.proj(x_conv_out))\n x_gate = torch.sigmoid(self.gate(x_conv_out))\n x_highway = x_gate * x_proj + (1 - x_gate) * x_conv_out\n return x_highway", "def forward(self, inputs):\n inputs = inputs.transpose(1, 2).unsqueeze(2).contiguous()\n internal_outputs = self.conv_blocks(inputs)\n outputs = []\n for idx in range(self.joint_count):\n outputs.append(self.fc_layer[idx](internal_outputs[:, :, 0, idx]))\n return torch.cat(outputs, 1), internal_outputs", "def conv_layer(n_inputs: int, n_filters: int,\n kernel_size: int = 3, stride=1,\n zero_batch_norm: bool = False, use_activation: bool = True,\n activation: torch.nn.Module = nn.ReLU(inplace=True)) -> torch.nn.Sequential:\n batch_norm = nn.BatchNorm2d(n_filters)\n # initialize batch normalization to 0 if its the final conv layer\n nn.init.constant_(batch_norm.weight, 0. 
if zero_batch_norm else 1.)\n conv_2d = conv(n_inputs, n_filters, kernel_size, stride=stride)\n layers = [conv_2d, batch_norm]\n if use_activation: layers.append(activation)\n return nn.Sequential(*layers)", "def forward(self, feats: Tuple[Tensor]) -> Tensor:\n x = feats[-1]\n\n x = self.conv(x)\n\n return x.reshape(-1, self.num_joints, 3)", "def forward(self, x):\n # define feedforward behavior, applying activations as necessary\n out = self.leaky_relu(self.conv1(x))\n out = self.leaky_relu(self.conv2(out))\n out = self.leaky_relu(self.conv3(out))\n out = self.leaky_relu(self.conv4(out))\n\n out = self.res_blocks(out)\n\n out = self.leaky_relu(self.deconv1(out))\n out = self.leaky_relu(self.deconv2(out))\n out = self.leaky_relu(self.deconv3(out))\n\n # tanh applied to last layer\n out = F.tanh(self.out_layer(out))\n out = torch.clamp(out, min=-0.5, max=0.5)\n\n return out", "def forward(self, x):\n\n x = torch.unsqueeze(x, dim=1)\n x = F.pad(x, self.pad, mode=self.padmode, value=self.padvalue)\n x = self.conv(x, weight=self.weight.to(x.device), groups=self.groups)\n return torch.squeeze(x, dim=1)", "def forward(self, x, vars=None, bn_training=True):\n\n if vars is None:\n vars = self.vars\n\n idx = 0\n bn_idx = 0\n\n for name, param in self.config:\n if name is 'conv2d':\n w, b = vars[idx], vars[idx + 1]\n # remember to keep synchrozied of forward_encoder and forward_decoder!\n x = F.conv2d(x, w, b, stride=param[4], padding=param[5])\n idx += 2\n # print(name, param, '\\tout:', x.shape)\n elif name is 'convt2d':\n w, b = vars[idx], vars[idx + 1]\n # remember to keep synchrozied of forward_encoder and forward_decoder!\n x = F.conv_transpose2d(x, w, b, stride=param[4], padding=param[5])\n idx += 2\n # print(name, param, '\\tout:', x.shape)\n elif name is 'linear':\n w, b = vars[idx], vars[idx + 1]\n o = F.linear(x, w, b)\n idx += 2\n # print('forward:', idx, x.norm().item())\n elif name is 'bn':\n w, b = vars[idx], vars[idx + 1]\n running_mean, running_var = self.vars_bn_mean[bn_idx], self.vars_bn_var[bn_idx]\n x = F.batch_norm(x, running_mean, running_var, weight=w, bias=b, training=bn_training)\n idx += 2\n bn_idx += 1\n\n elif name is 'flatten':\n x = x.reshape(((x.shape)[0], -1))\n elif name is 'reshape':\n # [b, 8] => [b, 2, 2, 2]\n x = x.view(x.size(0), *param)\n elif name is 'relu':\n x = F.relu(x)\n elif name is 'leakyrelu':\n x = F.leaky_relu(x, negative_slope=param[0], inplace=param[1])\n elif name is 'tanh':\n x = F.tanh(x)\n elif name is 'sigmoid':\n x = F.sigmoid(x)\n elif name is 'upsample':\n x = F.upsample_nearest(x, scale_factor=param[0])\n elif name is 'max_pool2d':\n x = F.max_pool2d(x, param[0], param[1], param[2])\n elif name is 'avg_pool2d':\n x = F.avg_pool2d(x, param[0], param[1], param[2])\n\n else:\n raise NotImplementedError\n\n # make sure variable is used properly\n assert idx == len(vars)\n assert bn_idx == len(self.vars_bn_mean)\n\n\n return o", "def _forward(self):\n\n tf.summary.image(\"image\", tensor=tf.reshape(self.x, (self.batch_size, 28, 28, 1)), max_outputs=10)\n x = self.x\n\n # x = layers.dropout(self.x, keep_prob=0.7)\n # with tf.variable_scope(\"layer1\") as scope:\n h = tf.nn.relu(layers.fully_connected(x, num_outputs=self.input_size // 2, activation_fn=None))\n # tf.summary.histogram(\"moving_mean1\", tf.get_variable(scope + \"moving_mean\"))\n # with tf.variable_scope(\"layer2\") as scope:\n # h = tf.nn.relu(layers.fully_connected(h, num_outputs=32, activation_fn=None))\n # tf.summary.histogram(\"moving_mean2\", 
tf.get_variable(\"moving_mean\"))\n # with tf.variable_scope(\"layer3\") as scope:\n self.logits = layers.fully_connected(h, num_outputs=10, activation_fn=None)\n # tf.summary.histogram(\"moving_mean3\", tf.get_variable(\"moving_mean\"))\n\n self.probability = tf.nn.softmax(self.logits)\n self.prediction = tf.argmax(self.probability, axis=1)", "def forward(self, x, c=None):\r\n if self.condition:\r\n assert c is not None\r\n x = torch.cat((x,c), dim=1)\r\n x = self.input(x)\r\n x = self.inputbn(x)\r\n for i, layer in enumerate(self.FC):\r\n x = layer(x)\r\n x = self.BatchNorm[i](x)\r\n x = self.Dropout(x)\r\n x = F.leaky_relu(x)\r\n x = self.output(x)\r\n if self.wgan:\r\n return x\r\n else:\r\n return torch.sigmoid(x)", "def conv_forward(A_prev, W, b, activation,\n padding=\"same\", stride=(1, 1)):\n m, h_prev, w_prev, c_prev = A_prev.shape\n kh, kw, c_prev, c_new = W.shape\n sh, sw = stride\n\n if padding == 'same':\n pad_h = int(((h_prev * (sh - 1)) - sh + kh) / 2)\n pad_w = int(((w_prev * (sw - 1)) - sw + kw) / 2)\n elif type(padding) == tuple:\n pad_h, pad_w = padding\n else:\n pad_h = 0\n pad_w = 0\n img_pad = np.pad(A_prev, ((0, 0), (pad_h, pad_h),\n (pad_w, pad_w), (0, 0)),\n 'constant', constant_values=(0))\n img_pad_h = img_pad.shape[1]\n img_pad_w = img_pad.shape[2]\n h_out = int((img_pad_h - kh) / sh) + 1\n w_out = int((img_pad_w - kw) / sw) + 1\n result = np.zeros((m, h_out, w_out, c_new))\n for i in range(h_out):\n for j in range(w_out):\n for k in range(c_new):\n result[:, i, j, k] = np.sum(img_pad[:,\n i * sh: i * sh + kh,\n j * sw: j * sw + kw] *\n W[:, :, :, k],\n axis=(1, 2, 3))\n return activation(result + b)", "def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def test_positional_convolution_forward(ctx):\n # num_batch * channel * height * width input\n # i.e. (2, 2, 6, 6)\n in_data = \\\n mx.nd.array(\n [\n [[[1, 2, -1, 0, 1, 1],\n [3, 6, -5, 4, 2, -2],\n [9, 6, -1, 3, 1, 3],\n [4, 2, 5, 7, 3, 1],\n [0, 1, 1, 2, 2, 1],\n [3, 1, 2, 4, 3, 3]],\n\n [[3, 1, 2, 4, 3, 3],\n [0, 1, 1, 2, 2, 1],\n [4, 2, 5, 7, 3, 1],\n [9, 6, -1, 3, 1, 3],\n [3, 6, -5, 4, 2, -2],\n [1, 2, -1, 0, 1, 1]]],\n [[[1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 2, 1],\n [0, 0, 1, 1, 2, 2],\n [3, 3, 0, -1, -1, -2],\n [3, 1, 0, 3, 3, 2],\n [5, 6, 7, -1, -2, 0]],\n\n [[5, 6, 7, -1, -2, 0],\n [3, 1, 0, 3, 3, 2],\n [3, 3, 0, -1, -1, -2],\n [0, 0, 1, 1, 2, 2],\n [6, 5, 4, 3, 2, 1],\n [1, 2, 3, 4, 5, 6]]]\n ], ctx=ctx)\n\n # num_filter * channel * K * K weight\n # i.e. (2, 2, 3, 3)\n weight = \\\n mx.nd.array(\n [\n [[[1, 0, 1],\n [0, 2, -1],\n [2, 3, 1]],\n\n [[1, 1, 0],\n [2, -1, 2],\n [3, -2, 4]]],\n\n [[[0, 1, 2],\n [-1, 2, 3],\n [4, 1, -5]],\n\n [[3, 0, -1],\n [-1, 2, 1],\n [5, 6, 2]]]\n ], ctx=ctx)\n\n # num_batch * channel * out_height * out_width scale\n # i.e. (2, 2, 6, 6)\n scale = \\\n mx.nd.array(\n [\n [[[1, 1, 1, 1, 1, 1],\n [1, -1, 1, -1, 1, -1],\n [-1, 1, -1, 1, -1, 1],\n [-1, -1, -1, -1, -1, -1],\n [2, 1, 2, 2, 1, 1],\n [1, 2, 1, 2, 1, 2]],\n\n [[1, 1, 1, 1, 1, 1],\n [1, -1, -1, 1, 1, 1],\n [-1, 1, -1, 1, -1, 1],\n [1, -1, -1, -1, -1, 1],\n [2, -1, 2, -2, 1, 1],\n [1, 2, 1, 2, 1, 2]]],\n\n [[[6, 5, 4, 3, 2, 1],\n [1, 2, 3, 4, 5, 6],\n [1, -1, 2, -2, 3, -3],\n [4, -4, 5, -5, 6, -6],\n [1, 1, 1, 1, 1, 1],\n [-1, -1, -1, -1, -1, -1]],\n\n [[-1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, 1],\n [4, -4, 5, -5, 6, -6],\n [1, -1, 2, -2, 3, -3],\n [1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 2, 1]]],\n ], ctx=ctx)\n\n # num_filter bias\n # i.e. 
(2, )\n bias = \\\n mx.nd.array(\n [1, 2], ctx=ctx)\n\n in_data_var = mx.symbol.Variable(name=\"in_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n scale_var = mx.symbol.Variable(name=\"scale\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n\n op = mx.symbol.contrib.PositionalConvolution(name='test_positional_convolution',\n data=in_data_var,\n scale=scale_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=2,\n pad=(1, 1), kernel=(3, 3), stride=(1, 1))\n be = op.bind(ctx=ctx, args={'in_data': in_data,\n 'scale': scale,\n 'weight': weight,\n 'bias': bias})\n be.forward(True)\n out_o = be.outputs[0].asnumpy()\n print(out_o)", "def forward(self, x):\n previous_batch, current_batch = x\n previous_batch_pc, previous_batch_f = previous_batch[0], previous_batch[1]\n current_batch_pc, current_batch_f = current_batch[0], current_batch[1]\n\n f1 = previous_batch_pc[:, :, 3:]\n pc1 = previous_batch_pc[:, :, :3]\n\n f2 = current_batch_pc[:, :, 3:]\n pc2 = current_batch_pc[:, :, :3]\n\n batch_size, n_points_prev, _ = previous_batch_pc.shape\n batch_size, n_points_cur, _ = current_batch_pc.shape\n\n # All outputs of the following layers are tuples of (pos, features)\n # --- Point Feature Part ---\n pf_prev_1, pf_prev_2, pf_prev_3 = self._point_feature_net(pc1.float(), f1.float())\n pf_curr_1, pf_curr_2, pf_curr_3 = self._point_feature_net(pc2.float(), f2.float())\n\n # --- Flow Embedding / Point Mixture Part ---\n _, fe_2, fe_3 = self._point_mixture(x1=pf_prev_3, x2=pf_curr_3)\n\n # --- Flow Refinement Part ---\n x = self._flow_refinement(pf_curr_1=pf_curr_1, pf_curr_2=pf_curr_2, pf_curr_3=pf_curr_3, fe_2=fe_2, fe_3=fe_3)\n\n # --- Final fully connected layer ---\n pos, features = x\n features = features.transpose(1, 2)\n x = self._fc(features)\n return x", "def __feed_forward(self, X):\n # go over all layers\n for layer in self.__layers:\n X = layer.compute_act(X)\n\n return X", "def forward(ctx, input, filter, bias=None, padding=None, index_back=None):\n outputs = conv1D_cuda(input, filter, bias, padding, index_back)\n output = outputs[0]\n xfft, yfft, W, WW, fft_size = outputs[1:]\n if ctx:\n ctx.W = W\n ctx.WW = WW\n ctx.fft_size = fft_size\n ctx.save_for_backward(xfft, yfft)\n return output", "def forward(self, input_array, training=False):\n self.input_array = input_array\n self.padded_input_array = padding(input_array,\n self.zero_padding)\n self.output_array = conv_matrix(self.padded_input_array, self.filters, self.filter_width, self.filter_height, self.output_array, self.stride, self.bias)\n\n element_wise_op(self.output_array,\n self.activator.forward)", "def hybrid_forward(self, F, x):\n identity = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv3(out)\n out = self.bn3(out)\n if self.downsample is not None:\n identity = self.downsample(x)\n out = F.Activation(out + identity, act_type='relu')\n\n if self.nonlocal_block is not None:\n out = self.nonlocal_block(out)\n return out", "def forward(self, images):\n x0 = self.lrelu(self.bn0(self.conv0(images)))\n out = x0\n return out", "def forward(self, x): \n pal1_sources = list()\n pal2_sources = list()\n loc_pal1 = list()\n conf_pal1 = list()\n loc_pal2 = list()\n conf_pal2 = list()\n\n # apply vgg up to conv3_3 relu\n for k in range(16):\n x = self.vgg[k](x)\n\n of1 = x\n s = self.L2Normof1(of1)\n pal1_sources.append(s)\n \n # apply vgg up to conv4_3 relu\n for k in range(16, 23):\n x = self.vgg[k](x)\n\n of2 = x\n s = 
self.L2Normof2(of2)\n pal1_sources.append(s)\n\n # apply vgg up to conv5_3 relu\n for k in range(23, 30):\n x = self.vgg[k](x)\n of3 = x\n s = self.L2Normof3(of3)\n pal1_sources.append(s)\n\n # apply vgg up to fc7\n for k in range(30, len(self.vgg)):\n x = self.vgg[k](x)\n of4 = x\n pal1_sources.append(of4)\n \n # apply extra layers and cache source layer outputs\n for k in range(2):\n x = F.relu(self.extras[k](x), inplace=True)\n of5 = x\n pal1_sources.append(of5)\n for k in range(2, 4):\n x = F.relu(self.extras[k](x), inplace=True)\n of6 = x\n pal1_sources.append(of6)\n\n ## fpn module\n \"\"\"\n lfpn6 = self.fpn_topdown6(of6)\n lfpn5 = self._upsample_product(self.fpn_topdown5(of6), self.fpn_latlayer5(of5))\n lfpn4 = self._upsample_product(self.fpn_topdown4(of5), self.fpn_latlayer4(of4))\n lfpn3 = self._upsample_product(self.fpn_topdown3(of4), self.fpn_latlayer3(of3))\n lfpn2 = self._upsample_product(self.fpn_topdown2(of3), self.fpn_latlayer2(of2))\n lfpn1 = self._upsample_product(self.fpn_topdown1(of2), self.fpn_latlayer1(of1))\n\n\n ef1 = self.fpn_fem3_3(lfpn1)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem4_3(lfpn2)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem5_3(lfpn3)\n ef3 = self.L2Normef3(ef3)\n\n ef4 = self.fpn_fem7(lfpn4)\n ef5 = self.fpn_fem6_2(lfpn5)\n ef6 = self.fpn_fem7_2(lfpn6)\n \"\"\"\n\n conv7 = F.relu(self.fpn_topdown[0](of6), inplace=True)\n x = F.relu(self.fpn_topdown[1](conv7), inplace=True)\n conv6 = F.relu(self._upsample_product(x, self.fpn_latlayer[0](of5)), inplace=True)\n\n x = F.relu(self.fpn_topdown[2](conv6), inplace=True)\n convfc7_2 = F.relu(self._upsample_product(x, self.fpn_latlayer[1](of4)), inplace=True)\n\n x = F.relu(self.fpn_topdown[3](convfc7_2), inplace=True)\n conv5 = F.relu(self._upsample_product(x, self.fpn_latlayer[2](of3)), inplace=True)\n\n x = F.relu(self.fpn_topdown[4](conv5), inplace=True)\n conv4 = F.relu(self._upsample_product(x, self.fpn_latlayer[3](of2)), inplace=True)\n\n x = F.relu(self.fpn_topdown[5](conv4), inplace=True)\n conv3 = F.relu(self._upsample_product(x, self.fpn_latlayer[4](of1)), inplace=True)\n\n ef1 = self.fpn_fem[0](conv3)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem[1](conv4)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem[2](conv5)\n ef3 = self.L2Normef3(ef3)\n ef4 = self.fpn_fem[3](convfc7_2)\n ef5 = self.fpn_fem[4](conv6)\n ef6 = self.fpn_fem[5](conv7)\n\n pal2_sources = (ef1, ef2, ef3, ef4, ef5, ef6)\n\n ## first shot \n for (x, l, c) in zip(pal1_sources, self.loc_pal1, self.conf_pal1):\n loc_pal1.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal1.append(c(x).permute(0, 2, 3, 1).contiguous())\n \n ## second shot\n for (x, l, c) in zip(pal2_sources, self.loc_pal2, self.conf_pal2):\n loc_pal2.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal2.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n # first shot\n loc_pal1 = torch.cat([o.view(o.size(0), -1) for o in loc_pal1], 1)\n conf_pal1 = torch.cat([o.view(o.size(0), -1) for o in conf_pal1], 1)\n \n # second shot\n loc_pal2 = torch.cat([o.view(o.size(0), -1) for o in loc_pal2], 1)\n conf_pal2 = torch.cat([o.view(o.size(0), -1) for o in conf_pal2], 1)\n\n if self.phase == 'test':\n # 测试时, 仅使用shot2 的输出\n output = self.detect(\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n self.softmax(conf_pal2.view(conf_pal2.size(0), -1,\n self.num_classes)), # conf preds\n )\n else:\n ## 训练时,使用shot1 和 shot2 的输出\n output = (\n loc_pal1.view(loc_pal1.size(0), -1, 4),\n conf_pal1.view(conf_pal1.size(0), -1, self.num_classes),\n loc_pal2.view(loc_pal2.size(0), -1, 
4),\n conf_pal2.view(conf_pal2.size(0), -1, self.num_classes))\n return output", "def forward(self, x):\n\n out = torch.relu(self.conv1(x))\n out = torch.relu(self.conv2(out))\n\n out = torch.relu(self.resnet_block(out))\n\n out = torch.relu(self.deconv1(out))\n out = torch.tanh(self.deconv2(out))\n\n return out", "def forward(network: dict, x: np.array) -> np.array:\n W1, W2, W3 = network['W1'], network['W2'], network['W3']\n b1, b2, b3 = network['b1'], network['b2'], network['b3']\n z1 = _forward(x, W1, b1, 'sigmoid')\n z2 = _forward(z1, W2, b2, 'sigmoid')\n y = _forward(z2, W3, b3, 'identity')\n return y", "def conv_relu_forward_naive(x, w, b, conv_param):\n\ta, conv_cache = conv_forward_naive(x, w, b, conv_param)\n\tout, relu_cache = relu_forward(a)\n\tcache = (conv_cache, relu_cache)\n\treturn out, cache", "def forward(self, input):\n\n # Work on each channel separately\n all_features = []\n\n for channel in range(0, self.n_channels):\n input_channel = input[:, :, channel]\n\n # Add a dummy (spatial) dimension for the time convolutions\n # Conv1D format : (batch_size, n_feature_maps, duration)\n input_channel = input_channel.unsqueeze(1)\n\n high = self.all_conv_high[channel](input_channel)\n low = self.all_conv_low[channel](input_channel)\n ap_residual = self.all_residual[channel](input_channel)\n\n # Time convolutions are concatenated along the feature maps axis\n output_channel = torch.cat([\n high,\n low,\n ap_residual\n ], dim=1)\n all_features.append(output_channel)\n\n # Concatenate along the feature maps axis\n all_features = torch.cat(all_features, dim=1)\n # Flatten for the Linear layers\n all_features = all_features.view(-1,\n 9 * self.n_channels * 12) # <-- 12: depends of the initial sequence length (100).\n # If you have shorter/longer sequences, you probably do NOT even need to modify the modify the network architecture:\n # resampling your input gesture from T timesteps to 100 timesteps will (surprisingly) probably actually work as well!\n\n # Fully-Connected Layers\n output = self.fc(all_features)\n\n return output" ]
[ "0.7160891", "0.7006296", "0.6955772", "0.6950637", "0.6934328", "0.6888566", "0.68857485", "0.6878123", "0.6862401", "0.6859037", "0.6839946", "0.68377876", "0.6797625", "0.67588794", "0.6732545", "0.6729661", "0.6714939", "0.6703585", "0.6667598", "0.66208446", "0.6617689", "0.6617689", "0.66139406", "0.66134214", "0.65998757", "0.65998757", "0.6594509", "0.6582347", "0.6579337", "0.6570174", "0.64864355", "0.6474537", "0.6451219", "0.6440551", "0.6437754", "0.6436255", "0.6397146", "0.6393394", "0.6373397", "0.6332954", "0.6310848", "0.6307111", "0.6305212", "0.6295394", "0.6295276", "0.62950456", "0.6281748", "0.6264043", "0.6220525", "0.6204509", "0.6202325", "0.6191907", "0.6191754", "0.61906016", "0.61906016", "0.6187061", "0.61836946", "0.61836946", "0.6157758", "0.61303896", "0.6127814", "0.61235005", "0.6118709", "0.61052686", "0.6097124", "0.60856825", "0.60791826", "0.6078824", "0.6078091", "0.6061172", "0.60449207", "0.60326463", "0.6031411", "0.6028358", "0.6017525", "0.6016666", "0.6015037", "0.6013999", "0.60132015", "0.60077286", "0.5997984", "0.5978668", "0.5977906", "0.5972745", "0.59708667", "0.596525", "0.5958564", "0.5955847", "0.5950986", "0.59483737", "0.59469444", "0.59285307", "0.5926482", "0.5925574", "0.59246165", "0.5922347", "0.59167945", "0.5913848", "0.5908472", "0.59025824" ]
0.6130725
59
messages1 and messages2 represent the encoded headlines from two news sources corr represents the correlation between the two currently returns average correlation
def average_similarity(messages1, messages2):
    if np.array_equal(messages2, messages1):
        return 1
    corr = np.corrcoef(messages1, messages2)
    return np.average(corr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def corr(arr1, arr2):\n\n\n X = []\n Y = []\n for index in range(len(arr1)):\n if arr1[index] == None or arr2[index] == None:\n continue\n X.append(arr1[index])\n Y.append(arr2[index])\n\n\n r = np.corrcoef(X, Y)[0,1]\n f = 0.5*np.log((1+r)/(1-r))\n se = 1/np.sqrt(len(X)-3)\n ucl = f + 2*se\n lcl = f - 2*se\n\n lcl = (np.exp(2*lcl) - 1) / (np.exp(2*lcl) + 1)\n ucl = (np.exp(2*ucl) - 1) / (np.exp(2*ucl) + 1)\n\n return r,lcl,ucl", "def _merge_correlation_helper(corr_mat1, mean1, std1, n1,\n corr_mat2, mean2, std2, n2):\n if corr_mat1 is None:\n return corr_mat2\n elif corr_mat2 is None:\n return corr_mat1\n elif len(mean1) == 0:\n return corr_mat2\n elif len(mean2) == 0:\n return corr_mat1\n\n std_mat1 = np.outer(std1, std1)\n std_mat2 = np.outer(std2, std2)\n mean_diff_vector = mean1 - mean2\n mean_diff_mat = np.outer(mean_diff_vector, mean_diff_vector)\n\n cov1 = corr_mat1 * std_mat1\n cov2 = corr_mat2 * std_mat2\n\n n = n1 + n2\n\n cov = cov1 * (n1 - 1) + cov2 * (n2 - 1) + mean_diff_mat * (n1 * n2) / n\n cov = cov / (n - 1)\n\n delta = mean2 - mean1\n M2_1 = (n1 - 1) * (std1 ** 2)\n M2_2 = (n2 - 1) * (std2 ** 2)\n M2 = M2_1 + M2_2 + delta ** 2 * n1 * n2 / n\n std = np.sqrt(M2 / (n - 1))\n\n std_mat = np.outer(std, std)\n corr_mat = cov / std_mat\n\n return corr_mat", "def getcorrelation(movieid1,movieid2):\n\n #the initialized integer, cosine_sum, has an initialized value of -100\n #such that in the case where correlation shouldn't be updated, the value\n #remains unchanged\n cosine_sum = NEGATIVE\n #variable r_a,i and r_b,i in the formula\n r_mv1 = 0\n r_mv2 = 0\n #numerator\n nume_sum = 0\n #two parts in the denominator (before taking square root)\n deno_mv1_sum = 0\n deno_mv2_sum = 0\n denominator = 0\n #variable that keeps track of count of common users\n currentCommon = 0\n\n #firstly check if the count of user passes the threshold for each movie\n if(len(dictMovie.get(movieid1))<threshold or\n len(dictMovie.get(movieid2))<threshold):\n #if either does not, returns a negative correlation (to be invalid)\n return cosine_sum\n #if both pass threshold, get the intersection (of users) of two movies\n else:\n intersect=dictMovie.get(movieid1).intersection(dictMovie.get(movieid2))\n #if the number of common users is smaller than threshold, return\n if (len(intersect) < threshold):\n return cosine_sum\n #otherwise, start counting correlation\n else:\n #get the average rating of two movies\n mv1_bar = float(dictMovieRate.get(movieid1))\n mv2_bar = float(dictMovieRate.get(movieid2))\n #iterate through common users and use formula\n for commonuser in intersect:\n #increment common user count\n currentCommon += 1\n r_mv1 = int(dictUser.get(commonuser).get(movieid1))\n r_mv2 = int(dictUser.get(commonuser).get(movieid2))\n nume_sum += ( (r_mv1)-mv1_bar )*( (r_mv2)-mv2_bar )\n deno_mv1_sum += ( (r_mv1)-mv1_bar )**2\n deno_mv2_sum += ( (r_mv2)-mv2_bar )**2\n #when done with denominator separate calculation, combine\n denominator = math.sqrt(deno_mv1_sum * deno_mv2_sum)\n #handle the case where denominator=0 (invalid)\n if denominator == 0:\n return cosine_sum\n #otherwise, successful. 
return valid values and pass in\n #common count to global variable for program to catch\n else:\n cosine_sum = nume_sum / denominator\n global currentCommonCount\n currentCommonCount = currentCommon\n return cosine_sum", "def _compute_correlation(ts1, ts2, comparison_mode, correlation_type,\r\n tail_type, num_permutations, confidence_level,\r\n perform_detailed_comparisons=False,\r\n expected_sample_id=None):\r\n # Convert our notion of tail type into the format expected by PyCogent's\r\n # correlation_test().\r\n if tail_type == 'two-sided':\r\n tail_type = None\r\n\r\n if comparison_mode != 'paired' and comparison_mode != 'expected':\r\n raise ValueError(\"Invalid comparison mode '%s'. Must be one of %r.\" %\r\n (comparison_mode, comparison_modes))\r\n\r\n # Make sure that the second taxa summary has only one sample if we weren't\r\n # provided an expected sample ID to compare against.\r\n if (comparison_mode == 'expected' and expected_sample_id is None and\r\n len(ts2[0]) != 1):\r\n raise ValueError(\"The second taxa summary file must contain a single \"\r\n \"sample (column) to compare all samples in the first taxa \"\r\n \"summary file against when the comparison mode is 'expected' \"\r\n \"and an expected sample ID is not provided. You provided a \"\r\n \"file with %d samples.\"\r\n % len(ts2[0]))\r\n\r\n if comparison_mode == 'paired':\r\n # Make sure the number of samples match between the two files (the IDs\r\n # do not have to match because of the sample ID map).\r\n if len(ts1[0]) != len(ts2[0]):\r\n raise ValueError(\"The two taxa summaries are incompatible because \"\r\n \"they do not have the same number of sample IDs. \"\r\n \"The taxa summaries must be made compatible \"\r\n \"before attempting to perform \"\r\n \"pairwise-comparisons between samples.\")\r\n\r\n # Make sure the taxa information is the same (i.e. the summaries have been\r\n # sorted and filled).\r\n if ts1[1] != ts2[1]:\r\n raise ValueError(\"The taxa do not match exactly between the two taxa \"\r\n \"summary files. The taxa must be sorted and filled \"\r\n \"before attempting to compare them.\")\r\n\r\n # Find the index of the expected sample ID.\r\n if comparison_mode == 'expected':\r\n if expected_sample_id:\r\n try:\r\n expected_idx = ts2[0].index(expected_sample_id)\r\n except ValueError:\r\n raise ValueError(\"The expected sample ID '%s' is not in the \"\r\n \"taxa summary file.\" % expected_sample_id)\r\n else:\r\n # We know the 'expected' taxa summary has a single sample in it, so\r\n # this is the only possible index.\r\n expected_idx = 0\r\n\r\n # Compute the overall correlation between each sample and the expected\r\n # sample, or each of the paired samples, and optionally the correlation\r\n # between each pair of samples individually.\r\n corr_vec = None\r\n if perform_detailed_comparisons:\r\n corr_vec = []\r\n num_comparisons = len(ts1[0])\r\n\r\n all_ts1_data = []\r\n all_ts2_data = []\r\n for samp_idx, samp_id in enumerate(ts1[0]):\r\n if comparison_mode == 'paired':\r\n paired_idx = samp_idx\r\n elif comparison_mode == 'expected':\r\n paired_idx = expected_idx\r\n else:\r\n # Redundant check, but here for safety in case the one above is\r\n # changed or removed.\r\n raise ValueError(\"Invalid comparison mode '%s'. 
Must be one of \"\r\n \"%r.\" % (comparison_mode, comparison_modes))\r\n\r\n # Grab the columns of data for the current sample and its pair.\r\n ts1_data = ts1[2].T[samp_idx]\r\n ts2_data = ts2[2].T[paired_idx]\r\n all_ts1_data.extend(ts1_data)\r\n all_ts2_data.extend(ts2_data)\r\n\r\n if perform_detailed_comparisons:\r\n # Compare the current sample and its pair.\r\n corr_coeff, param_p_val, unused, nonparam_p_val, conf_interval = \\\r\n correlation_test(ts1_data, ts2_data,\r\n method=correlation_type,\r\n tails=tail_type,\r\n permutations=num_permutations,\r\n confidence_level=confidence_level)\r\n\r\n # Compute the Bonferroni-corrected p-values.\r\n param_p_val_corr = min(param_p_val * num_comparisons, 1)\r\n nonparam_p_val_corr = None if nonparam_p_val is None else \\\r\n min(nonparam_p_val * num_comparisons, 1)\r\n\r\n corr_vec.append((samp_id, ts2[0][paired_idx], corr_coeff,\r\n param_p_val, param_p_val_corr, nonparam_p_val,\r\n nonparam_p_val_corr, conf_interval))\r\n\r\n # Compare all paired samples at once.\r\n results = correlation_test(all_ts1_data, all_ts2_data,\r\n method=correlation_type, tails=tail_type,\r\n permutations=num_permutations,\r\n confidence_level=confidence_level)\r\n # We don't need to return all of the permuted correlation coefficients.\r\n overall_corr = (results[0], results[1], results[3], results[4])\r\n return overall_corr, corr_vec", "def _merge_correlation(self, other):\n corr_mat1 = self.correlation_matrix\n corr_mat2 = other.correlation_matrix\n n1 = self.total_samples - self.row_is_null_count\n n2 = other.total_samples - other.row_is_null_count\n if n1 == 0:\n return corr_mat2\n if n2 == 0:\n return corr_mat1\n\n if corr_mat1 is None or corr_mat2 is None:\n return None\n\n # get column indices without nan\n col_ids1 = np.where(~np.isnan(corr_mat1).all(axis=0))[0]\n col_ids2 = np.where(~np.isnan(corr_mat2).all(axis=0))[0]\n\n if len(col_ids1) != len(col_ids2) or len(col_ids1) <= 1:\n return None\n if (col_ids1 != col_ids2).any():\n return None\n\n mean1 = np.array(\n [self._profile[idx].profile['statistics']['mean']\n for idx in range(len(self._profile)) if idx in col_ids1])\n std1 = np.array(\n [self._profile[idx].profile['statistics']['stddev']\n for idx in range(len(self._profile)) if idx in col_ids1])\n\n mean2 = np.array(\n [other._profile[idx].profile['statistics']['mean']\n for idx in range(len(self._profile)) if idx in col_ids2])\n std2 = np.array(\n [other._profile[idx].profile['statistics']['stddev']\n for idx in range(len(self._profile)) if idx in col_ids2])\n return self._merge_correlation_helper(corr_mat1, mean1, std1, n1,\n corr_mat2, mean2, std2, n2)", "def determine_correlation(var1,var2):\n v1 = np.array(var1)\n v2 = np.array(var2)\n mat = np.c_[(v1,v2)]# np.vstack((v1,v2)) #\n corr = np.corrcoef(mat.T)\n return corr[0][1]", "def image_correlation(image1, image2):\n im1=im_to_coord(image1)\n im2=im_to_coord(image2)\n z1=im1[:,2]\n z2=im2[:,2]\n mu_z1 = z1.mean()\n mu_z2 = z2.mean()\n n = z1.shape[0]\n s_z1 = z1.std(0, ddof=n - 1)\n s_z2 = z2.std(0, ddof=n - 1)\n cov = np.dot(z1,\n z2.T) - n * np.dot(mu_z1,\n mu_z2)\n return cov / np.dot(s_z1, s_z2)", "def coupling_coef_corrs(coupling_coefs1, coupling_coefs2, correlation='pearson'):\n n_neurons = coupling_coefs1.shape[0]\n correlations = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n ccs1 = coupling_coefs1[neuron]\n ccs2 = coupling_coefs2[neuron]\n\n if np.array_equal(ccs1, ccs2):\n correlations[neuron] = 1.\n elif np.all(ccs1 == 0) or np.all(ccs2 == 0):\n 
correlations[neuron] = 0\n else:\n if correlation == 'pearson':\n correlations[neuron] = np.corrcoef(ccs1, ccs2)[0, 1]\n elif correlation == 'spearman':\n correlations[neuron] = spearmanr(ccs1, ccs2).correlation\n elif correlation == 'cosine':\n correlations[neuron] = cosine_similarity(ccs1, ccs2)\n\n return correlations", "def _get_correlation(self, user1_id, user2_id):\n shared_ratings = self.get_shared_ratings(user1_id, user2_id)\n\n # Substract means for both users\n shared_ratings['rating_x'] -= self.get_mean_user_rating(user1_id)\n shared_ratings['rating_y'] -= self.get_mean_user_rating(user2_id)\n\n # Compute correlation as inverse of disparity\n disparity = (shared_ratings['rating_x'] - shared_ratings['rating_y']).abs().mean()\n return 1.0/disparity", "def test__same_text_correlation(self):\n \n _log.info('-'*80)\n \n # arrange \n text1 = \"love is rain as long story short\"\n text2 = text1\n\n dump_file = getInputFile(\"swiki_knowledge_output.xml\")\n parsed_file = getOutputFile(\"swiki_knowledge_output.parsed.xml\")\n #wdb_file = getOutputFile(\"swiki_knowledge_output.wdb\")\n\n articles = ['Rain', 'Love', 'Tree'] \n \n # act\n wn.make_dump(dump_file, articles, compress=False)\n wn.parse_dump(dump_file, parsed_file)\n db_wrapper = wn.build_database_wrapper(parsed_file, StopWordsStemmer([]))\n \n #self.addCleanup(os.remove, self.tmp_dump_file)\n \n comparer = SemanticComparer(db_wrapper)\n correlation = comparer.compare(text1, text2)\n _log.info(test_utils.get_texts_correlation_message(text1, text2, correlation))\n self.assertAlmostEqual(correlation, 1.0, msg=\"for same text correlation should be 1\")", "def corr_score(file1,file2,delta,bin=1.,dur=100.,ncell=500):\r\n\td1 = numpy.loadtxt(file1)\r\n\td2 = numpy.loadtxt(file2)\r\n\tx = numpy.zeros(int(ncell*dur/bin))\r\n\ty = numpy.zeros(int(ncell*dur/bin))\r\n\tfor j in range(ncell):\r\n\t\tif d1.size == 2:\r\n\t\t\ts1 = numpy.array(d1[0]*(d1[1]==j))\r\n\t\telse:\r\n\t\t\ts1 = d1[d1[:,1]==j,0]\r\n\t\tif d2.size == 2:\r\n\t\t\ts2 = numpy.array(d2[0]*(d2[1]==j))\r\n\t\telse:\r\n\t\t\ts2 = d2[d2[:,1]==j,0]\r\n\t\tkern = numpy.append(numpy.arange(delta/bin),numpy.arange(delta/bin,-1,-1))\r\n\t\tts1,dump = pylab.histogram(s1,numpy.arange(0.,dur+bin,bin))\r\n\t\tts2,dump = pylab.histogram(s2,numpy.arange(0.,dur+bin,bin))\r\n\t\tx[j*dur/bin:(j+1)*dur/bin] = numpy.convolve(ts1,kern,'same')\r\n\t\ty[j*dur/bin:(j+1)*dur/bin] = numpy.convolve(ts2,kern,'same')\r\n x = x - pylab.mean(x)\r\n y = y - pylab.mean(y)\r\n cor = sum(x*y)/(len(x)*pylab.std(x)*pylab.std(y))\r\n return cor", "def corr(a, b):\n ma = np.mean(a)\n mb = np.mean(b)\n\n a_ = a - ma\n b_ = b - mb\n\n norma = np.sqrt(np.sum(a_ ** 2, axis=0))\n normb = np.sqrt(np.sum(b_ ** 2, axis=0))\n\n norma[norma < TOLERANCE] = 1.0\n normb[normb < TOLERANCE] = 1.0\n\n a_ *= 1.0 / norma\n b_ *= 1.0 / normb\n\n ip = np.dot(a_.T, b_)\n\n if ip.shape == (1, 1):\n return ip[0, 0]\n else:\n return ip", "def combine_for_correlation(df1=get_us_ridership(), df2=get_sales_data()):\n df1.index.astype(int)\n df2.index.astype(int)\n temp = pd.concat([df1, df2], axis=1)\n return temp.dropna()", "def calculate_distance(seq1,seq2):\r\n mmcounter = 0 #mismatchcount\r\n seqlen = 0 #sequence length\r\n \r\n #cout the sequence length and mismatches\r\n for i in range(len(seq1)):\r\n if seq1[i]!='-' and seq2[i]!='-':\r\n seqlen += 1\r\n if seq1[i] != seq2[i]:\r\n mmcounter += 1\r\n #compute p\r\n p = (mmcounter/seqlen)\r\n #adjust p \r\n if p >= 0.75:\r\n pcorr = float(30)\r\n else:\r\n pcorr = 
(-3/4)*np.log(1-((4/3)*p))\r\n \r\n return(pcorr)", "def find_correspondences(pts1, pts2, desc1, desc2, match_score_type='ratio'):\n N = pts1.shape[0]\n X = np.sum(desc1**2, axis=1, keepdims=True)\n Y = np.sum(desc2**2, axis=1, keepdims=True).T\n XY = np.dot(desc1,desc2.T)\n L = X + Y - 2*XY\n\n D = (np.maximum(L, 0))\n scores = np.min(D, axis = 1)\n indices = np.argmin(D,axis = 1)\n corr = []\n for j,index in enumerate(indices):\n corr.append(np.hstack([pts1[j],pts2[index]]))\n if match_score_type=='ratio': \n p = np.sort(D, axis = 1)\n scores = p[:,0]/p[:,1]\n return np.array(corr), indices, scores", "def xcorr2_qwik(img0, img1):\n # 2009-12-17 10:13 IJC: Created. Based on idea by J. Johnson.\n from numpy import zeros, max, min, sum\n\n im00 = img0.sum(0)\n im01 = img0.sum(1)\n im10 = img1.sum(0)\n im11 = img1.sum(1)\n n0 = len(im00)\n n1 = len(im01)\n noffsets0 = 2*n0-1\n noffsets1 = 2*n1-1\n corr0 = zeros(noffsets0,float)\n corr1 = zeros(noffsets1,float)\n\n for ii in range(noffsets0):\n firstind0 = max((ii-n0+1,0))\n lastind0 = min((n0, ii+1))\n firstind1 = max((n0-ii-1,0))\n lastind1 = min((2*n0-ii-1,n0))\n corr0[ii] = sum(im00[firstind0:lastind0]*im10[firstind1:lastind1])\n\n for jj in range(noffsets1):\n firstind0 = max((jj-n0+1,0))\n lastind0 = min((n0, jj+1))\n firstind1 = max((n0-jj-1,0))\n lastind1 = min((2*n0-jj-1,n0))\n corr1[jj] = sum(im01[firstind0:lastind0]*im11[firstind1:lastind1])\n\n ret = find([corr0==corr0.max()])-n0+1, find([corr1==corr1.max()])-n0+1\n return ret", "def compare_emails(first, second):\n match = 0\n ignored = ['Subject', 'From', 'X-Authentication-Warning', 'Received']\n # Compare subject\n if first.subject == second.subject:\n match += SUBJECT_PRIORITY\n elif not_empty(first.subject, second.subject):\n match += compare_dicts(compute_word_frequencies_from_text(first.subject),\n compute_word_frequencies_from_text(second.subject)) * SUBJECT_PRIORITY / 2\n # they are not equal, only some words occurrences\n\n # Compare from\n if first.From == second.From:\n match += FROM_PRIORITY\n\n # compare X authentication warning\n if first.x_authentication_warning == second.x_authentication_warning:\n match += WARNING_PRIORITY\n\n # compare receive history chain\n length = max(len(first.received), len(second.received))\n receive_match = set(first.received).intersection(second.received)\n if length > 0:\n match += (len(receive_match) / length) * RECEIVED_PRIORITY\n\n MatchedHeaders = 0\n # compare secondary headers\n for header in first.AllHeaders:\n if header[0] not in ignored:\n if header in second.AllHeaders:\n MatchedHeaders += 1\n\n match += SECONDARY_PRIORITY * MatchedHeaders / max(len(first.AllHeaders), len(second.AllHeaders))\n # compare payloads\n match += PAYLOAD_PRIORITY * compare_payloads(first.payloads, second.payloads)\n return match", "def correlate(eye1, eye2):\n\n assert len(eye1) == len(eye2), \"Eyes must come in pairs\"\n\n # Start off this way; since A and B have no particular meaning, this could\n # just as well be reversed.\n eyeA = [eye1[0]]\n eyeB = [eye2[0]]\n\n # Skip frame 0, since it has already been assigned\n for i in range(1, len(eye1)):\n da1 = distance(eyeA[-1], eye1[i])\n da2 = distance(eyeA[-1], eye2[i])\n db1 = distance(eyeB[-1], eye1[i])\n db2 = distance(eyeB[-1], eye2[i])\n if (da1 < db1) and (db2 < da2):\n eyeA.append(eye1[i])\n eyeB.append(eye2[i])\n elif (da2 < db2) and (db1 < da1):\n eyeA.append(eye2[i])\n eyeB.append(eye1[i])\n else:\n raise RuntimeError('Ambiguous eye assignment')\n return (eyeA, eyeB)", "def 
calculate_correlation(data):\n pass", "def correlate(spectrum1, spectrum2, range=None, unit=None, errorweight=False):\n\n if range is not None:\n spectrum1 = spectrum1.slice(*range, unit=unit)\n spectrum2 = spectrum2.slice(*range, unit=unit)\n\n if not (spectrum1.xarr.shape == spectrum2.xarr.shape) or not all(spectrum1.xarr == spectrum2.xarr):\n spectrum2 = interpolation.interp(spectrum2, spectrum1)\n\n data1 = spectrum1.data\n data2 = spectrum2.data\n\n xcorr = np.correlate(data1, data2, mode='same')\n\n # very simple propagation of error\n # each element is multiplied, multiplicative error is given such that (sigma_xy/xy)**2 = (sigma_x/x)**2 + (sigma_y/y)**2\n # error = (np.correlate( (spectrum1.error/spectrum1.data)**2 , np.ones(xcorr.shape), mode='same') +\n # np.correlate( (spectrum2.error/spectrum2.data)**2 , np.ones(xcorr.shape), mode='same'))**0.5 * xcorr\n # That approach sucks - what if data == 0?\n #\n # this might be more correct: http://arxiv.org/pdf/1006.4069v1.pdf eqn 4\n # but it doesn't quite fit my naive expectations so:\n error = (np.correlate((spectrum1.error)**2, np.ones(xcorr.shape), mode='same') +\n np.correlate((spectrum2.error)**2, np.ones(xcorr.shape), mode='same'))**0.5\n\n xarr = spectrum1.xarr\n x_range = xarr.max()-xarr.min()\n xmin = -x_range/2.\n xmax = x_range/2.\n offset_values = np.linspace(xmin, xmax, len(xarr))\n\n offset_xarr = units_module.SpectroscopicAxis(offset_values, unit=xarr.unit)\n\n header = headers.intersection(spectrum1.header, spectrum2.header)\n header['CRPIX1'] = 1\n try:\n header['CRVAL1'] = xmin\n except ValueError:\n try:\n header['CRVAL1'] = xmin.tolist()\n except NotImplementedError:\n header['CRVAL1'] = xmin.value\n try:\n header['CDELT1'] = offset_xarr.cdelt()\n except ValueError:\n header['CDELT1'] = offset_xarr.cdelt().value\n\n return classes.XCorrSpectrum(xarr=offset_xarr, data=xcorr, header=header, error=error)", "def correlation(self, other):\n dates=self.get_dates(other.get_dates())\n #print(len(self.get_values(dates)))\n #print(len(other.get_values(dates)))\n #print(self.get_values(dates))\n r,p=stats.pearsonr(self.get_values(dates), other.get_values(dates))\n return r", "def corr_coef_chan0(\n a: Union[np.ndarray, torch.Tensor], b: Union[np.ndarray, torch.Tensor]\n) -> float:\n if a is None or b is None:\n return None\n a = a[0:1,]\n b = b[0:1,]\n return corr_coef(a, b)", "def main_correlate(tel1, date1, tel2, date2, nchan, tstart, tend, dedisperse,\n do_foldspec, ntbin, ngate,\n do_waterfall, ntw_min,\n save_xcorr, verbose=0):\n comm = MPI.COMM_WORLD\n if comm.size > 1 and save_xcorr:\n if comm.rank == 0:\n\t print(\"Warning, h5py mpio is sometimes slow. 
Consider disabling save_xcorr\")\n\t# save_xcorr = False\n # observing parameters\n t0 = Time(tstart, scale='utc')\n t1 = Time(tend, scale='utc')\n\n Obs = obsdata()\n obskey1 = Obs[tel1].nearest_observation(date1)\n obskey2 = Obs[tel2].nearest_observation(date2)\n psr1 = Obs[tel1][obskey1]['src']\n psr2 = Obs[tel2][obskey2]['src']\n files1 = Obs[tel1].file_list(obskey1)\n files2 = Obs[tel2].file_list(obskey2)\n\n assert psr1 == psr2\n if comm.rank == 0:\n print(\"forming visibilities from (telescope, observation_key) = \\n\"\n \"\\t ({0}, {1}) and ({2}, {3}), source {4}\".format(tel1, obskey1, tel2, obskey2, psr1))\n dm = Obs['psrs'][psr1]['dm']\n with LOFARdata_Pcombined(*files1, comm=comm) as fh1,\\\n GMRTdata(*files2, comm=comm) as fh2:\n phasepol1 = Obs['lofar'][obskey1].get_phasepol(fh1.time0, rphase=None)\n phasepol2 = Obs['gmrt'][obskey2].get_phasepol(fh2.time0, rphase=None)\n nt = min(fh1.ntimebins(t0, t1), fh2.ntimebins(t0, t1))\n # out = (foldspec, icount, waterfall)\n out = correlate.correlate(fh1, fh2, dm=dm, nchan=nchan, ngate=ngate,\n ntbin=ntbin, nt=nt, ntw=ntw_min,\n t0=t0, t1=t1, dedisperse=dedisperse,\n phasepol=(phasepol1, phasepol2),\n do_waterfall=do_waterfall,\n do_foldspec=do_foldspec,\n save_xcorr=save_xcorr,\n comm=comm)\n myfoldspec = out[0]\n myicount = out[1]\n mywaterfall = out[2]\n\n savepref = \"{0}{1}_{2}chan{3}ntbin\".format(tel1[0], tel2[0], nchan, ntbin)\n dt = t1 - t0\n if do_waterfall:\n waterfall = np.zeros_like(mywaterfall)\n comm.Reduce(mywaterfall, waterfall, op=MPI.SUM, root=0)\n if comm.rank == 0:\n # waterfall = normalize_counts(waterfall)\n np.save(\"{0}waterfall_{1}+{2:08}sec.npy\"\n .format(savepref, t0, dt.sec), waterfall)\n\n if do_foldspec:\n foldspec = np.zeros_like(myfoldspec)\n icount = np.zeros_like(myicount)\n comm.Reduce(myfoldspec, foldspec, op=MPI.SUM, root=0)\n comm.Reduce(myicount, icount, op=MPI.SUM, root=0)\n if comm.rank == 0:\n fname = (\"{0}foldspec_{1}+{2:08}sec.npy\")\n iname = (\"{0}icount_{1}+{2:08}sec.npy\")\n np.save(fname.format(savepref, t0, dt.sec), foldspec)\n np.save(iname.format(savepref, t0, dt.sec), icount)\n\n # get normalized flux in each bin (where any were added)\n f2 = normalize_counts(foldspec, icount)\n foldspec1 = f2.sum(axis=2)\n fluxes = foldspec1.sum(axis=0)\n foldspec3 = f2.sum(axis=0)\n\n with open('{0}flux_{1}+{2:08}sec.dat'\n .format(savepref, t0, dt.sec), 'w') as f:\n for i, flux in enumerate(fluxes):\n f.write('{0:12d} {1:12.9g}\\n'.format(i + 1, flux))\n\n plots = True\n if plots and comm.rank == 0:\n if do_waterfall:\n w = waterfall.copy()\n try:\n pmap('{0}waterfall_{1}+{2:08}sec.pgm'\n .format(savepref, t0, dt.sec), w, 1, verbose=True)\n except:\n pass\n if do_foldspec:\n pmap('{0}folded_{1}+{2:08}sec.pgm'\n .format(savepref, t0, dt.sec), foldspec1, 0, verbose)\n # TODO: Note, I (aaron) don't think this works for LOFAR data\n # since nchan=20, but we concatenate several subband files\n # together, so f2.nchan = N_concat * nchan\n # It should work for my \"new\" LOFAR_Pconcate file class\n pmap('{0}foldedbin_{1}+{2:08}sec.pgm'\n .format(savepref, t0, dt.sec),\n f2.transpose(0, 2, 1).reshape(nchan, -1), 1, verbose)\n pmap('{0}folded3_{1}+{2:08}sec.pgm'\n .format(savepref, t0, dt.sec), foldspec3, 0, verbose)", "def find_similarity(message1, message2):\n total = 0\n for i in range(len(message1)):\n max = 0\n for j in range(len(message2)):\n message1_encoded = embed([message1[i]])\n message2_encoded = embed([message2[j]])\n sim = average_similarity(message1_encoded, message2_encoded)\n if sim > 
max:\n max = sim\n total += max\n return total/len(message1)", "def cross_correlation(values1, values2, lags=100):\n lags, corr, line, x = pl.xcorr( values1, values2, maxlags=lags, usevlines=False, marker=None)\n return lags, corr", "def coupling_coef_corrs(fits_path, dataset1, dataset2):\n fits = h5py.File(fits_path, 'r')\n coefs1 = np.median(fits[dataset1]['coupling_coefs'][:], axis=0)\n coefs2 = np.median(fits[dataset2]['coupling_coefs'][:], axis=0)\n\n n_neurons = coefs1.shape[0]\n corrs = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n corrs[neuron] = np.corrcoef(coefs1[neuron], coefs2[neuron])[0, 1]\n\n return corrs", "def corr_1d(tensor_a: torch.Tensor, tensor_b: torch.Tensor):\n assert tensor_a.dim() == 2 and tensor_b.dim() == 2, \\\n \"corr_1d :: tensor_a and tensor_b must be 2D\"\n assert tensor_a.size(0) == tensor_b.size(0) and \\\n tensor_a.dim(1) == tensor_b.dim(1), \\\n \"corr_1d :: tensor_a and tensor_b must have same shape\"\n\n num = tensor_a.mul(tensor_b).mean(1) - tensor_a.mean(1)*tensor_b.mean(1)\n den = ((tensor_a.pow(2).mean(1) - tensor_a.mean(1).pow(2)).pow(0.5) *\n (tensor_b.pow(2).mean(1) - tensor_b.mean(1).pow(2)).pow(0.5))\n return num / den.add(1e-8)", "def mimo_sync(self,re1,im1,re2,im2):\n wnd = np.int_(4*(self._GI + self._FFT))\n Nprep = np.int_(self._FFT/2)\n mavg = np.int_(self._FFT/4) # moving average period for power and corr12\n mavg3 = 2*self._FFT # average period for corr3\n if np.size(re1)!=np.size(im1) or np.size(re2)!=np.size(im2) or np.size(re1)!=np.size(re2):\n raise Exception(\"Vectors re1, im1, re2, im2 do not have the same length!!!\")\n if np.size(re1) < (wnd-mavg+mavg3+self._FFT/2):\n raise Exception(\"Vectors re1, im1, re2, im2 not long enough ({}) to run synchronization (required length={})!!!\".format(np.size(re1),wnd-mavg+mavg3+self._FFT/2))\n iqcpx = np.empty(re1.shape, dtype=complex)\n iqcpx.real = (re1+re2)/2\n iqcpx.imag = (im1+im2)/2\n iqdata = np.concatenate((np.zeros(Nprep,),iqcpx))\n power = np.zeros((wnd,1))\n corr12 = np.zeros((wnd,1), dtype=complex)\n corr3 = np.zeros((wnd,1), dtype=complex)\n # perform the autocorrelation on the STF symbols\n for n in range(0, wnd-mavg):\n power[n] = np.real(np.dot(iqdata[n:n+mavg].transpose(),\n iqdata[n:n+mavg].conjugate())/mavg)\n corr12[n+mavg] = np.sum(iqdata[n+self._FFT/4:n+self._FFT/4+mavg] *\n np.conj(iqdata[n+self._FFT/2:n+self._FFT/2+mavg]) -\n iqdata[n:n+mavg] *\n np.conj(iqdata[n+self._FFT/4:n+self._FFT/4+mavg]))\n corr3[n+mavg] = np.dot(np.transpose(iqdata[n+self._FFT/4:n+self._FFT/4+mavg3]),\n np.conj(iqdata[n+self._FFT/2:n+self._FFT/2+mavg3]))\n # get first index where power rises above threshold\n idx1 = np.flatnonzero((power>0.75*np.sum(power)/np.size(power)))[0]\n idx2 = np.argmax(np.abs(corr12[idx1:idx1+self._FFT/2]))\n idx = idx1+idx2-Nprep\n c3i = idx1+idx2-Nprep-1+mavg\n # get the phase at the start index and calculate the frequency offset\n fo_meas = -np.angle(np.mean(corr3[c3i:c3i+mavg]))/(np.pi/2*self._FFT)*self._FS\n return idx, fo_meas", "def compute_ncc_impl(image1, image2):\n height, width, _ = image1.shape\n ncc = np.zeros((height, width))\n for i in range(height):\n for j in range(width):\n ncc[i, j] = np.correlate(image1[i, j], image2[i, j])[0]\n return ncc", "def delay_between(h1, h2):\n h1 = np.atleast_2d(h1)\n h2 = np.atleast_2d(h2)\n assert h1.shape[-1] == h2.shape[-1], \"h1 and h2 must have same number of samples\"\n\n L = h1.shape[-1]\n\n delay = np.zeros((h1.shape[0], h2.shape[0]), dtype=int)\n for i in range(h1.shape[0]):\n for j in 
range(h2.shape[0]):\n xcorrmax = np.argmax(np.correlate(h2[j], h1[i], mode=\"full\"))\n delay[i, j] = xcorrmax - L + 1\n\n return delay.squeeze()", "def corr(self):\n pass", "def corr(A,B):\n\n # Rowwise mean of input arrays & subtract from input arrays themeselves\n A_mA = A - A.mean(1)[:,None]\n B_mB = B - B.mean(1)[:,None]\n\n # Sum of squares across rows\n ssA = (A_mA**2).sum(1);\n ssB = (B_mB**2).sum(1);\n\n # Finally get corr coeff\n return np.dot(A_mA,B_mB.T)/np.sqrt(np.dot(ssA[:,None],ssB[None]))", "def GetCorr_and_RMSE(v1, v2):\n\treturn pearsonr(v1,v2)[0], math.sqrt(np.mean([(a-b)**2 for a,b in zip(v1,v2)]))", "def overlay_lines(self, p1, p2, FT, frame):\n \n if p1 == p2:\n self.show_dif_class_msg()\n \n else:\n a1 = complete_scores[p1, p2][0]\n a2 = complete_scores[p1, p2][1]\n projection1 = make_1D(extract_2D[p1], a1)\n projection2 = make_1D(extract_2D[p2], a2)\n\n if FT: \n pad_p1 = np.pad(projection1.vector, pad_width=(0, shape-projection1.size()))\n pad_p2 = np.pad(projection2.vector, pad_width=(0, shape-projection2.size()))\n A = abs(np.fft.rfft(pad_p1))\n B = abs(np.fft.rfft(pad_p2))\n \n f = Figure(figsize=(8,4))\n ax = f.add_subplot(111)\n\n ax.bar(range(len(A)), A, alpha=0.35, color='deepskyblue', ec='k', linewidth=1)\n ax.bar(range(len(B)), B, alpha=0.35, color='yellow', ec='k', linewidth=1)\n \n ax.get_xaxis().set_ticks([])\n ax.set_xlabel('frequency component')\n ax.set_ylabel('Amplitude')\n\n else:\n a2_flip = complete_scores[p1, p2][1] + 180\n projection2_flip = make_1D(extract_2D[p2], a2_flip)\n\n score_default, r, c = slide_score(projection1, projection2) # Score and location of optimum\n score_flip, r_flip, c_flip = slide_score(projection1, projection2_flip) # Score of phase flipped\n\n if score_default <= score_flip:\n ref_intensity, comp_intensity = r, c\n else:\n ref_intensity, comp_intensity = r_flip, c_flip\n\n f = Figure(figsize=(8,4))\n ax = f.add_subplot(111)\n\n x_axis_max = len(ref_intensity)\n y_axis_max = max(np.amax(ref_intensity), np.amax(comp_intensity))\n y_axis_min = min(np.amin(ref_intensity), np.amin(comp_intensity))\n\n ax.plot(ref_intensity, color='black')\n ax.plot(comp_intensity, color='black')\n\n ax.fill_between(range(len(ref_intensity)), ref_intensity, alpha=0.35, color='deepskyblue')\n ax.fill_between(range(len(comp_intensity)), comp_intensity, alpha=0.35, color='yellow')\n\n ax.set_ylabel('Intensity')\n ax.set_ylim([y_axis_min, (y_axis_max + 0.025*y_axis_max)])\n ax.xaxis.set_visible(False)\n\n f.tight_layout()\n\n if self.projcanvas:\n self.projcanvas.get_tk_widget().destroy()\n self.projtoolbar.destroy()\n\n self.projcanvas = FigureCanvasTkAgg(f, frame)\n self.projcanvas.draw()\n self.projcanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n self.projcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n self.projtoolbar = NavigationToolbar2Tk(self.projcanvas, frame)\n self.projtoolbar.update()", "def correlate(array1,array2):\r\n arrayout = np.conj(fft2(array1)) * fft2(array2)\r\n return ifft2(arrayout)", "def corr_coef(\n a: Union[np.ndarray, torch.Tensor], b: Union[np.ndarray, torch.Tensor]\n) -> float:\n if a is None or b is None:\n return None\n if isinstance(a, torch.Tensor):\n a = a.numpy()\n if isinstance(b, torch.Tensor):\n b = b.numpy()\n assert a.shape == b.shape, \"Inputs must be same shape\"\n mean_a = np.mean(a)\n mean_b = np.mean(b)\n std_a = np.std(a)\n std_b = np.std(b)\n cc = np.mean((a - mean_a) * (b - mean_b)) / (std_a * std_b)\n return cc", "def correlation(\n self,\n freq_1: float,\n 
time_1: float,\n freq_2: Optional[float] = None,\n time_2: Optional[float] = None,\n dw: Optional[tuple] = (1.0, 1.0),\n dagg: Optional[tuple] = (1, 0),\n interaction_picture: Optional[bool] = False,\n change_only: Optional[bool] = False,\n progress_type: Optional[Text] = None) -> complex:\n dt = self._process_tensor.dt\n if time_2 is None:\n time_2 = time_1\n if freq_2 is None:\n freq_2 = freq_1\n self.generate_system_correlations(time_2, progress_type)\n corr_mat_dim = int(np.round(time_2/dt))\n _sys_correlations = self._system_correlations[:corr_mat_dim,\n :corr_mat_dim]\n _sys_correlations = np.nan_to_num(_sys_correlations)\n re_kernel,im_kernel = self._calc_kernel(freq_1, time_1,\n freq_2, time_2, dagg)\n coup_1 = dw[0] * self._bath.correlations.spectral_density(freq_1)**0.5\n coup_2 = dw[1] * self._bath.correlations.spectral_density(freq_2)**0.5\n correlation = np.sum(_sys_correlations.real*re_kernel + \\\n 1j*_sys_correlations.imag*im_kernel) * \\\n coup_1 * coup_2\n if (not change_only) and (freq_1 == freq_2) \\\n and (dagg in ((1, 0), (0, 1))):\n if self._temp > 0:\n correlation += np.exp(-freq_1/self._temp) \\\n / (1 - np.exp(-freq_1/self._temp))\n if dagg == (0, 1):\n correlation += 1\n\n if not interaction_picture:\n correlation *= np.exp(1j * ((2*dagg[0] - 1) * freq_2 * time_2 + \\\n (2*dagg[1] - 1) * freq_1 * time_1))\n return correlation", "def cross_correlation(field1, field2):\n array_len = len(field1)\n # Take the index of the largest value in the array of correlation values calculated via a full convolve\n # cross correlation.\n arg_max = np.argmax((np.correlate([float(i) for i in field1], [float(i) for i in field2], mode='full')))\n # Map the index of the largest correlation value to that of the season lag between metrics\n return -(int(np.arange(-array_len+1, array_len)[arg_max]))", "def same_emitter(track_1, track_2):\n alternate_consistency = False\n start_consistency = False\n start_1_index = 0\n start_2_index = 0\n\n # First of all, check if both tracks use the same frequence to communicate\n freq_consistency = False\n f_1 = track_1.itr_measurement.central_freq_hz\n f_2 = track_2.itr_measurement.central_freq_hz\n if f_1 > 0.99*f_2 and f_1 < 1.01*f_2:\n freq_consistency = True\n\n # Then, check if the bandwidth of both tracks is the same\n bandwidth_consistency = False\n bw_1 = track_1.itr_measurement.bandwidth_hz\n bw_2 = track_2.itr_measurement.bandwidth_hz\n if bw_1 > 0.99*bw_2 and bw_1 < 1.01*bw_2:\n bandwidth_consistency = True\n\n # Is the emission type the same for both tracks ?\n type_consistency = False\n t_1 = track_1.itr_measurement.type\n t_2 = track_2.itr_measurement.type\n if t_1 == t_2:\n type_consistency = True\n\n # If all three criteria above have been fulfilled, check if alternates sequences are similar\n if freq_consistency and type_consistency and bandwidth_consistency:\n # logger.debug(\n # \"\\tFreq and type consistency found : \\n\\t\\t1° Freq - %s - Type - %s \\n\\t\\t2° Freq - %s - Type - %s\" % (f_1, t_1, f_2, t_2))\n alternate_consistency = True\n alternates_1 = track_1.alternates\n alternates_2 = track_2.alternates\n\n alt_duration_1 = [alt.duration_us for alt in alternates_1]\n alt_start_1 = [alt.start.date_ms for alt in alternates_1]\n alt_duration_2 = [alt.duration_us for alt in alternates_2]\n alt_start_2 = [alt.start.date_ms for alt in alternates_2]\n\n # Both tracks may not have been recorded at exactly the same time. Therefore,\n # we only analyse alternates that have finished. 
Not ongoing alternates.\n n = min(len(alternates_1), len(alternates_2)) - 1\n\n for start_1 in alt_start_1:\n if start_1 in alt_start_2:\n start_1_index = alt_start_1.index(start_1)\n start_2_index = alt_start_2.index(start_1)\n start_consistency = True\n break\n if not start_consistency:\n for start_2 in alt_start_2:\n if start_2 in alt_start_1:\n start_1_index = alt_start_1.index(start_2)\n start_2_index = alt_start_2.index(start_2)\n start_consistency = True\n break\n\n if start_consistency and track_1.itr_measurement.type != 1:\n if start_1_index == 0 or start_2_index == 0:\n start_1_index += 1\n start_2_index += 1\n while start_1_index < len(alt_start_1) and start_2_index < len(alt_start_2):\n # If there is more than a single alternate, we check if the duration of the alternates is consistent\n if alt_duration_1[start_1_index] != alt_duration_2[start_2_index]:\n alternate_consistency = False\n break\n\n # Always check that the start-dates of all alternates are the same.\n if alt_start_1[start_1_index] != alt_start_2[start_2_index]:\n alternate_consistency = False\n break\n\n start_1_index += 1\n start_2_index += 1\n\n # if alternate_consistency:\n # logger.debug(\n # \"\\tBoth tracks are from the same emitter !\")\n bool_response = freq_consistency and bandwidth_consistency and type_consistency and start_consistency and alternate_consistency\n\n track_id = get_track_id(track_1)\n return bool_response, track_id", "def final_homography(pts1, pts2, feats1, feats2):\n\n #\n # Your code here\n #\n\n idxs1, idxs2 = find_matches(feats1, feats2)\n ransac_return = ransac(pts1[idxs1], pts2[idxs2])\n\n return ransac_return, idxs1, idxs2", "def correlation_1d(data1, data2):\n\n N = len(data1)\n assert N == len(data2)\n n_fft = select_power_of_two(N)\n\n # Pad the signal with zeros to avoid the periodic images.\n R_data1 = np.zeros(2*n_fft)\n R_data1[:N] = data1\n R_data2 = np.zeros(2*n_fft)\n R_data2[:N] = data2\n F_data1 = np.fft.fft(R_data1)\n F_data2 = np.fft.fft(R_data2)\n result = np.fft.ifft(F_data1.conj()*F_data2)\n positive_time = result[:N].real/(N-np.arange(N))\n negative_time = result[-N+1:][::-1].real/(N-1-np.arange(N-1))\n\n return np.concatenate((negative_time[::-1], positive_time))", "def main_function(self, prn1, prn2):\n self.check_all_prns()\n fs = 5e6 # 5 MHz sampling rate\n T = 5e-3 # 5 milliseconds sampling duration\n fc = 1.023e6\n code_1 = self.generate_l1ca_codes(prn1)\n samples_1 = self.generate_code_samples(code_1, fs, T, fc)\n code_2 = self.generate_l1ca_codes(prn2)\n samples_2 = self.generate_code_samples(code_2, fs, T, fc)\n # To display correlation check, uncomment the next line\n # self.check_correlations(code_1, code_2, prn1, prn2)\n self.plot_correlation(samples_1, samples_2, T, fs, prn1, prn2)\n # To plot the CA code correlation plots, uncomment the next line\n # self.plot_correlation_ca_code(code_1, code_2, prn1, prn2)", "def check_correlations(self, x, y, prn1, prn2):\n x = self.create_constant_magnitude_signal(x)\n y = self.create_constant_magnitude_signal(y)\n auto_1 = self.circular_correlation(x, x)\n auto_2 = self.circular_correlation(y, y)\n print(\"Max Value in Auto-Correlation for PRN {0}: {1}\".format(prn1, max(auto_1)))\n print(\"Max Value in Auto-Correlation for PRN {0}: {1}\".format(prn2, max(auto_2)))\n auto_1_63, auto_1_65, auto_1_minus_1 = np.sum(auto_1 == 63)/1023, np.sum(auto_1 == -65)/1023, (np.sum(auto_1 == -1)/1023)\n auto_2_63, auto_2_65, auto_2_minus_1 = np.sum(auto_2 == 63)/1023, np.sum(auto_2 == -65)/1023, np.sum(auto_2 == -1)/1023\n 
print(\"63 appears {0}% in Auto-Correlation for PRN {1}\".format(auto_1_63 * 100, prn1))\n print(\"-65 appears {0}% in Auto-Correlation for PRN {1}\".format(auto_1_65 * 100, prn1))\n print(\"-1 appears {0}% in Auto-Correlation for PRN {1}\".format(auto_1_minus_1 * 100, prn1))\n print(\"63 appears {0}% in Auto-Correlation for PRN {1}\".format(auto_2_63 * 100, prn2))\n print(\"-65 appears {0}% in Auto-Correlation for PRN {1}\".format(auto_2_65 * 100, prn2))\n print(\"-1 appears {0}% in Auto-Correlation for PRN {1}\".format(auto_2_minus_1 * 100, prn2))", "def correlation_analysis():\n\n raw_covid_data = read_covid_data()\n\n pop_data = read_population()\n\n life_expectancy_data = read_life_expectancy()\n\n gdp_data = read_gdp()\n\n edu_data = read_education()\n\n int_data = read_internet()\n\n covid_joined = pd.merge(raw_covid_data, pop_data, on=\"Country\")\n\n covid_joined.insert(4, \"Confirmed rate\", covid_joined[\"Confirmed\"] / covid_joined[\"Population\"])\n covid_joined.insert(5, \"Death rate\", covid_joined[\"Death\"] / covid_joined[\"Population\"])\n\n covid_life_joined = pd.merge(covid_joined, life_expectancy_data, on=\"Country\")\n covid_life_gdp_joined = pd.merge(covid_life_joined, gdp_data, on=\"Country\")\n covid_life_gdp_edu_joined = pd.merge(covid_life_gdp_joined, edu_data, on=\"Country\")\n covid_life_gdp_edu_int_joined = pd.merge(covid_life_gdp_edu_joined, int_data, on=\"Country\")\n covid_life_gdp_edu_int_joined = covid_life_gdp_edu_int_joined[covid_life_gdp_edu_int_joined.Education != '..']\n covid_life_gdp_edu_int_joined = covid_life_gdp_edu_int_joined[covid_life_gdp_edu_int_joined.Internet != '..']\n covid_life_gdp_edu_int_joined['Education'] = covid_life_gdp_edu_int_joined['Education'].astype(float)\n covid_life_gdp_edu_int_joined['Internet'] = covid_life_gdp_edu_int_joined['Internet'].astype(float)\n\n sns.set()\n\n draw_histogram(covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"COVID-19 Confirmed rate\")\n draw_histogram(covid_life_gdp_edu_int_joined[\"Death rate\"], \"COVID-19 Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Life expectancy\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"Life expectancy\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Life expectancy\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"Life expectancy\", \"Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"GDP\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"GDP\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"GDP\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"GDP\", \"Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Education\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"Education\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Education\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"Education\", \"Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Internet\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"Internet\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Internet\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"Internet\", \"Death rate\")", "def DMAP2FromMoments (moms, corr1):\n\n Nm = ml.matrix([[1, moms[0]],[moms[0], corr1*(moms[1]-moms[0]**2)+moms[0]**2]])\n \n H0, H1 = DRAPFromMoments (moms, Nm)\n \n oldCheckInput = butools.checkInput\n butools.checkInput = False\n \n D0, D1 = 
CanonicalFromDMAP2 (H0, H1)\n \n butools.checkInput = oldCheckInput\n \n return (D0,D1)", "def process_cross(self, cat1, cat2):\n self.logger.info('Starting process NK cross-correlations for cats %s, %s.',\n cat1.name, cat2.name)\n f1 = cat1.getNField(self.min_sep,self.max_sep,self.b,self.split_method)\n f2 = cat2.getKField(self.min_sep,self.max_sep,self.b,self.split_method)\n\n if f1.sphere != f2.sphere:\n raise AttributeError(\"Cannot correlate catalogs with different coordinate systems.\")\n\n if f1.sphere:\n _treecorr.ProcessCrossNKSphere(self.corr, f1.data, f2.data, self.output_dots)\n else:\n _treecorr.ProcessCrossNKFlat(self.corr, f1.data, f2.data, self.output_dots)", "def cross_correlation(vol1, vol2):\n var_1 = tf.reduce_sum(tf.square(vol1 - tf.reduce_mean(vol1)))\n var_2 = tf.reduce_sum(tf.square(vol2 - tf.reduce_mean(vol2)))\n cov_12 = tf.reduce_sum((vol2 - tf.reduce_mean(vol2)) * (vol1 - tf.reduce_mean(vol1)))\n score = cov_12 / tf.sqrt(var_1 * var_2 + 1e-5)\n score = -tf.cast(score, tf.float32)\n return score", "def corr2d(A, B):\n assert(A.shape==B.shape)\n return ((A - A.mean())/A.std() * (B - B.mean())/B.std()).mean()", "def inter_subj_cc_sim(subj1_id, subj2_id, subj_dir):\n subj1_dir = os.path.join(subj_dir, 'vS%s'%(subj1_id))\n subj2_dir = os.path.join(subj_dir, 'vS%s'%(subj2_id))\n #-- inter-channel similarity\n feat_weights_file1 = os.path.join(subj1_dir, 'plscca',\n 'layer1', 'feat_weights.npy')\n feat_weights_file2 = os.path.join(subj2_dir, 'plscca',\n 'layer1', 'feat_weights.npy')\n feat_cc_corr1 = np.load(feat_cc_corr_file1).reshape(96, 121, 10)\n feat_cc_corr2 = np.load(feat_cc_corr_file2).reshape(96, 121, 10)\n sim_mtx = np.zeros((960, 960))\n for i in range(10):\n data1 = feat_cc_corr1[..., i]\n for j in range(10):\n data2 = feat_cc_corr2[..., j]\n tmp = corr2_coef(data1, data2)\n sim_mtx[i*96:(i+1)*96, j*96:(j+1)*96] = np.abs(tmp)\n np.save('feat_cc_weights_sim_subj_%s_%s.npy'%(subj1_id, subj2_id), sim_mtx)\n #-- inter-CC similarity\n #feat_cc_corr_file1 = os.path.join(subj1_dir, 'plscca',\n # 'layer1', 'feat_cc_corr.npy')\n #feat_cc_corr_file2 = os.path.join(subj2_dir, 'plscca',\n # 'layer1', 'feat_cc_corr.npy')\n #feat_cc_corr1 = np.load(feat_cc_corr_file1).reshape(96, 11, 11, 10)\n #feat_cc_corr2 = np.load(feat_cc_corr_file2).reshape(96, 11, 11, 10)\n #avg_weights1 = vutil.fweights_top_mean(feat_cc_corr1, 0.2)\n #avg_weights2 = vutil.fweights_top_mean(feat_cc_corr2, 0.2)\n #sim_mtx = corr2_coef(avg_weights1, avg_weights2)\n #np.save('feat_cc_sim_subj_%s_%s.npy'%(subj1_id, subj2_id), sim_mtx)\n pass", "def correlation_1D2D_datainput(df1, df2): # correlation function from Rémi (local.py)\n\n df2 = df2.transpose() # sigs_2D.transpose()\n cov = np.dot(df1 - df1.mean(), df2 - df2.mean(axis=0)) / (df2.shape[0] - 1)\n # ddof=1 necessary because covariance estimate is unbiased (divided by n-1)\n p_var = np.sqrt(np.var(df1, ddof=1) * np.var(df2, axis=0, ddof=1))\n r = cov / p_var\n return r", "def correlation_test(x1, x2):\r\n x = pd.DataFrame([x1, x2]).T.dropna().values\r\n return pearsonr(x[:, 0], x[:, 1])", "def correlate_data(fname1='inspection_results.csv',\n fname2='green_markets.json',\n fname3='dataresults.csv'):\n correlate1 = get_score_summary(fname1)\n correlate2 = get_market_density(fname2)\n datareturn = {}\n for key2 in correlate2.iterkeys():\n for key1 in correlate1.iterkeys():\n if key1 == str(key2).upper():\n keyval1 = correlate1[key1][1]\n keyval2 = float(correlate2[key2])/(correlate1[key1][0])\n datareturn[key2] = (keyval1, keyval2)\n 
datareturn.update(datareturn)\n jsondata = json.dumps(datareturn)\n fhandler = open(fname3, 'w')\n fhandler.write(jsondata)\n fhandler.close()", "def test_correlation(self):\r\n x = [1, 2, 3, 5]\r\n y = [0, 0, 0, 0]\r\n z = [1, 1, 1, 1]\r\n a = [2, 4, 6, 8]\r\n b = [1.5, 1.4, 1.2, 1.1]\r\n c = [15, 10, 5, 20]\r\n\r\n bad = [1, 2, 3] # originally gave r = 1.0000000002\r\n\r\n self.assertFloatEqual(correlation(x, x), (1, 0))\r\n self.assertFloatEqual(correlation(x, y), (0, 1))\r\n self.assertFloatEqual(correlation(y, z), (0, 1))\r\n self.assertFloatEqualAbs(correlation(x, a), (0.9827076, 0.01729), 1e-5)\r\n self.assertFloatEqualAbs(\r\n correlation(x, b), (-0.9621405, 0.03786), 1e-5)\r\n self.assertFloatEqualAbs(correlation(x, c), (0.3779645, 0.622), 1e-3)\r\n self.assertEqual(correlation(bad, bad), (1, 0))", "def plot_by_concreteness(scores: np.ndarray, word_pairs, ax1, ax2, common_subset=False, vecs_names=None,\n concrete_num=100, title_prefix='', pair_score_agg='sum', show=False):\n for synset_agg, ax in zip(['median', 'most_conc'], [ax1, ax2]):\n corrs_by_conc = defaultdict(list)\n ids12, concs = wn_concreteness_for_pairs(word_pairs, synset_agg, pair_score_agg=pair_score_agg)\n scs = scores[ids12]\n for i in range(0, len(ids12), concrete_num):\n corrs = compute_correlations(scs[i:i + concrete_num], 'gt', common_subset=common_subset)\n for k, v in corrs.items():\n corrs_by_conc[k].append(v[0]) # Append correlations score for each embedding\n\n corrs_by_conc_a = dict2struct_array(corrs_by_conc)\n\n vnames = [n for n in corrs_by_conc_a.dtype.names if 'fmri' not in n and 'frcnn' not in n]\n labels = [Embeddings.get_label(n.split(NAME_DELIM)[1]) for n in vnames]\n\n colours, linestyles, alphas = PlotColour.colour_by_modality(labels)\n labelpad = 10\n\n # Concreteness scores on different axis but the same plot\n axn = ax\n axn.plot(concs, color='blue')\n axn.set_xlabel('Word pairs', labelpad=labelpad)\n axn.set_ylabel('WordNet concreteness', labelpad=labelpad)\n axn.yaxis.label.set_color('blue')\n # Xticklabels by step size\n n = scores.shape[0]\n step = 500\n xtlabels = [i for i in range(concrete_num, n) if i % step == 0] + [n]\n axn.xaxis.set_ticks([i - 1 for i in xtlabels])\n axn.set_xticklabels(xtlabels)\n\n # Plot for Spearman's correlations\n axp = axn.twiny().twinx()\n axp = plot_scores(corrs_by_conc_a,\n vecs_names=vnames,\n labels=None,\n colours=colours,\n linestyles=linestyles,\n title='',\n alphas=alphas,\n xtick_labels=None,\n ax=axp,\n show=show)\n axp.set_ylabel(\"Spearman's correlation\", labelpad=labelpad - 3)\n # TODO: Doesn't show, order of axn.twiny().twinx() matters...\n axp.set_xlabel('WordNet concreteness splits by 100 pairs', labelpad=labelpad)\n n = corrs_by_conc_a.shape[0]\n axp.xaxis.set_ticks([i for i in range(-1, n)])\n axp.set_xticklabels(['' for i in axp.get_xticklabels()])\n syna = {'median': 'Median', 'most_conc': 'Most Concrete'}[synset_agg]\n axp.set_title(f'{title_prefix} - Synset Agg {syna}')", "def RMSD(ccdata1, ccdata2):\n natom = ccdata1.natom\n rmsd = 0.0\n maxdiff = 0.0\n for i in range(natom):\n diff = norm(ccdata1.atomcoords[0][i] - ccdata2.atomcoords[0][i])\n rmsd += diff\n if diff > maxdiff:\n maxdiff = diff\n\n rmsd /= natom\n\n return rmsd, maxdiff", "def visualize_correspondence(opt, source_shape, source_face, target_shape, target_face, corres_1, corres_2):\n # save these points with color codes\n P = corres_2.shape[0]\n assert(corres_1.shape[0] == corres_2.shape[0])\n corres_1 = corres_1.cpu().numpy().reshape(-1)\n corres_2 = 
corres_2.cpu().numpy().reshape(-1)\n normalize = Normalize(vmin=0, vmax=corres_1.shape[0])\n cmap = cm.get_cmap(\"jet\")\n colors_picked = cmap(normalize(np.arange(P, dtype=np.float32)))[:, :3]\n colors_source = np.ones((source_face.shape[1], 3), dtype=np.float32)\n colors_source[corres_1, :] = colors_picked\n save_ply_with_face(source_shape[0].cpu().detach().numpy(), source_face[0].cpu().detach().numpy(),\n os.path.join(opt.log_dir, opt.subdir, \"source_corr.ply\"), colors_source)\n colors_target = np.ones((target_face.shape[1], 3), dtype=np.float32)\n colors_target[corres_2, :] = colors_picked\n save_ply_with_face(target_shape[0].cpu().detach().numpy(), target_face[0].cpu().detach().numpy(),\n os.path.join(opt.log_dir, opt.subdir, \"target_corr.ply\"), colors_target)", "def spatial_correlation(A, B):\n \n assert(A.shape==B.shape)\n \n r, c = A.shape\n corr = np.ones((r, c))\n for xin in range(0, c):\n for yin in range(0, r):\n corr[yin, xin] = (A[0:r-yin, 0:c-xin] * B[yin:r, xin:c]).mean()\n \n return corr", "def multi_precisions_correlate2(self):\n self.query_dict={'code':code2.value,'exchange':exchange2.value,\\\n 'structure':struct2.value,'element':element2.value,'properties':prop2.value}\n print ('POSTING', self.query_dict)\n if not self.query_dict['properties'] == 'Multi':\n self.query_api(endpoint='precvalue')\n self.prop_data = self.plot_data['s{}k'.format(self.properties)]\n self.energy_data = self.plot_data['sE0k'.format(self.properties)]\n layout_doc.children[4].children[1] = self.plot_precision_figure()", "def concatTwoHMMs(hmm1, hmm2):\n hmmOut = {}\n M1,D1 =hmm1['transmat'].shape\n M2,D2 = hmm2['transmat'].shape\n hmmOut['name']=hmm1['name']+hmm2['name']\n hmmOut['startprob'] = hmm2['startprob'] * hmm1['startprob'][M1-1]\n hmmOut['startprob'] = np.concatenate((hmm1['startprob'][0:M1-1], hmmOut['startprob']))\n mul = np.reshape(hmm1['transmat'][0:-1, -1], (M1-1, 1)) @ np.reshape(hmm2['startprob'], (1, M2))\n hmmOut['transmat'] = np.concatenate((hmm1['transmat'][0:-1, 0:-1], mul), axis=1)\n tmp = np.concatenate((np.zeros([M2,M1-1]), hmm2['transmat']), axis=1)\n hmmOut['transmat'] = np.concatenate((hmmOut['transmat'], tmp), axis=0)\n hmmOut['means'] = np.vstack((hmm1['means'],hmm2['means']))\n hmmOut['covars'] = np.vstack((hmm1['covars'],hmm2['covars']))\n return hmmOut", "def get_auto_corr(timeSeries1_pre,timeSeries2_pre,k):\n l=len(timeSeries1_pre)\n timeSeries1=timeSeries1_pre[0:l-k]\n timeSeries2=timeSeries2_pre[k:]\n timeSeries1_mean=timeSeries1.mean()\n timeSeries2_mean=timeSeries2.mean()\n ###doubt\n timeSeries1_std= np.sqrt(timeSeries1_pre.var()*len(timeSeries1_pre))\n timeSeries2_std= np.sqrt(timeSeries2_pre.var()*len(timeSeries2_pre))\n auto_corr = 0\n for i in xrange(l-k):\n if timeSeries1_std == 0 or timeSeries2_std == 0:\n return 0\n else:\n tmp=(timeSeries1[i]-timeSeries1_mean)*(timeSeries2[i]-timeSeries2_mean)/(timeSeries1_std*timeSeries2_std)\n auto_corr = auto_corr + tmp\n \n return auto_corr", "def cross_correlation(arr1, arr2):\n faxes = lambda x: tuple(np.arange(x.ndim - 1) + 1)\n\n return pipe(\n arr1,\n dafftn(axes=faxes(arr1)),\n lambda x: daconj(x) * dafftn(arr2, axes=faxes(arr2)),\n daifftn(axes=faxes(arr1)),\n dafftshift(axes=faxes(arr1)),\n lambda x: x.real / arr1[0].size,\n )", "def plot_corr_diff(tseries1, tseries2, fig=None,\r\n ts_names=['1', '2']):\r\n\r\n if fig is None:\r\n fig = plt.figure()\r\n\r\n ax = fig.add_subplot(1, 1, 1)\r\n\r\n SNR1 = []\r\n SNR2 = []\r\n corr1 = []\r\n corr2 = []\r\n corr_e1 = []\r\n corr_e2 = []\r\n\r\n for i in 
range(tseries1.shape[0]):\r\n SNR1.append(nta.SNRAnalyzer(ts.TimeSeries(tseries1.data[i],\r\n sampling_rate=tseries1.sampling_rate)))\r\n\r\n corr1.append(np.arctanh(np.abs(SNR1[-1].correlation[0])))\r\n corr_e1.append(SNR1[-1].correlation[1])\r\n\r\n SNR2.append(nta.SNRAnalyzer(ts.TimeSeries(tseries2.data[i],\r\n sampling_rate=tseries2.sampling_rate)))\r\n\r\n corr2.append(np.arctanh(np.abs(SNR2[-1].correlation[0])))\r\n corr_e2.append(SNR1[-1].correlation[1])\r\n\r\n ax.scatter(np.array(corr1), np.array(corr2))\r\n ax.errorbar(np.mean(corr1), np.mean(corr2),\r\n yerr=np.std(corr2),\r\n xerr=np.std(corr1))\r\n plot_min = min(min(corr1), min(corr2))\r\n plot_max = max(max(corr1), max(corr2))\r\n ax.plot([plot_min, plot_max], [plot_min, plot_max], 'k--')\r\n ax.set_xlabel('Correlation (Fischer Z) %s' % ts_names[0])\r\n ax.set_ylabel('Correlation (Fischer Z) %s' % ts_names[1])\r\n\r\n return fig, corr1, corr2", "def correlation_test(sample1, sample2, method='pearson', alpha=0.05,\n alternative='two-sided', show_graph=True, **kwargs):\n text = 'relationship between the two variables'\n hypothesis = {\n 'two-sided_H0': f\"there is no {text}\",\n 'two-sided_H1': f\"there is a {text}\",\n 'greater_H0': f\"there is no positive {text}\",\n 'greater_H1': f\"there is a positive {text}\",\n 'less_H0': f\"there is no negative {text}\",\n 'less_H1': f\"there is a negative {text}\"\n }\n if method == 'pointbiserial':\n pb_corr = pointbiserialr(sample1, sample2)\n df = pd.DataFrame(data={'r': [pb_corr.correlation],\n 'p-val': [pb_corr.pvalue]})\n df = df.rename({0: 'pointbiserial'})\n else:\n df = pg.corr(x=sample1, y=sample2,\n alternative=alternative, method=method)\n if show_graph:\n Visualization.scatter(x=sample1, y=sample2, **kwargs)\n return HypothesisTester.test_alternative(df, hypothesis,\n alternative, alpha).T", "def _xcorrf(self, profile1, profile2, dx):\n corrf = np.correlate(profile2, profile1, mode = 'same') \\\n /np.sum(profile1**2)\n\n if np.isnan(corrf).any():\n displ = np.nan\n corr = 0\n else:\n displ = (np.where(corrf == np.max(corrf))[0][0] - len(corrf)//2)*dx\n corr = np.max(corrf)\n\n return displ, corr", "def CORREL(list1, list2):\n list1 = np.array(list1)\n list2 = np.array(list2)\n try:\n return(np.corrcoef(list1, list2)[0,1])\n except:\n print('Invalid list objects: have you passed int or numeric list objects of same length?')", "def wordSimilarityRatio(sent_1,sent_2):", "def match(desc1,desc2):\n\t\n\tdesc1 = array([d/linalg.norm(d) for d in desc1])\n\tdesc2 = array([d/linalg.norm(d) for d in desc2])\n\t\n\tdist_ratio = 0.6\n\tdesc1_size = desc1.shape\n\t\n\tmatchscores = zeros((desc1_size[0],1))\n\tdesc2t = desc2.T #precompute matrix transpose\n\tfor i in range(desc1_size[0]):\n\t\tdotprods = dot(desc1[i,:],desc2t) #vector of dot products\n\t\tdotprods = 0.9999*dotprods\n\t\t#inverse cosine and sort, return index for features in second image\n\t\tindx = argsort(arccos(dotprods))\n\t\t\n\t\t#check if nearest neighbor has angle less than dist_ratio times 2nd\n#\t\tif arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n\t\tmatchscores[i] = int(indx[0])\n\t\n\treturn matchscores", "def process_pairwise(self, cat1, cat2):\n self.logger.info('Starting process NK pairwise-correlations for cats %s, %s.',\n cat1.name, cat2.name)\n f1 = cat1.getNSimpleField()\n f2 = cat2.getKSimpleField()\n\n if f1.sphere != f2.sphere:\n raise AttributeError(\"Cannot correlate catalogs with different coordinate systems.\")\n\n if f1.sphere:\n 
_treecorr.ProcessPairwiseNKSphere(self.corr, f1.data, f2.data, self.output_dots)\n else:\n _treecorr.ProcessPairwiseNKFlat(self.corr, f1.data, f2.data, self.output_dots)", "def pairwise_correlation_difference(self):\r\n\r\n real_cat, synth_cat = self.to_cat(self.origdst, self.synthdst)\r\n\r\n real_cat_dem = self.get_demographics(real_cat)\r\n synth_cat_dem = self.get_demographics(synth_cat)\r\n\r\n corr_real_obj = associations(real_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n corr_synth_obj = associations(synth_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n\r\n corr_real = corr_real_obj['corr']\r\n corr_rand = corr_synth_obj['corr']\r\n\r\n substract_m = np.subtract(corr_real, corr_rand)\r\n prwcrdst = LA.norm(substract_m)\r\n\r\n return prwcrdst, substract_m", "def calculate_correlation_coefficient(column1: pd.Series, column2: pd.Series) -> np.float64:\n\n corr = column1.corr(column2)\n return corr", "def mcorr(x,y):\n return ((np.ma.dot(x,y) / (x.shape[0] - 1) / y.std(axis=0)) / x.std())", "def match(desc1,desc2):\n desc1 = array([d/linalg.norm(d) for d in desc1])\n desc2 = array([d/linalg.norm(d) for d in desc2])\n dist_ratio = 0.6\n desc1_size = desc1.shape\n matchscores = zeros((desc1_size[0],1),'int')\n desc2t = desc2.T # precompute matrix transpose\n for i in range(desc1_size[0]):\n dotprods = dot(desc1[i, :], desc2t) # vector of dot products\n dotprods *= 0.9999\n # inverse cosine and sort, return index for features in second image\n indx = argsort(arccos(dotprods))\n # check if nearest neighbor has angle less than dist_ratio times 2nd\n if arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n matchscores[i] = int(indx[0])\n return matchscores", "def cosinesimilarity_cal(CTRDM1, CTRDM2):\n\n # get number of conditions\n n_cons = np.shape(CTRDM1)[0]\n\n # calculate the number of value above the diagonal in RDM\n n = n_cons * (n_cons - 1)\n\n # initialize two vectors to store the values above the diagnal of two RDMs\n v1 = np.zeros([n], dtype=np.float64)\n v2 = np.zeros([n], dtype=np.float64)\n\n # assignment\n nn = 0\n for i in range(n_cons):\n for j in range(n_cons):\n if i != j:\n v1[nn] = CTRDM1[i, j]\n v2[nn] = CTRDM2[i, j]\n nn = nn + 1\n\n # calculate the Cosine Similarity\n V1 = np.mat(v1)\n V2 = np.mat(v2)\n num = float(V1 * V2.T)\n denom = np.linalg.norm(V1) * np.linalg.norm(V2)\n cos = num / denom\n similarity = 0.5 + 0.5 * cos\n\n return similarity", "def nancrosscorr(\n fr1: np.ndarray,\n fr2: np.ndarray = None,\n thres_n=2,\n fillvalue=np.nan,\n processes=1,\n) -> np.ndarray:\n if fr2 is None:\n fr2 = fr1\n\n is_fr1_ndim2 = fr1.ndim == 2\n if is_fr1_ndim2:\n fr1 = fr1[..., None]\n\n is_fr2_ndim2 = fr2.ndim == 2\n if is_fr2_ndim2:\n fr2 = fr2[..., None]\n\n assert fr1.ndim == 3\n assert fr2.ndim == 3\n assert thres_n >= 2, 'to compute correlation thres_n needs to be >= 2'\n\n fsh1 = np.array(fr1.shape[:2])\n fsh2 = np.array(fr2.shape[:2])\n # csh = fsh1 + fsh2\n\n # NOTE: pad smaller of the two to match max_shape + 2,\n # + 2 to ensure both are padded on both sides to remove smoothing artifact\n max_sh0 = np.amax(np.stack([fsh1, fsh2], axis=0), axis=0)\n max_sh = max_sh0 + 2\n # max_sh = (max_sh // 2) * 2 + 1 # enforce odd numbers so it has a center\n pad1 = max_sh - fsh1\n # pad1 = np.stack([\n # int(np.floor(pad1 / 2))])\n pad2 = max_sh - fsh2\n fr1 = np.pad(fr1, [\n (int(np.floor(pad1[0] / 2)),\n int(np.ceil(pad1[0] / 2))),\n (int(np.floor(pad1[1] / 2)),\n int(np.ceil(pad1[1] / 2))),\n (0, 0)\n ], 
constant_values=np.nan)\n fr2 = np.pad(fr2, [\n (int(np.floor(pad2[0] / 2)),\n int(np.ceil(pad2[0] / 2))),\n (int(np.floor(pad2[1] / 2)),\n int(np.ceil(pad2[1] / 2))),\n (0, 0)\n ], constant_values=np.nan)\n\n csh = max_sh0 * 2\n cc = np.zeros(tuple(csh) + fr1.shape[2:]) + fillvalue\n # fsh = np.amin(np.stack([fsh1, fsh2], axis=0), axis=0)\n # fsh = np.ceil(max_sh / 2).astype(int)\n fsh = max_sh0\n\n pool = Pool(processes=processes)\n # if processes > 0:\n # pool = Pool(processes=processes)\n # f_map = pool.map\n # else:\n # def f_map(*args, **kwargs):\n # return list(map(*args, **kwargs))\n\n # def ccorrs(dx: int):\n # cc0 = _ccorrs_given_dx(dx, csh, fillvalue, fr1, fr2, fsh, thres_n)\n\n dxs = np.arange(-fsh[0], fsh[0])\n cc[fsh[0] + dxs] = np.array(pool.map(\n _ccorrs_given_dx,\n ((dx, csh, fillvalue, fr1, fr2, fsh, thres_n) for dx in dxs)\n ))\n # cc[fsh[0] + dxs] = np.array(pool.map(ccorrs, dxs))\n\n # if processes > 0:\n # pool.close()\n\n if is_fr1_ndim2 and is_fr2_ndim2:\n assert cc.shape[-1] == 1\n cc = cc[..., 0]\n\n return cc", "def ransac(keypoints1, keypoints2, matches, sampling_ratio=0.5, n_iters=500, threshold=20):\n N = matches.shape[0]\n n_samples = int(N * sampling_ratio)\n\n # Please note that coordinates are in the format (y, x)\n matched1 = pad(keypoints1[matches[:,0]])\n matched2 = pad(keypoints2[matches[:,1]])\n matched1_unpad = keypoints1[matches[:,0]]\n matched2_unpad = keypoints2[matches[:,1]]\n\n max_inliers = np.zeros(N)\n n_inliers = 0\n\n # RANSAC iteration start\n ### YOUR CODE HERE\n raise NotImplementedError() # Delete this line\n ### END YOUR CODE\n return H, matches[max_inliers]", "def spatial_corr(*args):\n # Handle input arguments\n if len(args) == 0 or len(args) > 2:\n raise ValueError, 'requires one or two arguments'\n if len(args) == 1:\n A = B = args[0]\n else:\n A, B = args \n assert A.shape == B.shape, 'shape mismatch between input arrays'\n assert A.ndim == 3, 'input arrays must be rank-3'\n \n # Map and correlogram dimensions\n num_maps, H, W = A.shape \n corr_shape = 2*H-1, 2*W-1 \n \n # Fourier transforms\n A_ = scipy.signal.fft2(A, shape=corr_shape)\n B_ = scipy.signal.fft2(B[:, ::-1, ::-1], shape=corr_shape)\n AB_conv = (A_ * B_).sum(axis=0)\n \n return scipy.signal.real(scipy.signal.ifft2(AB_conv))/num_maps", "def plot_correlation(\n adata, \n gene_1, \n gene_2, \n bandwidth=5, \n contrib_thresh=10, \n kernel_matrix=None, \n row_key='row', \n col_key='col', \n condition=None,\n cmap='RdBu_r',\n colorbar=True,\n ticks=True,\n ax=None,\n figure=None,\n dsize=10,\n estimate='local',\n title=None,\n spot_borders=False,\n border_color='black',\n border_size=0.3,\n fig_path=None,\n fig_format='pdf',\n fig_dpi=150\n ):\n if ax is None:\n if colorbar:\n width = 7\n else:\n width = 5\n figure, ax = plt.subplots(\n 1,\n 1,\n figsize=(width,5)\n )\n\n if estimate == 'local':\n corrs, keep_inds = _plot_correlation_local(\n adata,\n gene_1,\n gene_2,\n bandwidth=bandwidth,\n contrib_thresh=contrib_thresh,\n kernel_matrix=kernel_matrix,\n row_key=row_key, \n col_key=col_key, \n condition=condition,\n cmap=cmap,\n colorbar=colorbar,\n ticks=ticks,\n ax=ax,\n figure=figure,\n dsize=dsize,\n title=title,\n spot_borders=spot_borders,\n border_color=border_color,\n border_size=border_size\n )\n extra_data = {}\n elif estimate == 'regional':\n corrs, keep_inds, ct_to_corr = _plot_correlation_regional(\n adata,\n gene_1,\n gene_2,\n condition,\n kernel_matrix=kernel_matrix,\n row_key=row_key,\n col_key=col_key, \n cmap=cmap,\n colorbar=colorbar,\n 
ticks=ticks,\n ax=ax,\n figure=figure,\n dsize=dsize,\n title=title,\n spot_borders=spot_borders,\n border_color=border_color,\n border_size=border_size\n )\n extra_data={'region_to_corr': ct_to_corr}\n\n if fig_path:\n plt.tight_layout()\n figure.savefig(\n fig_path,\n format=fig_format,\n dpi=fig_dpi\n )\n plt.show()\n\n return corrs, keep_inds, extra_data", "def concatTwoHMMs(hmm1, hmm2):\n name = hmm1['name']+hmm2['name']\n\n startprob1 = np.array(hmm1[\"startprob\"])\n startprob2 = np.array(hmm2[\"startprob\"])\n startprob = np.hstack((startprob1[0:-1],startprob1[-1]*startprob2))\n\n transmat1 = hmm1[\"transmat\"]\n transmat2 = hmm2[\"transmat\"]\n part1 = np.hstack((transmat1[0:-1,0:-1],np.outer(transmat1[:-1,-1],startprob2)))\n part2 = np.hstack((np.zeros((transmat2.shape[0],transmat1.shape[1]-1)),transmat2))\n transmat = np.vstack((part1,part2))\n \n means1 = hmm1[\"means\"]\n means2 = hmm2[\"means\"]\n means = np.vstack((means1,means2))\n\n covars1 = hmm1[\"covars\"]\n covars2 = hmm2[\"covars\"]\n covars = np.vstack((covars1,covars2))\n\n dict_ = {'name':name,'startprob':startprob,'transmat':transmat,'means':means,'covars':covars}\n return dict_", "def _get_subject_corruption_scores(self, triples, ent_matrix):\n rel_emb, obj_emb = triples[1], triples[2]\n # compute the score by broadcasting the corruption embeddings(ent_matrix) and using the scoring function\n # compute scores as sum(s_corr * p * o)\n sub_corr_score = tf.reduce_sum(\n ent_matrix * tf.expand_dims(rel_emb * obj_emb, 1), 2\n )\n return sub_corr_score", "def xcorr(arr1, arr2, mode = 'full', axes = None):\n if mode not in {'full', 'same'}:\n raise ValueError('Unexpected cross-correlation mode {}'.format(mode))\n \n if axes is None:\n axes = tuple(range(arr1.ndim))\n\n arr1, arr2 = np.asarray(arr1), np.asarray(arr2)\n\n # Determine final size along transformation axes\n # To speed up FFT, shape of Fourier transform might be slightly larger\n # then slice back before returning\n s1 = tuple(arr1.shape[ax] for ax in axes)\n s2 = tuple(arr2.shape[ax] for ax in axes)\n final_shape = tuple( ax1 + ax2 - 1 for ax1, ax2 in zip(s1, s2))\n fast_shape = tuple(map(next_fast_len, final_shape))\n final_slice = tuple([slice(0, int(sz)) for sz in final_shape])\n\n F1 = fftn(arr1, shape = fast_shape, axes = axes)\n F2 = fftn(np.conj(mirror(arr2, axes = axes)), shape = fast_shape, axes = axes)\n xc = ifftn(F1 * F2)[final_slice]\n\n if mode == 'same':\n return _centered(xc, arr1.shape, axes = axes)\n else:\n return xc", "def sig_corr(self, s1, s2, comp_length):\n\n # np.corrcoef returns an array of coefficients -\n # the simple 'R' value is at row 1, col 0\n return np.corrcoef(\n self.max_freq[s1:s1+comp_length],\n self.max_freq[s2:s2+comp_length])[1, 0]", "def test_distance_correlation_fast(self):\n arr1 = np.array(((1,), (2,), (3,), (4,), (5,), (6,)))\n arr2 = np.array(((1,), (7,), (5,), (5,), (6,), (2,)))\n\n covariance = dcor_internals._u_distance_covariance_sqr_fast(\n arr1, arr2)\n self.assertAlmostEqual(covariance, -0.88889, places=5)\n\n correlation = dcor_internals._u_distance_correlation_sqr_fast(\n arr1, arr2)\n self.assertAlmostEqual(correlation, -0.41613, places=5)\n\n covariance = dcor_internals._u_distance_covariance_sqr_fast(\n arr1, arr1)\n self.assertAlmostEqual(covariance, 1.5556, places=4)\n\n correlation = dcor_internals._u_distance_correlation_sqr_fast(\n arr1, arr1)\n self.assertAlmostEqual(correlation, 1, places=5)", "def concatTwoHMMs(hmm1, hmm2):\n A = hmm1['transmat']#4*4\n PI = hmm1['startprob']#1*4\n B = 
hmm2['transmat']\n P = hmm2['startprob']\n m = A.shape[0] - 1\n m2 = B.shape[0] - 1\n K = m + m2\n A_con = np.zeros((K+1, K+1))\n Pi_con = np.zeros((1, K+1))\n A_con[:m, :m] = A[:m, :m]\n A_con[m:, m:] = B\n A_con[:m, m:] = np.dot(A[:m,m].reshape(-1, 1), P.reshape(1, -1))\n PP = PI.reshape(1, -1)\n Pi_con[0, :m] = PP[0, :m]\n Pi_con[0, m:] = PP[0, m] * P\n\n twoHMMs = {}\n twoHMMs['startprob'] = Pi_con\n twoHMMs['transmat'] = A_con\n twoHMMs['means'] = np.concatenate((hmm1['means'], hmm2['means']), axis=0)\n twoHMMs['covars'] = np.concatenate((hmm1['covars'] ,hmm2['covars']), axis=0)#K*D\n\n return twoHMMs", "def diagcorr(mat1, mat2, rtype='pearson', max_shift=100, percentile=100, clearmaxmin=False, symmetric=False):\n l1, l2 = len(mat1), len(mat2)\n # adjust to same size\n padding = (l1 - l2) // 2\n assert padding >= 0 and (l1-l2)%2 == 0, \\\n \"The first matrix must be larger than the second one, and padding must be symmetric!\"\n if padding > 0:\n mat1 = mat1[padding:l1-padding, padding:l1-padding]\n\n assert l2 > max_shift, \"Shifting distance is too large for input matrices!\"\n \n r = np.zeros(max_shift)\n p = np.zeros(max_shift)\n for s in range(max_shift):\n diag1 = np.diag(mat1, k=s)\n diag2 = np.diag(mat2, k=s)\n if symmetric:\n diag1 = (diag1 + np.diag(mat1, k=-s)) / 2\n diag2 = (diag2 + np.diag(mat2, k=-s)) / 2\n \n if percentile < 100:\n diag1 = np.minimum(np.percentile(diag1, percentile), diag1)\n diag2 = np.minimum(np.percentile(diag2, percentile), diag2)\n\n if clearmaxmin:\n diag1, diag2 = _clear_max_min(diag1, diag2)\n\n if rtype == 'pearson':\n r[s], p[s] = pearsonr(diag1, diag2)\n elif rtype == 'spearman':\n r[s], p[s] = spearmanr(diag1, diag2)\n\n return r, p", "def test_correlation_broadcasts(a, b, metrics):\n # unpack metrics\n metric, _metric = metrics\n metric(a, b.isel(lat=0), dim=\"time\")\n metric(a, b.isel(lat=[0]), dim=\"time\")\n b_changed_coords = b.isel(lat=[0]).assign_coords(lat=[123])\n if (\n \"eff\" not in metric.__name__\n ): # effective metrics require to be applied over time\n with pytest.raises(\n ValueError, match=\"ndex\"\n ): # match \"indexes along dimension\" and \"cannot align objects with join='exact' where index/labels/sizes are not equal along these coordinates (dimensions)\"\n metric(a, b_changed_coords, dim=\"lat\")", "def corr_with(self, other):\n return self.data.corrwith(other)", "def sentence_similarity(self,wnsimilarity,sentence1, sentence2,icneed=False):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n \n # Get the synsets for the tagged words\n synsets1 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n \n score, count = 0.0, 0\n # For each word in the first sentence\n for synset in synsets1:\n \n # Get the similarity value of the most similar word in the other sentence\n score_list=[]\n if icneed == True :\n for ss in synsets2:\n try:\n temp=wnsimilarity(synset,ss,self.brown_ic)\n score_list.append(temp)\n except:\n continue\n \n else:\n for ss in synsets2:\n try:\n temp=wnsimilarity(synset,ss)\n score_list.append(temp)\n except:\n continue\n \n \n score_list = np.array(score_list, dtype=np.float64)\n score_list = np.nan_to_num(score_list)\n# print(score_list)\n if len(score_list)>0:\n best_score = np.nanmax(score_list)\n else:\n best_score=0.0\n# 
print(best_score)\n# print(type(best_score))\n \n # Check that the similarity could have been computed\n if best_score is not None:\n score =score + best_score\n# print(score)\n count = count+ 1\n \n \n# print(\"one sentence over\")\n # Average the values\n score /= count\n return score", "def get_message_metrics(\n messages, hidden_sender, hidden_receiver, meta_data, img_features\n ):\n messages = messages.cpu().numpy()\n\n rsa_sr, rsa_si, rsa_ri, rsa_sm, topological_similarity, pseudo_tre = representation_similarity_analysis(\n img_features, meta_data, messages, hidden_sender, hidden_receiver, tre=True\n )\n\n # rsa = representation_similarity_analysis(messages, meta_data)\n l_entropy = language_entropy(messages)\n\n return (\n rsa_sr,\n rsa_si,\n rsa_ri,\n rsa_sm,\n topological_similarity,\n pseudo_tre,\n l_entropy,\n )", "def plot_correlation_ca_code(self, x, y, prn1, prn2):\n x = self.create_constant_magnitude_signal(x)\n y = self.create_constant_magnitude_signal(y)\n auto_1 = self.circular_correlation(x, x)\n auto_2 = self.circular_correlation(y, y)\n cross = self.circular_correlation(x, y)\n fig = plt.figure(figsize=(12,5))\n ax1 = fig.add_subplot(1, 3, 1)\n ax2 = fig.add_subplot(1, 3, 2)\n ax3 = fig.add_subplot(1, 3, 3)\n t = arange(0, len(x))\n ax1.plot(t, auto_1)\n ax1.set_title('Auto-correlation PRN {0}'.format(prn1))\n ax1.set_xlabel('Sample Number')\n ax2.plot(t, auto_2)\n ax2.set_title('Auto-correlation PRN {0}'.format(prn2))\n ax2.set_xlabel('Sample Number')\n ax3.plot(t, cross)\n ax3.set_title('Cross-Correlation b/t PRN {0} and PRN {1}'.format(prn1, prn2))\n ax3.set_xlabel('Sample Number')\n xlim = (-10, len(x))\n ylim = ax1.get_ylim()\n for ax in [ax1, ax2]:\n ax.set_ylim(ylim)\n ax.set_xlim(xlim)\n ax3.set_xlim(xlim)\n plt.show()", "def __cycle_consistency_loss(self, reconstructedA, reconstructedB):\n loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.realA)) + \\\n self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB - self.realB))\n\n return loss", "def difference(first, second, rf, rs, years=(1980, 2000),smooth=1, corpus='bok'):\n try:\n a_first = nb_ngram(first, years=years, smooth=smooth, corpus=corpus)\n a_second = nb_ngram(second, years=years, smooth=smooth, corpus=corpus)\n a = a_first.join(a_second) \n b_first = nb_ngram(rf, years=years, smooth=smooth, corpus=corpus)\n b_second = nb_ngram(rs, years=years, smooth=smooth, corpus=corpus)\n if rf == rs:\n b_second.columns = [rs + '2']\n b = b_first.join(b_second)\n s_a = a.mean()\n s_b = b.mean()\n f1 = s_a[a.columns[0]]/s_a[a.columns[1]]\n f2 = s_b[b.columns[0]]/s_b[b.columns[1]]\n res = f1/f2\n except:\n res = 'Mangler noen data - har bare for: ' + ', '.join([x for x in a.columns.append(b.columns)])\n return res", "def similarity_score(self, img1, img2):\n\t\t# resize into the same shape first\n\t\tif img1.shape != img2.shape:\n\t\t\tv, h = max(img1.shape[0], img2.shape[0]), max(img1.shape[1], img2.shape[1])\n\t\t\tdim = (h, v)\n\t\t\th_scale = min(img1.shape[1], img2.shape[1]) / h\n\t\t\tv_scale = min(img1.shape[0], img2.shape[0]) / v\n\t\t\timg1 = cv2.resize(img1, dim, interpolation = cv2.INTER_AREA)\n\t\t\timg2 = cv2.resize(img2, dim, interpolation = cv2.INTER_AREA)\n\t\t# # histogram\n\t\t# diff = 0\n\t\t# for c in range(3):\n\t\t# \thist1 = cv2.calcHist([img1], [c], None, [256], [0, 256])\n\t\t# \thist2 = cv2.calcHist([img2], [c], None, [256], [0, 256])\n\t\t# \tdiff += np.linalg.norm(hist1 - hist2)\n\n\t\t# HoG\n\t\tfd1, _ = hog(img1, orientations=8, pixels_per_cell=(16, 16),\n 
cells_per_block=(1, 1), visualize=True, multichannel=True)\n\t\tfd2, _ = hog(img2, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualize=True, multichannel=True)\n\t\t# Combine both\n\t\tdist = np.linalg.norm(fd1 - fd2)\n\t\taim = mean_pixel_intensity_diff(img1, img2)\n\t\tscore = 1 / (dist + aim + 1)\n\t\treturn score", "def matrix_discrepancy(centers1, rotations1, centers2, rotations2,\n angle_weight=None, center_weight=None):\n\n n = len(centers1)\n\n assert len(centers2) == n\n assert len(rotations1) == n\n assert len(rotations2) == n\n assert n >= 2\n\n if not angle_weight:\n angle_weight = 1.0\n\n if not center_weight:\n center_weight = [1.0] * n\n\n if n > 2:\n rotation_matrix, new1, mean1, RMSD, sse = \\\n besttransformation_weighted(centers1, centers2, center_weight)\n\n orientation_error = 0\n angles = []\n for r1, r2 in zip(rotations1, rotations2):\n if r1.shape[0] > 0 and r2.shape[0] > 0:\n angle = angle_of_rotation(np.dot(np.dot(rotation_matrix, r2),\n np.transpose(r1)))\n orientation_error += np.square(angle)\n discrepancy = np.sqrt(sse + angle_weight * orientation_error) / n\n\n else:\n\n R1 = np.dot(np.transpose(rotations1[1]),rotations1[0]) # rotation from nt 0 to nt1 of 1st motif\n R2 = np.dot(np.transpose(rotations2[0]),rotations2[1]) # rotation from nt 0 to nt1 of 2nd motif\n\n rot1 = np.dot(R1,R2)\n ang1 = angle_of_rotation(rot1)\n\n rot2 = np.dot(np.transpose(R1),np.transpose(R2))\n ang2 = angle_of_rotation(rot2)\n\n T1 = np.dot(centers1[1] - centers1[0],rotations1[0])\n T2 = np.dot(centers1[0] - centers1[1],rotations1[1])\n\n S1 = np.dot(centers2[1] - centers2[0],rotations2[0])\n S2 = np.dot(centers2[0] - centers2[1],rotations2[1])\n\n D1 = T1-S1\n D2 = T2-S2\n\n discrepancy = np.sqrt(D1[0]**2 + D1[1]**2 + D1[2]**2 + (angle_weight*ang1)**2)\n discrepancy += np.sqrt(D2[0]**2 + D2[1]**2 + D2[2]**2 + (angle_weight*ang2)**2)\n\n# factor = 1/(4*np.sqrt(2)) # factor to multiply by discrepancy; faster to precompute?\n\n discrepancy = discrepancy * 0.17677669529663687\n\n return discrepancy", "def concatTwoHMMs(hmm1, hmm2):\n \n concatedHMM = {}\n #M is the number of emitting states in each HMM model (could be different for each)\n #K is the sum of the number of emitting states from the input models\n \n M1 = hmm1['means'].shape[0]\n M2 = hmm2['means'].shape[0]\n K = M1 + M2\n \n concatedHMM['name'] = hmm1['name'] + hmm2['name']\n concatedHMM['startprob'] = np.zeros((K + 1, 1))\n concatedHMM['transmat'] = np.zeros((K + 1, K + 1))\n concatedHMM['means'] = np.vstack((hmm1['means'],hmm2['means']))\n concatedHMM['covars'] = np.vstack((hmm1['covars'],hmm2['covars']))\n \n \n start1 = hmm1['startprob'].reshape(-1,1)\n start2 = hmm2['startprob'].reshape(-1,1)\n \n concatedHMM['startprob'][:hmm1['startprob'].shape[0]-1,:] = start1[:-1,:]\n concatedHMM['startprob'][hmm1['startprob'].shape[0]-1:,:] = np.dot(start1[-1,0],start2)\n trans = concatedHMM['transmat']\n trans1 = hmm1['transmat']\n trans2 = hmm2['transmat']\n\n trans[:trans1.shape[0]-1,:trans1.shape[1]-1] = trans1[:-1,:-1]\n temp = trans1[:-1,-1].reshape(-1,1)\n trans[:trans1.shape[0]-1,trans1.shape[1]-1:] = \\\n np.dot(temp,start2.T)\n trans[trans1.shape[0]-1:,trans1.shape[1]-1:] = trans2\n concatedHMM['transmat'] = trans \n \n return concatedHMM", "def cross_correlation(person1, person2, framerate=25, constrain_seconds=2):\n\n # convert lists to numpy arrays\n x = np.array(person1)\n y = np.array(person2)\n\n # calculate cross correlation values\n correlations = np.correlate(x, y, \"full\")\n\n # 
trim the cross-correlation values to a range (-lag_limits : +lag_limits)\n # trim AFTER cross correlation calculation to avoid 0 padding of signals\n # assumes x and y are equal length\n lag_limits = constrain_seconds * framerate\n trimmed_correlations = correlations[len(x) - 1 - lag_limits : len(x) + lag_limits]\n\n # normalize the cross-correlation values for ease of comparison between\n # subjects\n norm_array = trimmed_correlations / (np.linalg.norm(x) * np.linalg.norm(y))\n\n # get maximum normalized cross-correlation value\n max_R = max(norm_array)\n\n # get lag of max correlation value\n max_lag = np.argmax(norm_array)\n\n # trimmed array is now 2*(lag_limits)+1 elements long\n # adjust it so that lag 0 is a complete match\n max_lag_adj = max_lag - lag_limits\n\n # Get the normalized zero lag correlation value\n zero_R = norm_array[lag_limits]\n\n return float(max_R), float(max_lag_adj), float(zero_R), norm_array.tolist()", "def match_objects(coords1,coords2,tail1=(),tail2=(),accuracy=1.):\n acc2=accuracy**2\n nc=len(coords1)\n np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n for j in range(np1):\n #dist=add.reduce((a1[:,j,NewAxis]-a2[:,:])**2)\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n if dist[i_min]<acc2:match[j]=i_min\n good=greater_equal(match,0)\n n1=compress(good,list(range(np1))) \n match=compress(good,match)\n a1=compress(good,a1)\n salida=list(a1)\n for i in range(nt1):\n if type(tail1[i][0])==type('si'):\n t=[]\n for j in n1: t.append(tail1[i][j])\n else:\n t=take(tail1[i],n1)\n salida.append(t)\n for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n return salida", "def _consist_string(col, _df1, _df2, _key1, _key2):\n\n df1, df2 = _df1.copy(), _df2.copy()\n df = pd.merge(df1, df2, left_on=_key1, right_on=_key2, how=\"inner\")\n\n if (df['%s_x' %(col)].dropna().shape[0] == 0) or (df['%s_y' %(col)].dropna().shape[0] == 0):\n if (df['%s_x' %(col)].dropna().shape[0] == 0) and (df['%s_y' %(col)].dropna().shape[0] == 0):\n error_msg = 'all nan in both table'\n elif df['%s_x' %(col)].dropna().shape[0] == 0:\n error_msg = 'all nan in table1'\n else:\n error_msg = 'all nan in table2'\n return {'column': col, 'error_msg': error_msg}\n\n df['diff_temp'] = df.apply(lambda x: \"Same\" if x['%s_x' %(col)] == x['%s_y' %(col)] else \"Diff\", axis=1)\n df['diff_temp'] = df.apply(lambda x: \"Same\" if (str(x['%s_x' % (col)]) == 'nan'\n and str(x['%s_y' % (col)]) == 'nan') else x['diff_temp'], axis=1)\n\n corr = round(df[df['diff_temp'] == \"Same\"].shape[0] * 1.0 / df.shape[0], 3)\n output = [\n {'feature': 'column', 'value': col},\n {'feature': 'corr', 'value': corr}\n ]\n\n if corr == 1:\n return {'column': col, 'result_df': [pd.DataFrame(output), pd.DataFrame()], 'corr': {'column': col, 'corr': corr}}\n else:\n diff_df = df[df['diff_temp'] == \"Diff\"].reset_index(drop=True)\n diff_df['diff_combo'] = diff_df['%s_x' %(col)].map(str) + ' -> ' + diff_df['%s_y' %(col)].map(str)\n diff_df_vc = pd.DataFrame(diff_df['diff_combo'].value_counts())\n diff_df_vc.columns = ['count']\n diff_df_vc['diff_combo'] = diff_df_vc.index.values\n diff_df_vc = 
diff_df_vc.sort_values(by='count', ascending=False).head(10)[['diff_combo', 'count']]\n\n return {'column': col, 'result_df': [pd.DataFrame(output), diff_df_vc],\n 'corr': {'column': col, 'corr': corr}}", "def xcorr(a0,b0,shift):\n\t\t\t\ta = copy.deepcopy(a0)\n\t\t\t\tb = copy.deepcopy(b0)\n\n\t\t\t\t## shift the wavelength of b\n\t\t\t\tlength = b.oriFlux.shape[0]\n\t\t\t\tif shift >= 0:\n\t\t\t\t\tmask_a = np.arange(0,shift,1)\n\t\t\t\t\ta.oriFlux = np.delete(a.oriFlux,mask_a)\n\t\t\t\t\tmask_b = np.arange(length-1,length-shift-1,-1)\n\t\t\t\t\tb.oriFlux = np.delete(b.oriFlux,mask_b)\n\n\t\t\t\telif shift < 0:\n\t\t\t\t\tmask_a = np.arange(length-1,length+shift-1,-1)\n\t\t\t\t\ta.oriFlux = np.delete(a.oriFlux,mask_a)\n\t\t\t\t\tmask_b = np.arange(0,-shift,1)\n\t\t\t\t\tb.oriFlux = np.delete(b.oriFlux,mask_b)\n\n\t\t\t\t## shift the wavelength of b\n\t\t\t\t#b.wave += shift * step\n\t\t\t\t## discard the points where the wavelength values\n\t\t\t\t## are larger\n\t\t\t\t#condition = (a.wave > b.wave[0]) & (a.wave < b.wave[-1])\n\t\t\t\t\n\t\t\t\t#a.flux = a.flux[np.where(condition)]\n\t\t\t\t#a.wave = a.wave[np.where(condition)]\n\t\t\t\t## resampling the telluric model\n\t\t\t\t#b.flux = np.array(smart.integralResample(xh=b.wave, \n\t\t\t\t#\tyh=b.flux, xl=a.wave))\n\t\t\t\t\n\t\t\t\treturn np.inner(a.oriFlux, b.oriFlux)/\\\n\t\t\t\t(np.average(a.oriFlux)*np.average(b.oriFlux))/a.oriFlux.shape[0]", "def match(desc1, desc2):\n desc1 = array([d/linalg.norm(d) for d in desc1])\n desc2 = array([d/linalg.norm(d) for d in desc2])\n\n dist_ratio = 0.6\n disc1_size = desc1.shape\n\n matchscores = zeros((desc1_size[0]), \"int\")\n desc2t = desc2.T\n for i in range(desc1_size[0]):\n dotprods = dot(desc1[i, :], desc2t)\n dotprods = 0.9999 * dotprods\n\n indx = argsort(arccos(dotprods))\n\n if arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n matchscores[i] = int(indx[0])\n\n return matchscores", "def computeCorr(pred_act,responses):\n\n num_pres,num_neurons = np.shape(responses)\n corr=np.zeros(num_neurons)\n \n for i in xrange(0,num_neurons):\n if np.all(pred_act[:,i]==0) & np.all(responses[:,i]==0):\n corr[i]=1.\n elif not(np.all(pred_act[:,i]==0) | np.all(responses[:,i]==0)):\n # /!\\ To prevent errors due to very low values during computation of correlation\n if abs(pred_act[:,i]).max()<1:\n pred_act[:,i]=pred_act[:,i]/abs(pred_act[:,i]).max()\n if abs(responses[:,i]).max()<1:\n responses[:,i]=responses[:,i]/abs(responses[:,i]).max() \n corr[i]=pearsonr(np.array(responses)[:,i].flatten(),np.array(pred_act)[:,i].flatten())[0]\n \n return corr" ]
[ "0.6506768", "0.6241487", "0.61946344", "0.6189344", "0.6137197", "0.60439324", "0.589026", "0.5836525", "0.58354324", "0.58140975", "0.5799354", "0.575254", "0.5746967", "0.5743244", "0.57305694", "0.5727684", "0.5720525", "0.56870764", "0.5681944", "0.5680134", "0.56749845", "0.5632209", "0.563108", "0.5617163", "0.56152314", "0.5592436", "0.55767226", "0.55509746", "0.554289", "0.55399644", "0.552883", "0.5506937", "0.5505442", "0.55025506", "0.5493222", "0.54834116", "0.5472677", "0.5460186", "0.5454623", "0.545387", "0.54526395", "0.54248124", "0.5408297", "0.5405356", "0.5401729", "0.53835166", "0.5373499", "0.5365089", "0.53649086", "0.5357915", "0.53422034", "0.53413445", "0.5334781", "0.52930474", "0.5284154", "0.5282242", "0.5278201", "0.52699167", "0.52652293", "0.5259999", "0.5258315", "0.52539515", "0.5249973", "0.5238249", "0.5236091", "0.5235819", "0.52287096", "0.5222573", "0.52214015", "0.5212428", "0.5210323", "0.51985055", "0.5198093", "0.5186299", "0.5180344", "0.5161097", "0.51553893", "0.5147877", "0.5138759", "0.5135229", "0.51299316", "0.51282996", "0.5119985", "0.51172704", "0.5117084", "0.511316", "0.5112945", "0.51060987", "0.51020885", "0.5093636", "0.50927156", "0.5079157", "0.50767946", "0.5075279", "0.50678587", "0.50618523", "0.5059887", "0.5057302", "0.5053369", "0.50528944" ]
0.6749743
0
represents messages as vectors which are used to calculate similarity
def find_similarity(message1, message2):
    total = 0
    for i in range(len(message1)):
        max = 0
        for j in range(len(message2)):
            message1_encoded = embed([message1[i]])
            message2_encoded = embed([message2[j]])
            sim = average_similarity(message1_encoded, message2_encoded)
            if sim > max:
                max = sim
        total += max
    return total/len(message1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wrapMsg(self,vec):\n return vec.todense()", "def plot_similarity(self) -> None:\n if isinstance(self.model, FastTextWrapper):\n self.valid_data[\"vector\"] = self.valid_data[\"text\"].apply(\n lambda x: self.model.inference(word_tokenize(x), sentence_level=True))\n else:\n self.valid_data[\"vector\"] = self.valid_data[\"text\"].apply(\n lambda x: self.model.inference(word_tokenize(x))[0])\n messages = list(self.valid_data[\"label\"])\n vectors = list(self.valid_data[\"vector\"])\n similarity_matrix(messages=messages, vectors=vectors, name=self.folder, save_path=self.base_path)", "def semantic_vector(self,words, joint_words, info_content_norm):\n\t sent_set = set(words)\n\t semvec = np.zeros(len(joint_words))\n\t i = 0\n\t for joint_word in joint_words:\n\t if joint_word in sent_set:\n\t # if word in union exists in the sentence, s(i) = 1 (unnormalized)\n\t semvec[i] = 1.0\n\t if info_content_norm:\n\t semvec[i] = semvec[i] * math.pow(self.info_content(joint_word), 2)\n\t else:\n\t # find the most similar word in the joint set and set the sim value\n\t sim_word, max_sim = self.most_similar_word(joint_word, sent_set)\n\t semvec[i] = self.PHI if max_sim > self.PHI else 0.0\n\t if info_content_norm:\n\t semvec[i] = semvec[i] * self.info_content(joint_word) * self.info_content(sim_word)\n\t i = i + 1\n\t return semvec", "def get_message_metrics(\n messages, hidden_sender, hidden_receiver, meta_data, img_features\n ):\n messages = messages.cpu().numpy()\n\n rsa_sr, rsa_si, rsa_ri, rsa_sm, topological_similarity, pseudo_tre = representation_similarity_analysis(\n img_features, meta_data, messages, hidden_sender, hidden_receiver, tre=True\n )\n\n # rsa = representation_similarity_analysis(messages, meta_data)\n l_entropy = language_entropy(messages)\n\n return (\n rsa_sr,\n rsa_si,\n rsa_ri,\n rsa_sm,\n topological_similarity,\n pseudo_tre,\n l_entropy,\n )", "def tweetToVect(tweet, dicoGlove): \n #return model.infer_vector(tweet) \n \n gArray, wSize = w.wordsToGlove(tweet.split(), dicoGlove) \n meanMatrixOverview = w.meanWords(gArray, wSize)\n \n return meanMatrixOverview", "def question_to_vec(question, embeddings, dim):\r\n\r\n words = question.split()\r\n\r\n counter = 0\r\n res = np.zeros(dim)\r\n for word in words:\r\n if word in embeddings:\r\n res += np.array(embeddings[word])\r\n counter += 1\r\n if counter!=0:\r\n return res/counter # mean of all word embeddings\r\n else:\r\n return res # vector of zeros\r", "def seq2Vec(sequences):\r\n global dict_words_n_vectors\r\n for sent in sequences:\r\n for i in range(len(sent)):\r\n if sent[i] in dict_words_n_vectors:\r\n sent[i] = dict_words_n_vectors[sent[i]]\r\n else:\r\n sent[i] = np.zeros(300)\r\n return np.array(sequences, dtype=\"float32\")", "def get_word_vector():\n\n patten = r\"[0-9\\s+\\.\\!\\/_,$%^*()?;;:-【】+\\\"\\']+|[+——!,;:。?、~@#¥%……&*()]+\"\n s1 = input(\"句子1:\").strip()\n s2 = input(\"句子2:\").strip()\n s1 = re.sub(patten, \" \", s1)\n s2 = re.sub(patten, \" \", s2)\n cut1 = jieba.cut(s1)\n cut2 = jieba.cut(s2)\n\n list_word1 = (' '.join(cut1)).split()\n list_word2 = (' '.join(cut2)).split()\n print(list_word1)\n print(list_word2)\n\n key_word = list(set(list_word1 + list_word2)) # 取并集\n print(key_word)\n\n word_vector1 = np.zeros(len(key_word)) # 给定形状和类型的用0填充的矩阵存储向量\n word_vector2 = np.zeros(len(key_word))\n\n for i in range(len(key_word)): # 依次确定向量的每个位置的值\n for j in range(len(list_word1)): # 遍历key_word中每个词在句子中的出现次数\n if key_word[i] == list_word1[j]:\n word_vector1[i] += 1\n for k in range(len(list_word2)):\n if 
key_word[i] == list_word2[k]:\n word_vector2[i] += 1\n\n print(word_vector1) # 输出向量\n print(word_vector2)\n return word_vector1, word_vector2", "def text_to_vecs(self):\n # convert word strings into word vectors\n sent_vec = []\n for w in self.sentence:\n if w in self.word_vectors.getVocab():\n sent_vec.append( self.word_vectors.getWordVectors()[w] )\n else:\n sent_vec.append( self.word_vectors.getOOVWordVector() )\n \n assert(len(self.sentence) == len(sent_vec)) \n self.sent_vec = sent_vec", "def wordSimilarityRatio(sent_1,sent_2):", "def to_vector(texto,model,idf):\n tokens = normalizer(texto).split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in tokens: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec", "def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = set(words_1).union(set(words_2))\n\t vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n\t vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)\n\t return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))", "def sentences2vec(self, sentences, unseen=None):\r\n keys = self.keys\r\n # print(sentences)\r\n if unseen:\r\n unseen_vec = self.model.wv.word_vec(unseen)\r\n\r\n # if unseen:\r\n # vec.append([self.model.wv.word_vec(y) if y in set(sentences) & keys\r\n # else unseen_vec for y in sentences])\r\n # else:\r\n # vec.append([self.model.wv.word_vec(y) for y in sentences\r\n # if y in set(sentences) & keys])\r\n vec = np.array([0 for _ in range(300)])\r\n for y in sentences:\r\n if len(vec) == 0:\r\n vec = np.array(self.model.wv.word_vec(y))\r\n elif y in self.keys:\r\n vec = vec + np.array(self.model.wv.word_vec(y))\r\n # print(len(vec))\r\n return vec", "def question_to_vec(question, embeddings):\n\n dim = embeddings['dog'].size\n result = np.zeros((dim,))\n\n words = question.split(' ')\n\n count = 0\n for word in words:\n if word not in embeddings or not len(embeddings[word]):\n continue\n result += embeddings[word][:dim]\n count += 1\n\n return result / max(count, 1)", "def doc2vec(self, text: str) -> np.array:\n # tfidf_matrix = self.tfidf.transform([text])\n # vectors = []\n # for token in self.tokenize(text):\n # if token in self.word2vec and token in self.feature_names:\n # tfidf_score = tfidf_matrix[0, self.feature_names.index(token)]\n # vectors.append(self.word2vec[token] * tfidf_score)\n vectors = [self.word2vec[token] for token in self.tokenize(text) if token in self.word2vec]\n if not vectors:\n return np.zeros(300)\n return np.mean(vectors, axis=0)", "def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))", "def solve(vectors):\n\tv = np.array(vectors)\n\tstop = 9\n\talignment_error1 = max(v[:,1])-min(v[:,1])\n\tupdate_positions(v)\n\talignment_error2 = max(v[:,1])-min(v[:,1])\n\trate_of_change = alignment_error1-alignment_error2\n\n\tupdate_positions(v, (alignment_error2-stop)/rate_of_change)\n\n\tmsg_start_y = 
min(v[:,1])\n\tmsg_start_X = min(v[:,0])\n\tmsg_width = max(v[:,0]) - msg_start_X + 1\n\tmsg_height = max(v[:,1]) - msg_start_y + 1 \n\t\n\tans = \"\"\n\n\tgrid = np.zeros(shape = (msg_width+1, msg_height+1))\n\tfor vector in v:\n\t\tgrid[vector[0]-msg_start_X, vector[1]-msg_start_y] = 1\n\n\n\tfor y in range(msg_height):\n\t\tans +=''.join(\"#\" if 1==grid[x,y] else \".\" for x in range(msg_width))\n\t\tans += \"\\n\"\n\n\treturn ans, (alignment_error1-stop)/rate_of_change", "def _WordSimAveVec(self,df,a):\r\n #Obtain the course description for the course provided and convert the string into a list of individual words.\r\n Description = df['description'][a].split()\r\n #Create a placeholder zero vector of the same size as the vector embedding.\r\n Vector = np.zeros(self.WordVecModel.layer1_size)\r\n wordCount = 0\r\n #Iterate over each word in the description.\r\n for word in Description:\r\n #If the word is in the trained vocabulary, obtain the word vector. \r\n #Continue to add the word vectors to the placeholder vector to get the running sum.\r\n if word in self.WordVecModel.wv.vocab:\r\n vector = self.WordVecModel.wv.get_vector(word)\r\n Vector +=vector\r\n #Keep track of how many word vectors (which were included in the vocabulary) were added.\r\n wordCount +=1\r\n #Calculate the mean by dividing the sum by the number of vectors.\r\n return Vector/wordCount", "def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec", "def word2vec(self, words):\n with torch.no_grad():\n words = torch.LongTensor(self.doc2token(words))\n result = self.model.embedding(words).numpy()\n return result", "def _words_to_vec(self, sentence):\n return torch.FloatTensor([self._use_embeddings(word) for word in sentence])", "def build_matrix(self):\n \n for p1 in self._properties: \n p1 = p1.get_vectorized_data()\n \n for p2 in self._properties:\n p2 = p2.get_vectorized_data()\n v1, v2 = self.prepare_vectors(p1, p2)\n self._similarity_matrix.append(cosine_similarity([v1],[v2]))", "def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec", "def similarity(self, token1, token2):\n vec1 = self.get_vector(token1)\n vec2 = self.get_vector(token2)\n assert vec1 is not None and vec2 is not None, \"Cannot compute similarity between None type vectors.\"\n if not self.normalize:\n # if model not loaded as normalized embeddings \n vec1 = vec1 / np.linalg.norm(vec1)\n vec2 = vec2 / np.linalg.norm(vec2)\n return np.dot(vec1, vec2)", "def embed(self, smi_or_mol):\n if not isinstance(smi_or_mol, Chem.Mol):\n mol = Chem.MolFromSmiles(smi_or_mol)\n else:\n mol = smi_or_mol\n wv = self.model.wv\n sentence = self.substructure(mol)\n vec = np.zeros(self.model.vector_size)\n for fp in sentence:\n if fp in wv.vocab:\n vec += wv[fp]\n return vec", 
"def compute_similarity(self, vis_feats, language_feats, gram):\n queries_dim = language_feats.dim()\n M = language_feats.size(1) if queries_dim==3 else language_feats.size(0)\n N = vis_feats.size(0)\n d = self.embed_size\n\n # If too many queries, split computation to avoid out-of-memory\n max_num_queries = 100\n if M <= max_num_queries:\n vis_feats = vis_feats.unsqueeze(1).expand(N, M, d)\n scores_gram = torch.mul(vis_feats, language_feats)\n scores_gram = scores_gram.sum(2)\n scores = scores_gram.view(-1, M)\n #scores = torch.matmul(vis_feats, lang_feats.squeeze().transpose(0,1)) #other version\n\n else:\n scores_gram = [] \n vis_feats = vis_feats.unsqueeze(1).expand(N, M, d)\n num_splits = M//max_num_queries if (M%max_num_queries)==0 else M//max_num_queries+1\n for j in range(num_splits): \n start_query = j*max_num_queries \n end_query = start_query + max_num_queries if start_query + max_num_queries <= M else M\n scores_gram_split = torch.mul(vis_feats[:,start_query:end_query,:], language_feats[:,start_query:end_query,:])\n scores_gram_split = scores_gram_split.sum(2)\n scores_gram.append(scores_gram_split)\n scores = torch.cat([scores_gram_split for scores_gram_split in scores_gram],1)\n\n return scores", "def average_similarity(messages1, messages2):\n if np.array_equal(messages2, messages1):\n return 1\n corr = np.corrcoef(messages1, messages2)\n return np.average(corr)", "def compute_similarities_from_vec(self, dataset, a):\n self.model.fit(dataset.X, a)\n return self.model.coef_", "def vectorize_tweet(tweet):\n tweet_vector = np.zeros(100)\n for word in tokenize(tweet.text):\n if word in word2vec.wv.vocab:\n tweet_vector = tweet_vector + word2vec[word]\n\n components = pca.transform(tweet_vector)\n x = components[0, 0]\n y = components[0, 1]\n\n return str(x), str(y)", "def w2v_aggregation_letters(X, length_vector=100):\n global w2v_model_3gram\n if w2v_model_3gram == None:\n w2v_model_3gram = gensim.models.KeyedVectors.load_word2vec_format(os.path.join(os.environ['NOBULL_PATH'], 'w2v_char.vec'))\n X_raw = []\n for x in X:\n x_letter = cleanText_letters(x)\n X_raw.append(x_letter)\n\n\n num_row = len(X_raw)\n\n max_matrix = np.zeros(shape=(num_row, length_vector))\n\n average_matrix = np.zeros(shape=(num_row, length_vector))\n\n for row in range(num_row):\n \n temp_text = X_raw[row] \n temp_vector = temp_text.split()\n \n unique_vector = list(set(temp_vector))\n num_index = len(unique_vector)\n \n temp_matrix = np.zeros(shape=(num_index, length_vector))\n \n j = 0\n for word in unique_vector:\n \n temp_matrix[j] = get_vector(word, w2v_model_3gram, 100)\n j += 1\n\n max_matrix[row] = np.maximum.reduce(temp_matrix)\n average_matrix[row] = np.mean(temp_matrix, axis=0)\n \n result = np.concatenate((average_matrix, max_matrix), axis=1)\n result = sparse.csr_matrix(result)\n \n header = []\n \n for i in range(length_vector):\n temp_string = \"neww2v_average_\" + str(i) + \"-th\"\n header.append(temp_string)\n \n for i in range(length_vector):\n temp_string = \"neww2v_maximum_\" + str(i) + \"-th\"\n header.append(temp_string)\n\n return result, header", "def compare_vectors(word_vector1, word_vector2):\n all_words = list(set(word_vector1).union(set(word_vector2)))\n frequency_dict1 = word_frequencies(word_vector1)\n frequency_dict2 = word_frequencies(word_vector2)\n\n frequency_vector1 = [frequency_dict1.get(word, 0) for word in all_words]\n frequency_vector2 = [frequency_dict2.get(word, 0) for word in all_words]\n\n return similarity(frequency_vector1, frequency_vector2)", "def 
get_vector_representation(self):\r\n vectorizer = CountVectorizer(lowercase=False,\r\n tokenizer=lambda x: x, # Tokenization should already be done by preprocessor\r\n stop_words=None,\r\n min_df=5,\r\n max_features=None, ## use all features\r\n ngram_range=(1, 1), ## This uses only unigram counts\r\n binary=False) ## This sets the beatures to be frequency counts\r\n pipeline = Pipeline([('vec', vectorizer), ('tfidf', TfidfTransformer())])\r\n\r\n X = pipeline.fit_transform(self.df['tokens'])\r\n Y = self.df['label'].values\r\n return ((X[:self.train_index], Y[:self.train_index]),\r\n (X[self.train_index:self.valid_index], Y[self.train_index:self.valid_index]),\r\n (X[self.valid_index:], Y[self.valid_index:]))", "def style_vectorize(main_sentences, context_sentences, use_comments):\n X_reply = []\n for sent in main_sentences:\n X_reply.append(writingstyle_vector(sent))\n\n X_reply = np.array(X_reply)\n\n if use_comments:\n print(\"Using Comment Text as well....\")\n X_comment = []\n for sent in context_sentences:\n X_comment.append(writingstyle_vector(sent))\n\n X_comment = np.array(X_comment)\n X = np.concatenate((X_reply, X_comment), axis=1)\n\n else:\n X = X_reply\n\n return X", "def create_vector(string):\n vec = {}\n words = string.split()\n\n for word in words:\n if len(word) <= NGRAM_SIZE:\n add(vec, word)\n else:\n for i in range(len(word) - NGRAM_SIZE + 1):\n add(vec, word[i : i + NGRAM_SIZE])\n\n return vec", "def vec(e):\n N = e.getShape().dim(0)\n \n rows = [i for i in range(N) for j in range(i,N)]\n cols = [j for i in range(N) for j in range(i,N)]\n vals = [ 2.0**0.5 if i!=j else 1.0 for i in range(N) for j in range(i,N)]\n\n return Expr.flatten(Expr.mulElm(e, Matrix.sparse(N,N,rows,cols,vals)))", "def vec(self):\n return np.matrix(self.val.ravel()).transpose()", "def similarity_vector(self, x):\n r_x = self.encoder(x)[1]\n return self.prototypes_layer(r_x)", "def similarity(self, word1, word2):\n common_vect = +np.ones(self.nEmbed) * 10000\n if word1 not in self.vocab and word2 in self.vocab:\n id_word_2 = self.w2id[word2]\n w1 = common_vect\n w2 = self.U[id_word_2]\n elif word1 in self.vocab and word2 not in self.vocab:\n id_word_1 = self.w2id[word1]\n w1 = self.U[id_word_1]\n w2 = common_vect\n elif word1 not in self.vocab and word2 not in self.vocab:\n w1 = common_vect\n w2 = common_vect\n else:\n id_word_1 = self.w2id[word1]\n id_word_2 = self.w2id[word2]\n w1 = self.U[id_word_1]\n w2 = self.U[id_word_2]\n\n # scalair = w1.dot(w2)/np.linalg.norm(w1,w2)\n similarity = w1.dot(w2) / (np.linalg.norm(w1) * np.linalg.norm(w2))\n # similarity = 1 / (1 + np.exp(-scalair))\n # similarity = scalair / (np.linalg.norm(w1) * np.linalg.norm(w2))\n return similarity", "def V_vect(self, distances):\n distances_norm2 = norm2(distances)\n distances_norm = np.sqrt(distances_norm2)\n isColliding = self.isColliding(distances_norm)\n\n # Collision term proportional to d**2 (cutoff)\n v_colliding = -distances_norm2/self.d_coll**2 + 1.5+0.5 * \\\n (self.d_attr/self.d_coll)**(2*self.n) - (self.d_attr/self.d_coll)**self.n\n v_colliding *= isColliding\n\n # Interaction potential: d - ln d\n v_interact = 0.5*self.d_attr**(2*self.n)/(np.identity(np.shape(distances_norm2)[1])[None, :, :]+distances_norm2)**self.n - self.d_attr**self.n/(\n np.identity(np.shape(distances_norm2)[1])[None, :, :]+distances_norm2)**(self.n/2) + 0.5\n v_interact *= (1 - isColliding)\n\n v = v_colliding + v_interact\n\n # A particle does not interact with itself\n for i in range(len(v)):\n np.fill_diagonal(v[i], 0)\n return 
v", "def similarity(self, e1, e2):\n\t\tpass", "def cosine_similarity(vec_x, vec_y):\n sim_prod = 0.0\n len_x = 0\n len_y = 0\n\n for ngram in vec_x:\n len_x += vec_x[ngram] ** 2\n\n for ngram in vec_y:\n len_y += vec_y[ngram] ** 2\n\n len_x = math.sqrt(len_x)\n len_y = math.sqrt(len_y)\n\n for ngram in vec_x:\n if ngram in vec_y:\n sim_prod += vec_x[ngram] * vec_y[ngram]\n\n return sim_prod / (len_x * len_y)", "def vectorize_sentence(sentence, model):\n final_vec = np.zeros(300, )\n count = 0\n for word in sentence:\n count += 1\n dummy_vec = np.zeros(300, )\n try:\n temp_vec = get_vector(word, model)\n final_vec += temp_vec\n except:\n final_vec += dummy_vec\n return final_vec / count", "def vectorize(self, phrase):\n return np.array([0])", "def get_similarity(df):\n count = CountVectorizer()\n count_matrix = count.fit_transform(df[\"bag_of_words\"])\n cosine_sim = cosine_similarity(count_matrix, count_matrix)\n return cosine_sim", "def getGloveoCosineSimilarity(question1, question2):\n questions = [question1, question2]\n\n ## for the sentences we need to get the count vectors\n vec = CountVectorizer(max_features=5000, stop_words=None,binary=True)\n count_vectors = vec.fit_transform(questions)\n\n ## get the vocabulary of words from the questions\n vocab_index = vec.vocabulary_\n\n ## get the index of the words and embeddings\n index_word = {v:k for k, v in vocab_index.items()}\n\n ## get the question vectors\n question_vectors = np.zeros((count_vectors.shape[0], 300))\n\n ## iterate through count vectors for each word get the embeddings\n ## for each embedding, we will then average by the number of words\n ## this will be then used for cosine similarity\n for i in range(count_vectors.shape[0]):\n row = count_vectors[i, :].toarray()\n word_ids = np.where(row > 0)[1]\n word_counts = row[:, word_ids][0]\n numWords = np.sum(word_counts)\n\n ## if there are no words, continue\n if numWords == 0:\n continue\n\n ## initialize the word embeddings to 0\n word_embeddings = np.zeros((word_ids.shape[0], 300))\n\n ## update the word embeddings\n for j in range(word_ids.shape[0]):\n word_id = word_ids[j]\n word_embeddings[j, :] = word_counts[j] * gloveDict[index_word[word_id]]\n question_vectors[i, :] = np.sum(word_embeddings, axis=0) / numWords\n\n return(cosine_similarity(question_vectors[0], question_vectors[1])[0][0])", "def test_vector_packet():\n f = Level3File(get_test_data('nids/KOUN_SDUS64_NHITLX_201305202016'))\n for page in f.graph_pages:\n for item in page:\n if 'vectors' in item:\n x1, x2, y1, y2 = np.array(item['vectors']).T\n assert len(x1)\n assert len(x2)\n assert len(y1)\n assert len(y2)", "def convert_str_list_to_vector(self, string_list: Tuple[str]) -> numpy.ndarray:\n if len(string_list) != 4:\n logger.error(\"convert_str_list_to_vector got a too short or long string list: {}. We return a zero-vector!\",\n string_list)\n return numpy.zeros(shape=(self.word2vec_embedding_size +\n self.word2vec_embedding_size / 2 +\n self.word2vec_embedding_size / 3 +\n self.word2vec_embedding_size / 4,),\n dtype=\"float32\"\n )\n ret = numpy.zeros(shape=(0,), dtype=\"float32\")\n for i, token in enumerate(string_list):\n logger.trace(\"Process the {}. 
token \\\"{}\\\"\", (i + 1), string_list[i])\n ret = numpy.concatenate([ret,\n numpy.average(\n numpy.reshape(\n self.word2vec_dict.get(string_list[i],\n numpy.negative(\n numpy.ones(\n shape=(self.word2vec_embedding_size,),\n dtype=\"float32\")\n )),\n (int(self.word2vec_embedding_size / (i + 1)), (i + 1))\n ),\n axis=1)],\n axis=0)\n return ret", "def evaluate_similarity(kv: KeyedVectors, X, y):\n mean_vector = np.mean(kv.vectors, axis=0, keepdims=True)\n missing_words = np.sum(np.isin(X, kv.index2word, invert=True))\n if missing_words > 0:\n logging.warning(\"Missing {} words. Will replace them with mean vector\".format(missing_words))\n get = np.vectorize(gensim_helper.get_vector, signature='(),(),(m)->(m)')\n timer = mytimer.Timer(\"getting vectors for words\")\n wv_x = get(X, kv, mean_vector)\n timer.stop()\n a = wv_x[:, 0]\n b = wv_x[:, 1]\n # timer = mytimer.Timer()\n # a = np_helper.normalize_over_cols_2d(a)\n # b = np_helper.normalize_over_cols_2d(b)\n # scores = np.diag(np.matmul(a, b.T))\n # timer.stop()\n # print(scores.shape)\n #\n # A = np.vstack(kv.get(word, mean_vector) for word in X[:, 0])\n # B = np.vstack(kv.get(word, mean_vector) for word in X[:, 1])\n timer = mytimer.Timer()\n scores = np.array([v1.dot(v2.T) / (np.linalg.norm(v1) * np.linalg.norm(v2)) for v1, v2 in zip(a, b)])\n timer.stop()\n # print(scores.shape)\n return scipy.stats.spearmanr(scores, y)", "def vectorize_documents(documents, model):\n document_vectors = []\n count = 0\n for document in documents:\n count += 1\n sentence_vectors = [vectorize_sentence(sentence, model) for sentence in document]\n document_vector = get_aggregate_vector(sentence_vectors)\n document_vectors.append(document_vector)\n return document_vectors", "def calculate_similarities(self) -> List[float]:\n sims = list()\n for i, r in self.sim_data.iterrows():\n if isinstance(self.model, FastTextWrapper):\n vecs = self.model.inference([r[\"Word 1\"], r[\"Word 2\"]])\n else:\n vecs = self.model.inference_batches([[r[\"Word 1\"]], [r[\"Word 2\"]]])\n vecs = [x[0] for x in vecs]\n if len(vecs) == 2:\n s = cosine_similarity([vecs[0]], [vecs[1]])[0][0]\n sims.append(s)\n else:\n sims.append(np.nan)\n self.sim_data[\"assigned_sim\"] = sims\n self.sim_data = self.sim_data.dropna()\n self.mean_error()\n self.correlation()\n return sims", "def to_s_matrix(w,v):\n pass", "def transform(self):\n result = []\n for item in self.doc_topic_matrix:\n result.append(item / np.sum(item))\n result = np.array(result)\n return result", "def decode(word_vecs, vec):\n sim = -1000\n word = str()\n for w in word_vecs:\n if 1 - spatial.distance.cosine(vec, word_vecs[w]) > sim and w != '<TOP>' and w != '<BOT>':\n word = w\n sim = 1 - spatial.distance.cosine(vec, word_vecs[w]) \n return word", "def processObservations(message, agent):\n if not message:\n print(\"Message is empty\")\n return None\n else:\n # # Check if joint values are in the expected order and size.\n if message.joint_names != agent['jointOrder']:\n # Check that the message is of same size as the expected message.\n if len(message.joint_names) != len(agent['jointOrder']):\n raise Exception\n\n return np.array(message.actual.positions) # + message.actual.velocities", "def test_matrix_structure(self):\n k = [2, 3, 4, 5, 6]\n model = self.create_chain_model(k)\n\n model.create_matrices()\n\n for edge, i in model.message_index.items():\n from_index = model.var_index[edge[0]]\n to_index = model.var_index[edge[1]]\n assert model.message_from[i] == from_index, \"Message sender index is wrong\"\n assert 
model.message_to[i] == to_index, \"Message receiver index is wrong\"\n model.message_to_map.getrow(i).getcol(to_index), \"Message receiver matrix map is wrong\"\n\n assert np.all(np.sum(model.message_to_map.todense(), axis=1) == 1), \\\n \"Message sender map has a row that doesn't sum to 1.0\"", "def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))", "def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))", "def distance(self, u, v):\n numerator = np.dot(u,v)\n denominator = np.linalg.norm(u) * np.linalg.norm(v)\n similarity = numerator/(denominator +1e-7)\n return similarity", "def labels2Vec(labels):\r\n global dict_words_n_vectors\r\n\r\n for i in range(len(labels)):\r\n if labels[i] in dict_words_n_vectors:\r\n labels[i] = dict_words_n_vectors[labels[i]]\r\n else:\r\n labels[i] = np.zeros(300)\r\n return np.array(labels, dtype=\"float32\")", "def msv(sentences,\n original_indices,\n sent_representations):\n ranking = []\n indices = []\n bases = []\n\n # Compute cluster centroid (and convert to 2d-array for cdist)\n cluster_centroid = np.mean(sent_representations, axis=0)[None, :]\n\n # Pick the right sentences from sentence list (to match representation matrix)\n reprd_sentences = [sentences[i] for i in original_indices]\n\n # Add first sentence: farthest from cluster centroid\n distances = cdist(sent_representations, cluster_centroid, metric='cosine')\n index = np.argmax(distances)\n sentence = reprd_sentences[index]\n indices.append(index)\n ranking.append((index, sentence))\n base_vector = normalize(sent_representations[index][:, np.newaxis], axis=0).ravel()\n bases.append(base_vector)\n\n # Add other sentences: greedy furthest from subspace\n for i in range(len(reprd_sentences)-1):\n if i == 50:\n break\n print(\"Starting iteration {}\".format(i))\n distances = np.array([distance_from_subspace(s, bases)\n for s in sent_representations])\n\n distances[indices] = np.nan\n\n # Find index of furthest sentence\n index = np.nanargmax(distances)\n sentence = reprd_sentences[index]\n indices.append(index)\n ranking.append((index, sentence))\n base_vector = normalize(sent_representations[index][:, np.newaxis], axis=0).ravel()\n bases.append(base_vector)\n\n # Return list of indices & sentences,\n # and replace the filtered indices with the original ones.\n ranking = [(original_indices[i], s) for i, s in ranking]\n\n return ranking", "def vernam(msg):\n global v\n\n l = len(msg)\n\n for i in range(0, len(v)):\n v[i] = 0\n\n for i in range(0, l):\n v[i] = (i_rand_a()) ^ msg[i]\n\n return bytes(v)", "def similarity(self, w1, w2):\r\n sim = self.represent(w1).dot(self.represent(w2))\r\n return sim", "def tfidfvector(self,f_matrix):\n tfidfObject = TfidfTransformer(norm=\"l2\")\n tfidfMatrix = tfidfObject.fit_transform(f_matrix)\n tfidfMatrixTemp = tfidfMatrix.toarray()\n y_label=[]\n for x in tfidfMatrixTemp.tolist():\n ylabel_temp = []\n for i,y in enumerate(x) :\n if(y != 0.0):\n ylabel_temp.append(i)\n y_label.append(ylabel_temp) \n return (tfidfMatrix,y_label)", "def unigram_representation(data):\r\n vec = CountVectorizer()\r\n vec = vec.fit(data)\r\n return vec", "def conteos_mensaje2(mensaje,letras=\"abcdefghijklmnñopqrstuvwxyz\"):\n return np.array(list(conteos_mensaje(mensaje,letras).values()))", "def embedding_similarity(model, validation_pairs):\n scores = dict()\n for pair in validation_pairs:\n author1 = pair[0]\n author2 = pair[1]\n scores[author1 + ' ' +\n author2] = cosine_similarity(model.wv[author1], 
model.wv[author2])\n return scores", "def get_vectors_and_labels_self():\n pos_t, pos_post_t = ngram.generate_n_gram_dict(ds.POS_DICT_SELF, 1)\n neg_t, neg_post_t = ngram.generate_n_gram_dict(ds.NEG_DICT_SELF, 1)\n neu_t, neu_post_t = ngram.generate_n_gram_dict(ds.NEU_DICT_SELF, 1)\n ds.POS_UNI_GRAM_SELF, is_success = commons.dict_update(ds.POS_UNI_GRAM, pos_t)\n ds.NEG_UNI_GRAM_SELF, is_success = commons.dict_update(ds.NEG_UNI_GRAM, neg_t)\n ds.NEU_UNI_GRAM_SELF, is_success = commons.dict_update(ds.NEU_UNI_GRAM, neu_t)\n ds.POS_POST_UNI_GRAM_SELF, is_success = commons.dict_update(ds.POS_POST_UNI_GRAM, pos_post_t)\n ds.NEG_POST_UNI_GRAM_SELF, is_success = commons.dict_update(ds.NEG_POST_UNI_GRAM, neg_post_t)\n ds.NEU_POST_UNI_GRAM_SELF, is_success = commons.dict_update(ds.NEU_POST_UNI_GRAM, neu_post_t)\n temp_pos_dict = ds.POS_DICT.copy()\n temp_neg_dict = ds.NEG_DICT.copy()\n temp_neu_dict = ds.NEU_DICT.copy()\n temp_pos_dict_self = ds.POS_DICT_SELF.copy()\n temp_neg_dict_self = ds.NEG_DICT_SELF.copy()\n temp_neu_dict_self = ds.NEU_DICT_SELF.copy()\n temp_pos_dict_final = {}\n temp_neg_dict_final = {}\n temp_neu_dict_final = {}\n temp_pos_dict_final.update(temp_pos_dict)\n temp_neg_dict_final.update(temp_neg_dict)\n temp_neu_dict_final.update(temp_neu_dict)\n temp_pos_dict_final.update(temp_pos_dict_self)\n temp_neg_dict_final.update(temp_neg_dict_self)\n temp_neu_dict_final.update(temp_neu_dict_self)\n pos_vec, pos_lab = load_matrix_sub(temp_pos_dict_final, cons.LABEL_POSITIVE, True)\n neg_vec, neg_lab = load_matrix_sub(temp_neg_dict_final, cons.LABEL_NEGATIVE, True)\n neu_vec, neu_lab = load_matrix_sub(temp_neu_dict_final, cons.LABEL_NEUTRAL, True)\n ds.VECTORS_SELF = pos_vec + neg_vec + neu_vec\n ds.LABELS_SELF = pos_lab + neg_lab + neu_lab\n return is_success", "def message(self, edges):\n # concat features for edge mlp\n if self.edge_feat_size > 0:\n f = torch.cat(\n [\n edges.src[\"h\"],\n edges.dst[\"h\"],\n edges.data[\"radial\"],\n edges.data[\"a\"],\n ],\n dim=-1,\n )\n else:\n f = torch.cat(\n [edges.src[\"h\"], edges.dst[\"h\"], edges.data[\"radial\"]], dim=-1\n )\n\n msg_h = self.edge_mlp(f)\n msg_x = self.coord_mlp(msg_h) * edges.data[\"x_diff\"]\n\n return {\"msg_x\": msg_x, \"msg_h\": msg_h}", "def string_vector(self):\n pass", "def scalar_product(self, u, v):\n sp = 0.0\n n1 = len(u)\n n2 = len(v)\n i = j = 0\n d = self.dictionary_db\n while (i < n1 and j < n2):\n if u[i].word_info(d).index > v[j].word_info(d).index:\n j += 1\n elif v[j].word_info(d).index > u[i].word_info(d).index:\n i += 1\n else:\n sp += self.tf_idf(u[i]) * self.tf_idf(v[j])\n i += 1\n j += 1\n\n return sp", "def to_vector(self, ordering: SortedList, definitions: dict) -> VectorReaction:\n return VectorReaction(self.lhs.to_vector(ordering),\n self.rhs.to_vector(ordering),\n self.rate,\n self.label)", "def embed(self, sequence):\n words = sequence.split(' ')\n vecs = [self._E[self._w2i[i]] if i in self._w2i else self._E[self._w2i[\"UNK\"]]\n for i in words]\n return vecs", "def cosine_similarity(u, v):\n\n distance = 0.0\n\n ### START CODE HERE ###\n # Compute the dot product between u and v (≈1 line)\n dot = np.dot(u, v)\n # Compute the L2 norm of u (≈1 line)\n norm_u = np.sqrt(np.dot(u, u))\n\n # Compute the L2 norm of v (≈1 line)\n norm_v = np.sqrt(np.dot(v, v)) ##np.linalg.norm(u)\n # Compute the cosine similarity defined by formula (1) (≈1 line)\n cosine_similarity = dot / (norm_u * norm_v)\n ### END CODE HERE ###\n\n return cosine_similarity", "def sentence_to_vec(s, embeddings_dict, stop_words, 
tokenizer):\n \n words = str(s).lower()\n words = tokenizer(words)\n # remove stop words, if any, and only alpha-numeric tokens\n words = [w for w in words if not w in stop_words and w.isalpha()]\n \n embeddings = []\n for w in words:\n if w in embeddings_dict:\n embeddings.append(embeddings_dict[w])\n \n # dimensions = 300\n if len(embeddings)==0:\n return np.zeros(300)\n\n # list of embeddings to array\n embeddings = np.array(embeddings)\n\n # normalized vector\n sum = embeddings.sum(axis=0)\n return sum/np.sqrt((sum**2).sum())", "def vectorize(self,clean_path):\n \n #load pretrained embedding model (GloVe)\n glove = spacy.load('en_core_web_lg')\n #extract unique words (aka vocabulary)\n unique_words = set()\n for d in self.docs: \n txt = d.text\n doc = glove(txt)\n for word in doc: \n if word.has_vector:\n unique_words.add(word.text)\n #change set to list type\n unique_words = list(unique_words)\n #save vector representation\n word_vectors = np.array([glove(word).vector for word in unique_words if glove(word).has_vector])\n #index vectors by corresponding word \n corpus_vectors = pd.DataFrame(word_vectors, index=unique_words)\n with open(clean_path + 'corpus_vectors.pkl', 'wb') as f:\n pickle.dump(corpus_vectors,f)\n self.vectors = corpus_vectors\n print('Saved embedding vectors.')\n return", "def semantic_match(sentence, attention_vec):\n w = 3 # window size\n dimension, n_tokens = sentence.shape\n k = np.argmax(attention_vec) # position k, of the most similar word\n window = range(max(0, k-w), min(k+w, n_tokens)) # window centered on k\n vector = np.zeros(dimension)\n for i in window:\n vector += attention_vec[i] * sentence[:, i]\n return vector", "def get_BERT_similarity(questions, paragraphs):\n E_Q = encoder_BERT(tokenize_BERT(questions))\n E_P = encoder_BERT(tokenize_BERT(paragraphs))\n sim = torch.matmul(E_Q, E_P.T)\n return sim.numpy()", "def self_similarity_matrix(feature_vectors):\n norm_feature_vectors, mean, std = at.normalize_features([feature_vectors.T])\n norm_feature_vectors = norm_feature_vectors[0].T\n sim_matrix = 1.0 - distance.squareform(\n distance.pdist(norm_feature_vectors.T, 'cosine'))\n return sim_matrix", "def get_cosine_similarity(word2vec: Word2Vec) -> np.ndarray:\n return cosine_similarity(word2vec.wv.vectors)", "def _seqs2vec(seqs):\n for n,seq in enumerate(seqs):\n if n==0: vec=_seq2vec(seq)\n else: vec+=_seq2vec(seq)\n vec /= vec.sum(axis=-1)[...,None]\n return vec", "def fashion_similarity(input_txt, features, keys):\n feature_index = keys.index(input_txt)\n input_vector = features[feature_index]\n\n scores = [similarity_function(input_vector, partner) for partner in features]\n return scores", "def _matvec(self, x):\n \n x = x.reshape((self.NH,))\n #\n # Compute kinetic energy operator\n #\n tx = self.KEO @ x \n \n # \n # Compute potential energy operator\n #\n xquad = self.basis.fbrToQuad(x,axis = 0) # xquad has shape (Nq,)\n vx = self.basis.quadToFbr(self.V * xquad) # vx has shape (NH,)\n \n return tx + vx", "def publish_messages(self, V_translation, V_rotation, terrain_grid_points, V_viz_points, frame_J1, frame_J2):\n\n ##################################################################################\n\n # Create a posestamped message containing position information\n\n # Create pose message\n msg = PoseStamped()\n\n # Header details for pose message\n msg.header.frame_id = \"map\"\n msg.header.stamp = rospy.Time.now()\n\n # Pose information\n msg.pose.position.x = V_translation[0]\n msg.pose.position.y = V_translation[1]\n msg.pose.position.z = 
V_translation[2]\n msg.pose.orientation.x = V_rotation[0]\n msg.pose.orientation.y = V_rotation[1]\n msg.pose.orientation.z = V_rotation[2]\n msg.pose.orientation.w = V_rotation[3]\n\n\n ##################################################################################\n\n # Create an multi array message containing pose information\n\n # Create array message\n array_msg = Float32MultiArray()\n array_msg.layout.dim.append(MultiArrayDimension())\n array_msg.layout.dim[0].label = \"vehicle_position\"\n array_msg.layout.dim[0].size = 3\n array_msg.layout.dim[0].stride = 3\n\n # Append data\n array_msg.data.append(V_translation[0])\n array_msg.data.append(V_translation[1])\n array_msg.data.append(V_translation[2])\n\n ##################################################################################\n\n # Create point cloud and publish to rviz\n\n # Create a point cloud from the xyz values in the array list\n header = Header()\n header.stamp = rospy.Time.now()\n header.frame_id = 'map'\n point_cloud = pcl2.create_cloud_xyz32(header, terrain_grid_points)\n\n ##################################################################################\n\n # Create a marker to vizualize the footprint of the vehicle\n viz_points = Marker()\n viz_points.header.frame_id = \"map\"\n viz_points.header.stamp = rospy.Time.now()\n viz_points.ns = \"grid_marker\"\n viz_points.id = 1\n viz_points.action = viz_points.ADD\n viz_points.type = viz_points.CUBE_LIST\n\n viz_points.scale.x = 0.01\n viz_points.scale.y = 0.01\n viz_points.scale.z = 0.01\n\n viz_points.color.a = 1.0\n viz_points.color.r = 1.0\n viz_points.color.g = 0.0\n viz_points.color.b = 0.0\n viz_points.points = V_viz_points\n\n\n ################################################################\n\n # Create pose message for joints 1 & 2\n msg1 = PoseStamped()\n msg2 = PoseStamped()\n\n # Header details for pose message\n msg1.header.frame_id = \"vehicle_frame\"\n msg1.header.stamp = rospy.Time.now()\n\n msg2.header.frame_id = \"vehicle_frame\"\n msg2.header.stamp = rospy.Time.now()\n\n # Pose information\n joint_1 = tf_conversions.toMsg(frame_J1)\n joint_2 = tf_conversions.toMsg(frame_J2)\n \n msg1.pose = joint_1\n msg2.pose = joint_2\n\n # Publish pose, vizualization, array information and point cloud\n self.pose_publisher.publish(msg)\n self.joint1_pose_publisher.publish(msg1)\n self.joint2_pose_publisher.publish(msg2)\n self.pose_array_publisher.publish(array_msg)\n self.point_cloud_publisher.publish(point_cloud)\n self.grid_publisher.publish(viz_points)", "def compute(self, old_videos, videos):\n\t\tlinear_simialarity = LinearStructuralSimilarity([1.0, 0.5, 0.5, 0.5])\n\t\ttitle_similarity = TitleSimilarity()\n\t\ttag_similarity = TagSimilarity()\n\t\tstar_similarity = StarSimilarity()\n\n\t\t#compute doc vector for new vector\n\t\tself.compute_doc_vector(videos)\n\t\t#init s_matrix for new vector\n\t\tfor i, new_video in enumerate(videos):\n\t\t\tmatrix_row = []\n\t\t\theapq.heapify(matrix_row)\n\t\t\tself.s_matrix[new_video.vid] = matrix_row\n\n\t\t#compute similarity between old video and videos\n\t\tif old_videos is not None:\n\t\t\tprint \"compute old video similarity\"\n\t\t\tfor i, old_video in enumerate(old_videos):\n\t\t\t\tprint(\"compute old video: %d\" % i)\n\t\t\t\told_vid = old_video.vid\n\t\t\t\tif old_vid in self.doc_vector and old_vid in self.s_matrix:\n\t\t\t\t\tfor j, new_video in enumerate(videos):\n\t\t\t\t\t\tnew_vid = new_video.vid\n\t\t\t\t\t\tdesc_score = 1 - cosine(self.doc_vector[old_vid], 
self.doc_vector[new_vid])\n\t\t\t\t\t\ttitle_score = title_similarity.cosin_compute(old_video.name, new_video.name)\n\t\t\t\t\t\ttag_score = tag_similarity.compute(old_video.tags, new_video.tags)\n\t\t\t\t\t\tstar_score = star_similarity.compute(old_video.stars, new_video.stars)\n\t\t\t\t\t\tlinear_simialarity.set_similarity([desc_score, title_score, tag_score, star_score])\n\t\t\t\t\t\ts = linear_simialarity.compute()\n\t\t\t\t\t\tsp = ScorePair(new_vid, s)\n\t\t\t\t\t\tif len(self.s_matrix[old_vid]) < self.topN:\n\t\t\t\t\t\t\tself.s_matrix[old_vid].append(sp)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\theapq.heappushpop(self.s_matrix[old_vid], sp)\n\t\t\t\t\t\ts_sp = ScorePair(old_vid, s)\n\t\t\t\t\t\tif len(self.s_matrix[new_vid]) < self.topN:\n\t\t\t\t\t\t\tself.s_matrix[new_vid].append(s_sp)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\theapq.heappushpop(self.s_matrix[new_vid], s_sp)\n\n\t\t#compute inner simialrity between videos\n\t\tfor i, fir_video in enumerate(videos):\n\t\t\tprint \"compute similarity documents: \" + str(i)\n\t\t\tfir_vid = fir_video.vid\n\t\t\tfor j, sec_video in enumerate(videos):\n\t\t\t\tsec_vid = sec_video.vid\n\t\t\t\tif i < j:\n\t\t\t\t\tdesc_score = 1 - cosine(self.doc_vector[fir_vid], self.doc_vector[sec_vid])\n\t\t\t\t\ttitle_score = title_similarity.cosin_compute(fir_video.name, sec_video.name)\n\t\t\t\t\ttag_score = tag_similarity.compute(fir_video.tags, sec_video.tags)\n\t\t\t\t\tstar_score = star_similarity.compute(fir_video.stars, sec_video.stars)\n\t\t\t\t\tlinear_simialarity.set_similarity([desc_score, title_score, tag_score, star_score])\n\t\t\t\t\ts = linear_simialarity.compute()\n\t\t\t\t\tsp = ScorePair(j, s)\n\t\t\t\t\tif len(self.s_matrix[fir_vid]) < self.topN:\n\t\t\t\t\t\tself.s_matrix[fir_vid].append(sp)\n\t\t\t\t\telse:\n\t\t\t\t\t\theapq.heappushpop(self.s_matrix[fir_vid], sp)\n\t\t\t\t\ts_sp = ScorePair(i, s)\n\t\t\t\t\tif len(self.s_matrix[sec_vid]) < self.topN:\n\t\t\t\t\t\tself.s_matrix[sec_vid].append(s_sp)\n\t\t\t\t\telse:\n\t\t\t\t\t\theapq.heappushpop(self.s_matrix[sec_vid], s_sp)\n\n\t\tfor vid, matrix_row in self.s_matrix.items():\n\t\t\tself.s_matrix[vid] = heapq.nlargest(self.topN, matrix_row)", "def viterbi(sent, dqml, eqml, S, V_CASE=-1):\n\n if type(sent) is list:\n sent_words = sent\n else:\n sent_words = word_tokenize(sent)\n n = len(sent_words)\n\n # define and initialize PI table\n pi = defaultdict(Counter)\n pi[0]['*'] = 1\n bp = {}\n\n for k in range(1, n+1):\n bp[k] = {}\n for v in S:\n eml = compute_eml(V_CASE, eqml, k, sent_words, v)\n if k-1 is 0: # w e S_0 -> w = '*'\n qmlr = compute_qml(dqml, v, '*')\n pival = pi[0]['*'] * qmlr * eml\n pi[k][v] = pival\n bp[k][v] = '*'\n else: # for w e S_k, S_k = S\n max_S = None\n max_w = -1\n for w in S:\n qmlr = compute_qml(dqml, v, w)\n currmax = pi[k-1][w] * qmlr * eml\n if currmax > 0 and currmax > max_w:\n max_w = currmax\n max_S = w\n # if word is unknown use tag 'NN'\n if max_S is None:\n max_w = 0.0\n max_S = UNKNOWN_TAG\n pi[k][v] = max_w\n bp[k][v] = max_S\n\n # calculate y_n\n max_y = -1\n yn = None\n for v in S:\n nextmax = pi[n][v] * compute_propability('STOP', v, dqml)\n if nextmax > max_y:\n max_y = nextmax\n yn = v\n\n # calculate y_n-1....y1\n yk1 = yn\n tagSequence = list()\n tagSequence.append(yn)\n for k in range(n-1,0,-1):\n yk = bp[k+1][yk1]\n tagSequence.append(yk)\n yk1 = yk\n\n tagSequence.reverse()\n return tagSequence", "def vectorize(self, sentences, _ngrams=1):\n\n if self.__verbose:\n print('Vectorizing', len(sentences), 'sentences')\n\n vectors = []\n\n for sent 
in sentences:\n v = []\n for gram in self.ngrams(sent, _ngrams):\n if gram in self.__dictionary:\n v.append(self.__dictionary[gram])\n else:\n v.append(self.__dictionary['unk'])\n vectors.append(v)\n\n return np.asarray(vectors)", "def cosine_similarity(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def _gu_matvec(x1, x2):\n return (x1 @ x2[..., np.newaxis])[..., 0]", "def compare_vectors(self, left, right, weights):\n\n # Get the indices of the levels\n index_1 = self.labels == int(left)\n index_2 = self.labels == int(right)\n\n # Get the vectors\n vector_1 = self.level_vectors[index_1] * weights\n vector_2 = self.level_vectors[index_2] * weights\n\n # Generate the distances\n cosine = pairwise.cosine_similarity(vector_1, vector_2).item(0)\n\n return [left, right, cosine]", "def transform(self, graph, instances):\n check_is_fitted(self, [\"model_\"])\n\n feature_vectors = []\n for instance in instances:\n feature_vectors.append(self.model_.wv.get_vector(str(instance)))\n return feature_vectors", "def transform(self, v):\n #matrix vector multiply, convert from matrix to array type at the end\n return np.array( v * self.M )", "def get_matrix_of_vectors(wv_from_bin, required_words=['softball', 'technology','street','project','fellow','maps','view','fuel','summer','clubhouse','ball','steal','soccer','driving','motor','comedy']):\n import random\n words = list(wv_from_bin.vocab.keys())\n print(\"Shuffling words ...\")\n random.shuffle(words)\n wrds = words[:10000]\n print(\"Putting %i words into word2Ind and matrix M...\" % len(words))\n word2Ind = {}\n M = []\n curInd = 0\n for w in words:\n try:\n M.append(wv_from_bin.word_vec(w))\n word2Ind[w] = curInd\n curInd += 1\n except KeyError:\n continue\n for w in required_words:\n try:\n M.append(wv_from_bin.word_vec(w))\n word2Ind[w] = curInd\n curInd += 1\n except KeyError:\n continue\n M = np.stack(M)\n print(\"Done.\")\n return M, word2Ind", "def transform(self, graph, instances):\n check_is_fitted(self, ['model_'])\n\n feature_vectors = []\n for instance in instances:\n feature_vectors.append(self.model_.wv.get_vector(str(instance)))\n return feature_vectors", "def create_matrix(self, vocab, token_pairs):\n vocab_size = len(vocab)\n graph = np.zeros((vocab_size, vocab_size), dtype='float')\n for word_1, word_2 in token_pairs:\n i, j = vocab[word_1], vocab[word_2]\n graph[i][j] = 1\n graph = graph + graph.T - np.diag(graph.diagonal()) # symmetrize matrix\n norm = np.sum(graph, axis=0) # normalize matrix\n graph = np.divide(graph, norm, where= norm!= 0) #ignore the elements that = 0 in norm\n return graph", "def _to_full_vector(self, query_vector: List[Tuple[str, float]]) -> np.array:\n terms = list(self.index.get_terms())\n terms.sort()\n vector = np.zeros(len(terms))\n\n for (term, weight) in query_vector:\n index = terms.index(term)\n vector[index] = weight\n\n return vector", "def qlist_to_vec(self, max_length, q_list):\n qvec = np.zeros(max_length)\n embed_matrix = None\n if self.use_embed:\n embed_matrix = np.zeros((max_length, EMBEDDING_SIZE))\n \"\"\" pad on the left \"\"\"\n # for i in range(max_length):\n # if i < max_length - len(q_list):\n # cvec[i] = 0\n # else:\n # w = q_list[i-(max_length-len(q_list))]\n # # is the word in the vocabulary?\n # if self.qdict.has_key(w) is False:\n # w = ''\n # qvec[i] = self.qdict[w]\n # cvec[i] = 0 if i == max_length - len(q_list) else 1\n \"\"\" pad on the right \"\"\"\n for i in range(len(q_list)):\n w = q_list[i]\n if self.use_embed:\n if w not in 
self.embed_dict:\n self.embed_dict[w] = self.nlp(u'%s' % w).vector\n embed_matrix[i] = self.embed_dict[w]\n\n if w not in self.qdict:\n w = ''\n qvec[i] = self.qdict[w]\n return qvec, embed_matrix", "def calculate_user_similarity_profile(self, ratings_vector):\r\n num_users, num_movies = self.ratings_matrix.get_shape()\r\n\r\n user_similarities = sp.dok_matrix((1, num_users))\r\n for i in range(num_users):\r\n\r\n user_similarities[0, i] = self.calculate_pairwise_user_similarity(self.ratings_matrix.getrow(i), ratings_vector)\r\n\r\n return user_similarities.tocsr()", "def lemmatize_verbs(self):\n lemmas = []\n # lemmas = \"\"\n for word in self.words:\n lemma = wn.lemmatize(word, pos='v')\n lemmas.append(lemma)\n # lemmas += f\"{lemma} \"\n self.words = lemmas\n return self", "def _forward(self, hidden_states, vector_filters):\n hidden_states = tf.expand_dims(hidden_states, axis=1)\n messages = hidden_states * vector_filters\n messages = tf.reduce_sum(messages, axis=2) # total message to atom i = sum over messages from atoms j\n\n return messages", "def incoming_text_similarity(dataset, mid, user, twidf_df, n):\n # Dataset containing all previous emails sent to person 'user'\n dataset_to_rec = dataset[dataset['recipients'].map(lambda x: user in x)]\n # Measure similarity between the message of id 'mid' and all the messages received\n dataset_similar = top_n_similarity(n, mid, dataset_to_rec, twidf_df)\n df_incoming = pd.DataFrame(columns=['mid', 'user', 'contact', 'incoming_text'])\n list_sender = np.unique(dataset['sender'].tolist())\n df_incoming['contact'] = pd.Series(list_sender)\n df_incoming['mid'] = mid\n df_incoming['user'] = user\n df_incoming['incoming_text'] = pd.Series([1 if c in dataset_similar['sender'] else -1 for c in list_sender])\n return df_incoming" ]
[ "0.6673192", "0.6187192", "0.6166319", "0.6151653", "0.60369694", "0.5990431", "0.59603184", "0.593852", "0.59355754", "0.5926192", "0.5897822", "0.58214325", "0.577277", "0.5761639", "0.57491434", "0.57217455", "0.570953", "0.5705797", "0.56846756", "0.56698257", "0.5666215", "0.56567603", "0.5648202", "0.56411135", "0.5607599", "0.55533266", "0.55509007", "0.5522629", "0.55164415", "0.5516118", "0.5508145", "0.54989755", "0.54735637", "0.5473134", "0.54395896", "0.543838", "0.5435481", "0.5415621", "0.5412161", "0.5407772", "0.54058546", "0.53969383", "0.53956014", "0.5395391", "0.53683704", "0.53663707", "0.5358723", "0.53545916", "0.5352264", "0.53512466", "0.53416663", "0.533883", "0.533853", "0.5325198", "0.5323938", "0.5319963", "0.5319963", "0.5316997", "0.5305381", "0.53034043", "0.5301553", "0.52922165", "0.52879375", "0.5281854", "0.5280043", "0.52794045", "0.527908", "0.52650034", "0.5250067", "0.52363193", "0.52311957", "0.5225921", "0.52187204", "0.52113265", "0.5205429", "0.52011514", "0.5194892", "0.5194562", "0.5194293", "0.5191068", "0.519039", "0.51896936", "0.5188928", "0.5182135", "0.5179426", "0.51786673", "0.5178429", "0.51772267", "0.51714534", "0.5170525", "0.51685786", "0.5162638", "0.5161048", "0.51543665", "0.5153416", "0.51532274", "0.51503396", "0.51486576", "0.5145913", "0.5144187" ]
0.6307364
1
An iterator that will in turn yield all drawable curves in the form of (kind, name, ds, style) tuples (where kind is one of 'algorithm', 'oracle', 'unifpf', 'strategy').
def _pds_plot_iterator(pds, dim, funcId):
    i = 0
    for (algname, ds) in pds.algds_dimfunc((dim, funcId)):
        yield ('algorithm', algname, ds, _style_algorithm(algname, i))
        i += 1
    yield ('oracle', 'oracle', pds.oracle((dim, funcId)), _style_oracle())
    yield ('unifpf', 'eUNIF', pds.unifpf().dictByDimFunc()[dim][funcId][0], _style_unifpf())
    i = 0
    for (stratname, ds) in pds.stratds_dimfunc((dim, funcId)):
        yield ('strategy', stratname, ds, _style_strategy(stratname, i))
        i += 1
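A minimal consumer sketch for the generator above; it relies only on the (kind, name, ds, style) tuple shape that is yielded. The grouping helper and the example call are assumptions added for illustration, not part of the dataset row.

from collections import defaultdict

def curves_by_kind(pds, dim, funcId):
    # Hypothetical helper: bucket every drawable curve by its kind
    # ('algorithm', 'oracle', 'unifpf' or 'strategy').
    grouped = defaultdict(list)
    for kind, name, ds, style in _pds_plot_iterator(pds, dim, funcId):
        grouped[kind].append((name, ds, style))
    return grouped

# e.g. curves_by_kind(pds, 10, 1)['strategy'] -> [(stratname, ds, style), ...]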
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_svgs(self):\n for name in self.parent.layers:\n yield name, self.parent.layers[name]\n for elem in self.parent.elements:\n if isinstance(elem, SVG):\n yield None, elem", "def efficiency_curves(self):\n for key in self._efficiency_curves:\n yield key, self._data[key]", "def iterCurves(self):\n for c in range(self.length()):\n yield self.curve(c)", "def style_lines(self):\n self.parent.finalize()\n for name, svg in self.iter_svgs(): # recurse here\n for line in svg._meta.style_lines():\n yield line\n if isinstance(self.parent.style, str):\n yield self.parent.style\n else:\n for cls in self.parent.style:\n yield \"%s {\" % str(cls)\n for key, value in self.parent.style[cls].items():\n yield \" %s: %s;\" % (key, value)\n yield \"}\"", "def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]", "def lines(self):\n for pair in pairs(self.points):\n yield Line(pair, shape=self)", "def _get_iterator(self, dataset_type, eval_mode, **kwargs):", "def __iter__(self):\n if self.use_dic:\n for data in sorted(self.dic):\n self.data = data\n for activity in sorted(self.dic[data]):\n self.activity = activity\n for imsize in sorted(self.dic[data][activity]):\n self.imsize = imsize\n self.allimgs, self.alllabels = [], []\n for img in sorted(self.dic[data][activity][imsize]):\n self.img = img\n self.labels = self.dic[data][activity][imsize][img]\n if self.imlist:\n self.allimgs.append(self.img)\n self.alllabels.append(self.labels)\n else:\n yield data, activity, imsize, img, self.labels\n self.i += 1\n if self.imlist:\n yield data, activity, imsize, self.allimgs, self.alllabels\n self.i += 1\n else:\n for data in sorted(self.dic):\n self.img = data\n self.labels = self.dic[data]\n yield self.img, self.labels\n self.i += 1", "def __iter__(self):\n for key in self.sprite_order:\n if key not in self.sprite_groups:\n # abstract type\n continue\n for s in self.sprite_groups[key]:\n yield s", "def parse_and_construct_graphic_layer(ds):\r\n graphic_layers = list()\r\n for item in ds.SegmentSequence:\r\n layer = {\r\n \"GraphicLayer\": str(item.SegmentDescription).upper(),\r\n \"GraphicLayerOrder\": item.SegmentNumber,\r\n \"GraphicLayerRecommendedDisplayCIELabValue\": [49512, 38656, 52736]\r\n }\r\n graphic_layers.append(layer)\r\n return graphic_layers", "def iterdescriptors(self):", "def symbols(self):\n # get the names(identifiers) of all curves in the graph:\n curvenames = self.g.element_show()\n # foreach curve, add a diamond symbol, filled with the\n # color of the curve ('defcolor') and with a size of 2:\n for curvename in curvenames:\n self.g.element_configure(curvename, symbol='diamond',\n outlinewidth=2, fill='defcolor')", "def __iter__(self):\n for feature in itertools.izip(self.shapes, self.records):\n yield feature", "def iter_implementations(self, opt):\n opt_desc = self.opt_dict[opt]\n for imp in self.imp_dict[opt_desc['imptype']]:\n yield imp", "def draw_stratas(enum_func):\n maximum_strata_index = 7\n graph = ot.Graph(\"\", \"$\\\\alpha_1$\", \"$\\\\alpha_2$\", True)\n if enum_func.__class__.__name__ == \"LinearEnumerateFunction\":\n graph.setTitle(\"Linear enumeration rule\")\n elif enum_func.__class__.__name__ == \"HyperbolicAnisotropicEnumerateFunction\":\n graph.setTitle(f\"q={enum_func.getQ()}\")\n offset = 0\n for strata_index in range(maximum_strata_index):\n strata_cardinal = enum_func.getStrataCardinal(strata_index)\n sample_in_layer = [enum_func(idx + offset) for idx in range(strata_cardinal)]\n offset += strata_cardinal\n cloud = 
ot.Cloud(sample_in_layer)\n cloud.setLegend(str(strata_index))\n cloud.setPointStyle(\"circle\")\n graph.add(cloud)\n palette = ot.DrawableImplementation.BuildDefaultPalette(maximum_strata_index)\n graph.setColors(palette)\n graph.setIntegerXTick(True)\n graph.setIntegerYTick(True)\n graph.setLegendPosition(\"topright\")\n return graph", "def iter_segments(dataset: Dataset) -> Iterator: # noqa: E501\n if not hasattr(dataset, 'PixelData'):\n raise AttributeError(\n 'Data set does not contain a Pixel Data attribute.'\n )\n segment_description_lut = {\n int(item.SegmentNumber): item\n for item in dataset.SegmentSequence\n }\n segment_number_per_frame = np.array([\n int(item.SegmentIdentificationSequence[0].ReferencedSegmentNumber)\n for item in dataset.PerFrameFunctionalGroupsSequence\n ])\n pixel_array = dataset.pixel_array\n if pixel_array.ndim == 2:\n pixel_array = pixel_array[np.newaxis, ...]\n for i in np.unique(segment_number_per_frame):\n indices = np.where(segment_number_per_frame == i)[0]\n yield (\n pixel_array[indices, ...],\n tuple([\n dataset.PerFrameFunctionalGroupsSequence[index]\n for index in indices\n ]),\n segment_description_lut[i],\n )", "def drawable_iterable(drawable, unpack_stacks = False, reverse_stacks = False):\n # Check if we are using a THStack\n if is_stack(drawable) and unpack_stacks:\n # Extract histograms from the stack\n result = list(drawable.GetHists())\n\n # Reverse if necessary\n if reverse_stacks:\n result.reverse()\n\n return result\n elif is_histo(drawable) or is_graph(drawable) or is_line(drawable):\n return (drawable,)\n\n # Already an iterable\n return drawable", "def untyped_curves(self):\n defined = set(self._data.keys())\n untyped = defined.difference(self._pump_curves, self._efficiency_curves, \n self._headloss_curves, self._volume_curves)\n for key in untyped:\n yield key, self._data[key]", "def pixelIter():\n\t\t\tx,y,i = 0,0,0\n\t\t\tfor i,c in enumerate(space):\n\t\t\t\tx = i % w\n\t\t\t\ty = i / w\n\t\t\t\tisSolid = (c=='#')\n\t\t\t\tyield x,y,i,isSolid", "def __get_curves_lips(self, uol, uil, lol, lil):\n uol_curve = self.__draw_curve(uol)\n uil_curve = self.__draw_curve(uil)\n lol_curve = self.__draw_curve(lol)\n lil_curve = self.__draw_curve(lil)\n return uol_curve, uil_curve, lol_curve, lil_curve", "def get_available_figures(self):\n return sorted((method[5:], func) \\\n for method, func in self.__class__.__dict__.iteritems() \\\n if method.startswith(\"plot_\") and callable(func))", "def volume_curves(self):\n for key in self._volume_curves:\n yield key, self._data[key]", "def parse_graphs(self, graph_iterator):\n #filter_cache = make_graph_filter_cache() \n for graph in graph_iterator: \n raw_chart = self.parse(graph)\n # The raw chart contains parser operations, need to decode the parse forest from this \n res = td_chart_to_cky_chart(raw_chart)\n yield res", "def _pathology_iterator(graph):\n for u, v in _iter_pairs(graph):\n if graph.node[u][FUNCTION] == PATHOLOGY:\n yield u\n if graph.node[v][FUNCTION] == PATHOLOGY:\n yield v", "def iterator(self):\n return _osgAnimation.VertexList_iterator(self)", "def iter(self, iters, executor=None):\n deps_by_kind = self.dependencies_by_kind()\n\n if len(deps_by_kind) > 1:\n # Sync the iterators that provide time info for each data kind\n # (first in deps_by_kind lists) by endtime\n iters.update(strax.sync_iters(\n partial(strax.same_stop, func=strax.endtime),\n {d[0]: iters[d[0]]\n for d in deps_by_kind.values()}))\n\n # Convert to iterators over merged data for each kind\n new_iters = 
dict()\n for kind, deps in deps_by_kind.items():\n if len(deps) > 1:\n synced_iters = strax.sync_iters(\n strax.same_length,\n {d: iters[d] for d in deps})\n new_iters[kind] = strax.merge_iters(synced_iters.values())\n else:\n new_iters[kind] = iters[deps[0]]\n iters = new_iters\n\n if self.rechunk_input:\n iters = self.rechunk_input(iters)\n\n pending = []\n for chunk_i in itertools.count():\n try:\n if not self.check_next_ready_or_done(chunk_i):\n # TODO: avoid duplication\n # but also remain picklable...\n self.close(wait_for=tuple(pending))\n return\n compute_kwargs = {k: next(iters[k])\n for k in deps_by_kind}\n except StopIteration:\n self.close(wait_for=tuple(pending))\n return\n except Exception:\n self.close(wait_for=tuple(pending))\n raise\n\n if self.parallel and executor is not None:\n new_f = executor.submit(self.do_compute,\n chunk_i=chunk_i,\n **compute_kwargs)\n pending = [f for f in pending + [new_f]\n if not f.done()]\n yield new_f\n else:\n yield self.do_compute(chunk_i=chunk_i, **compute_kwargs)", "def _plot_curves(self, curves_dict):\n for name, curve in curves_dict.items():\n fig = plt.figure()\n ax = plt.gca()\n\n plot_type = name.split('_')[-1]\n ax.set_title(plot_type)\n if plot_type == 'PRC':\n precision, recall, _ = curve\n ax.step(recall, precision, color='b', alpha=0.2, where='post')\n ax.fill_between(recall, precision, step='post', alpha=0.2, color='b')\n ax.set_xlabel('Recall')\n ax.set_ylabel('Precision')\n elif plot_type == 'ROC':\n false_positive_rate, true_positive_rate, _ = curve\n ax.plot(false_positive_rate, true_positive_rate, color='b')\n ax.plot([0, 1], [0, 1], 'r--')\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n else:\n ax.plot(curve[0], curve[1], color='b')\n\n ax.set_ylim([0.0, 1.05])\n ax.set_xlim([0.0, 1.0])\n\n fig.canvas.draw()\n\n curve_img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n curve_img = curve_img.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n self.summary_writer.add_image(name.replace('_', '/'), curve_img, global_step=self.global_step)", "def _iter_distributions(self) -> Iterator[\"BaseDistribution\"]:\n raise NotImplementedError()", "def __iter__(self):\n return self.cli.essids.essids().__iter__()", "def get_layer_names_gen(self):\n for lyr in self.get_layer_names_as_array():\n yield lyr", "def draw_schem(self):\n schem_cursor = DrawerCursor(self)\n while schem_cursor.draw_comps_here():\n pass\n while self._state: # TODO Add real stack ability\n self.pop()\n schem_cursor.step_back()\n schem_cursor.draw_comps_here('down')\n self.draw()\n self.save('resources/khiri.png')", "def get_curves(p):\n curve_list = []\n for well in p:\n curves = well.data.keys()\n for c in curves:\n curve_list.append(c)\n return sorted(set(curve_list))", "def headloss_curves(self):\n for key in self._headloss_curves:\n yield key, self._data[key]", "def pixelIter():\n\t\t\tx,y,i = 0,0,0\n\t\t\tfor i,c in enumerate(space):\n\t\t\t\tx = i % w\n\t\t\t\ty = i / w\n\t\t\t\tisSolid = (c=='#')\n\t\t\t\tyield x,y,i,isSolid\n\t\t\t\tprintSpace(x,y) # print state for debugging", "def get_elements():\n elements = { 'Shapes':[], 'Strokes':[] }\n curves_knob = rp_node['curves']\n root_layer = curves_knob.rootLayer\n elements = parse_layer(root_layer, elements, [root_layer])\n print elements", "def task_generate_sc_figure4():\n for dept in Department.list():\n yield {\n 'name': dept.name,\n 'file_dep': [\n dept.block_groups_path,\n dept.police_precincts_path,\n ],\n 'targets': [dept.sc_figure4_path],\n 
'actions': [dept.generate_sc_figure4],\n 'clean': True,\n }", "def _generators(group):\n gens = []\n for sym in group.symbols:\n elm = ((sym, 1),)\n gens.append(group.dtype(elm))\n return tuple(gens)", "def defs_lines(self):\n self.parent.finalize()\n for name, svg in self.iter_svgs(): # recurse here\n for line in svg._meta.defs_lines():\n yield line\n for line in self.parent.defs:\n yield line\n for name, svg in self.parent.masks.items():\n yield \"\"\"<clipPath id=\"%s\">\"\"\" % name\n for line in svg._meta.body_lines():\n yield line\n yield \"\"\"</clipPath>\"\"\"", "def linestyle_generator(colors=_colors, lines=_lines,\n markers=_markers, hollow_styles=_marker_types):\n\n # If both lines and markers are empty or None, do nothing\n is_nothing = False\n if not lines and not markers:\n is_nothing = True\n\n if colors:\n color_cycle = itertools.cycle(colors)\n else: # default line color is almost_black\n color_cycle = itertools.cycle([almost_black])\n\n if lines:\n line_cycle = itertools.cycle(lines)\n else: # empty list or None supplied, disable line connection\n line_cycle = itertools.cycle([''])\n\n if markers and hollow_styles: # solid and hollow markers\n full_markers = itertools.product(markers, hollow_styles)\n elif markers and not hollow_styles: # all solid markers\n full_markers = itertools.product(markers, [None])\n else: # no markers\n full_markers = itertools.product(['None'], [None])\n marker_cycle = itertools.cycle(full_markers)\n\n while True:\n if not is_nothing:\n # Use next() instead of .next to work with both Python 2 & 3\n color = next(color_cycle)\n linestyle = next(line_cycle)\n marker, hollow = next(marker_cycle)\n if hollow is None: # only filled markers\n mew = 1\n mec = color\n mfc = color\n elif hollow: # make hollow markers\n mew = 1\n mec = color\n mfc = 'None'\n else: # otherwise, make filled markers\n mew = 1\n mec = color\n mfc = color\n yield {'color': color, 'linestyle': linestyle,\n 'marker': marker, 'mew': mew, 'mec': mec, 'mfc': mfc}\n else:\n yield {}", "def task_generate_sc_figure2():\n for dept in Department.list():\n yield {\n 'name': dept.name,\n 'file_dep': [\n dept.census_tracts_path,\n dept.police_precincts_path,\n ],\n 'targets': [dept.sc_figure2_path],\n 'actions': [dept.generate_sc_figure2],\n 'clean': True,\n }", "def generate_test_images():\n results = {}\n for antialias, aa_descriptor in antialias_options:\n for canvas, canvas_descriptor in canvas_options:\n for func in (generate_test_001,\n generate_test_002,\n generate_test_003,\n generate_test_004,\n generate_test_005,\n generate_test_007,\n ):\n points, name = func()\n aggregators = draw_lines(canvas, points, antialias)\n img = shade(aggregators, cmap=cmap01)\n description = \"{}_{}_{}\".format(\n name, aa_descriptor, canvas_descriptor)\n results[description] = img\n\n for func in (generate_test_006, ):\n points, name = func()\n aggregator = draw_multi_segment_line(canvas, points, antialias)\n img = shade(aggregator, cmap=cmap01)\n description = \"{}_{}_{}\".format(\n name, aa_descriptor, canvas_descriptor)\n results[description] = img\n return results", "def task_generate_sc_figure3():\n for dept in Department.list():\n yield {\n 'name': dept.name,\n 'file_dep': [\n dept.block_groups_path,\n dept.police_precincts_path,\n ],\n 'targets': [dept.sc_figure3_path],\n 'actions': [dept.generate_sc_figure3],\n 'clean': True,\n }", "def read_scatter_curves(curve_files, units, param):\n\n curves = []\n\n for curve_file in curve_files:\n\n curve = {}\n\n curve['file'] = curve_file\n # Read in 
the scattering curve\n # Modeling is performed in angstroms so convert files in nm to a\n if units == 'nm':\n curve['data'] = load_scatter_curve(curve_file,\n param['rfac']['qmin'] * 10.0,\n param['rfac']['qmax'] * 10.0)\n curve['data'][:, 0] = curve['data'][:, 0] / 10.0\n else:\n curve['data'] = load_scatter_curve(curve_file,\n param['rfac']['qmin'],\n param['rfac']['qmax'])\n\n if 'rxs2' in param:\n curve.update(\n get_curve_descriptors(\n curve['data'],\n param['rg']['fitmin'],\n param['rg']['fitmax'],\n param['rxs1']['fitmin'],\n param['rxs1']['fitmax'],\n param['rxs2']['fitmin'],\n param['rxs2']['fitmax']))\n else:\n curve.update(get_curve_descriptors(curve['data'],\n param['rg']['fitmin'],\n param['rg']['fitmax'],\n param['rxs1']['fitmin'],\n param['rxs1']['fitmax']))\n\n curves.append(curve)\n\n return curves", "def iter(self, iters, executor=None):\n deps_by_kind = self.dependencies_by_kind()\n\n # Merge iterators of data that has the same kind\n kind_iters = dict()\n for kind, deps in deps_by_kind.items():\n kind_iters[kind] = strax.merge_iters(\n strax.sync_iters(\n strax.same_length,\n {d: iters[d] for d in deps}))\n\n if len(deps_by_kind) > 1:\n # Sync iterators of different kinds by time\n kind_iters = strax.sync_iters(\n partial(strax.same_stop, func=strax.endtime),\n kind_iters)\n\n iters = kind_iters\n pending = []\n yield from self._inner_iter(iters, pending, executor)\n self.cleanup(wait_for=pending)", "def vertex_iterator(self):\n for X in self.fe.L:\n for x in self.K.unit_group:\n yield (X, x)", "def generate(self, diagram):", "def draw_collection(collection):\n args = [FrameArtist._get_args(primitive) for primitive in collection]\n points, lines = zip(*args)\n lines = itertools.chain(*lines)\n geometry = [None, None]\n geometry[0] = compas_ghpython.draw_points(points)\n geometry[1] = compas_ghpython.draw_lines(lines)\n return geometry", "def all_lightcurves():\n for fname in listdir(join(DATA_PATH, 'lightcurves')):\n try:\n locus_id, fmt = fname.split('.')\n assert fmt == 'lc'\n yield fetch_lightcurve(locus_id)\n except Exception as e:\n print(f'{repr(e)} fetching {fname}')", "def curves(self):\n return self._curve_reg", "def __iter__(self):\n for point in self.points:\n yield point", "def get_all_drawables(self): \n drawables = []\n if len(self.component_list) > 0:\n for c in self.component_list:\n drawables.append(c.get_drawables())\n return drawables", "def draw(self):\n duplets = list(self._chunker(self.attr, 2))\n colors = self._set_colors(duplets)\n\n for i, duplet in enumerate(duplets, start=1):\n self.chart.make_scatter(self.source, duplet[0], duplet[1], 'circle', colors[i - 1])\n\n if i < len(duplets):\n self.create_plot_if_facet()\n\n self.reset_legend()", "def __iter__(self):\n return iter(self._datasets)", "def __iter__(self) -> Iterable[Union[Point, LabwareLike]]:\n return iter(\n (\n self._point,\n self._labware,\n )\n )", "def roi_curves(self, data):\n if not data or not any(len(d) for d in data.values()):\n self.roi_traces = None\n default_curve = hv.Curve([], 'Spectrum', 'CL').opts(color='red') \n return hv.NdOverlay({0: default_curve}).opts(show_legend=False) # code breaks without using a curve in ndoverlay\n \n curves = {}\n data = zip(data['x0'], data['x1'], data['y0'], data['y1'])\n self.roi_traces = []\n for i, (x0, x1, y0, y1) in enumerate(data):\n selection = self.xds.sel(x=slice(x0, x1), y=slice(y1, y0))\n selection_avg = selection.mean(['x','y'])\n self.roi_traces.append(selection_avg)\n if self.roi_toggle == 'Trans': # apparently param knows 
when this changes without having to make it a 'stream' var\n if i == 0:\n substrate = selection_avg.copy()\n selection_avg /= substrate\n curves[i] = hv.Curve(selection_avg)\n \n color_cycle_opts = opts.Curve(color= hv.Cycle(self.color_cycle))\n return hv.NdOverlay(curves).opts(color_cycle_opts)", "def _parse_curves(block, **kwargs):\n count = int(block.pop(0))\n\n curves = []\n for i in range(count):\n for param in ['mod_reduc', 'damping']:\n length, name = parse_fixed_width([(5, int), (65, to_str)], block)\n curves.append(\n site.NonlinearProperty(\n name,\n parse_fixed_width(length * [(10, float)], block),\n parse_fixed_width(length * [(10, float)], block),\n param\n )\n )\n\n length = int(block[0][:5])\n soil_types = parse_fixed_width((length + 1) * [(5, int)], block)[1:]\n\n # Group soil type number and curves together\n return {(soil_types[i // 2], c.param): c for i, c in enumerate(curves)}", "def get_components_drawables(self):\n # print self.component_list\n print len(self.component_list)\n for c in self.component_list:\n return c.get_drawables()", "def _create_node_iterator(self) -> Iterator[GraphNode]:\n return\n yield", "def _generate_examples(self,\n split: Text = 'train'\n ) -> Iterator[Tuple[Text, Dict[Text, Any]]]:\n with tf.io.gfile.GFile(self.splits[split]) as split_file: # pytype: disable=attribute-error # gen-stub-imports\n for i, img_class_line in enumerate(split_file.read().split('\\n')):\n if not img_class_line:\n continue\n key = f'{self.builder_config.name}_{split}_{i:08d}'\n\n example_path, example_class = img_class_line.split(' ')\n example_fullpath = os.path.join(self.img_path, example_path) # pytype: disable=attribute-error # gen-stub-imports\n\n yield key, {'image': example_fullpath, 'label': int(example_class)}", "def iterator(self):\n yield", "def get_drawables(self):\n to_draw = []\n for k,v in self._to_draw.items():\n if isinstance(v,Iterable):\n for i in v:\n to_draw.append(i)\n else:\n to_draw.append(v)\n return to_draw", "def draw_all(self, style=\"ubl\", thumbs=False):\n if style == \"ubl\":\n self.col_map = ubl_col_map\n elif style == \"pfsmsff\":\n self.col_map = acast_col_map\n elif style == \"unk_domains\":\n self.col_map = unk_domains\n elif style == \"ptp\":\n self.col_map = ptp_map\n\n for n, item in enumerate(self.data):\n if style == \"ubl\":\n self.draw(item, self.__draw_ubl_style, thumbs)\n elif style == \"pfsmsff\":\n self.draw(item, self.__draw_db_style, thumbs)\n elif style == \"gen\":\n self.draw(item, self.__draw_gen_style, thumbs)\n elif style == \"unk_domains\":\n self.draw(item, self.__draw_unk_style, thumbs)\n elif style == \"ptp\":\n self.draw(item, self.__draw_ubl_style, thumbs)\n\n return(None)", "def illustratorCurves(*args, caching: bool=True, constructionHistory: bool=True,\n illustratorFilename: AnyStr=\"\", nodeState: Union[int, bool]=0, object:\n bool=True, tolerance: Union[float, bool]=0.001, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def data_shapes(self):", "def SeparateCurves2(context):\n Curves = []\n active = context.active_object\n splines = active.data.splines\n\n for idx, spline in enumerate(splines): \n\n CurveName = active.name + \"_\" + str(idx)\n StrandVerts = [v.co for v in spline.points]\n # create Object\n curveOB = CreateNewCurveFromPoints(StrandVerts, CurveName)\n Curves.append(curveOB)\n\n #delete original curve that had all the curves in one object\n bpy.ops.object.select_all(action='DESELECT')\n bpy.data.objects[active.name].select = True\n 
bpy.ops.object.delete()\n\n return Curves", "def gen_graph():\n if config_pagination:\n gdata = tgraph.call_graph(offset=offset, limit=limit)\n else:\n gdata = tgraph.call_graph(start=start, end=end, contineous=contineous)\n\n for data in gdata:\n yield data", "def _curveNamesToList(settings):\n return [getattr(GroupName, val) for val in settings.eccCurves]", "def _dataset_split_generators(self):\n raise NotImplementedError()", "def aesthetics(cls: type[geom]) -> set[str]:\n main = cls.DEFAULT_AES.keys() | cls.REQUIRED_AES\n other = {\"group\"}\n # Need to recognize both spellings\n if \"color\" in main:\n other.add(\"colour\")\n if \"outlier_color\" in main:\n other.add(\"outlier_colour\")\n return main | other", "def __iter__(self):\n it = self.ctx.Iterator5(\n self.addr,\n ScType.EdgeDCommonConst,\n ScType.Unknown,\n ScType.EdgeAccessConstPosPerm,\n self.relAddr)\n\n return Iterator(it)", "def iterator(self):\n return _osgAnimation.mapVertexInfluence_iterator(self)", "def gen_graph(self):", "def __iter__(self):\n keys = [CoolProp.iDmass,CoolProp.iHmass,CoolProp.iP,CoolProp.iSmass,CoolProp.iT]\n for key in sorted(keys):\n yield key", "def edges_iter(self) -> Generator:\n for u, v, k, data in self.graph.edges(keys=True, data=True):\n yield u, v, k, data", "def get_paintings(orb):\n paintings = []\n with open('data/data.csv', 'r') as f:\n f_reader = csv.DictReader(f)\n for row in f_reader:\n im = cv2.imread(f\"{PAINTINGS_FOLDER}/{row['Image']}\")\n kp, descr = compute_kp_descr(im, orb)\n image = Image(filename=row['Image'], image=im, descriptors=descr, keypoints=kp)\n image.title = row['Title']\n image.author = row['Author']\n image.room = row['Room']\n paintings.append(image)\n return paintings", "def __iter__(self):\n return iter(self._axl_data)", "def draw_all_plots(self):\n\n plot_names = []\n e = self.find_di_tri(self.lang_found)\n letter_dct = e[1]\n di_dct = e[2]\n tri_dct = e[3]\n\n plot_name = self.lang_found + '_letters'\n self.wykres(letter_dct, 'Wyres liter', 'litera', plot_name, 0)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_digram'\n self.wykres(di_dct, 'Wykres digramów', 'digram', plot_name, 1)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_trigram'\n self.wykres(tri_dct, 'Wykres trigramów', 'trigram', plot_name, 2)\n plot_names.append(plot_name)\n\n for cnt, plt_scn in enumerate(self.plot_scenes):\n pic = QtGui.QPixmap(self.img_dir + '/' + plot_names[cnt] + \".png\")\n plt_scn.setPixmap(pic.scaled(427, 320, Qt.KeepAspectRatio))", "def iter_keypoints_labels(self):\n return iter(self.schema)", "def colorCurvesByCelltype(self):\n for curve, path in self.curve_path_dict.items():\n celltype = path.rpartition('/')[-1].rpartition('_')[0]\n style = curve.style()\n color = None\n try:\n color = self.celltype_color_dict[celltype]\n except KeyError:\n print celltype, 'not in celltype-color dict'\n continue\n # if style != curve.NoCurve: # line plot, not raster\n pen = curve.pen()\n pen.setColor(color) \n curve.setPen(pen)\n # else:\n pen = curve.symbol().pen()\n pen.setColor(color) \n symbol = curve.symbol()\n symbol.setPen(pen)\n curve.setSymbol(symbol)\n self.replot()", "def enumerated_design_space() -> EnumeratedDesignSpace:\n x = RealDescriptor('x', lower_bound=0.0, upper_bound=1.0, units='')\n color = CategoricalDescriptor('color', categories=['r', 'g', 'b'])\n data = [dict(x=0, color='r'), dict(x=1.0, color='b')]\n return EnumeratedDesignSpace('enumerated', description='desc', descriptors=[x, color], data=data)", "def pdf_curve(self):\n 
return dict(zip(self.strike_grid, list(map(lambda K: self.pdf(K), self.strike_grid))))", "def generate_style(self, data):\n\n s = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<sld:StyledLayerDescriptor xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\n <sld:NamedLayer>\n <sld:Name>People affected by more than 1m of inundation</sld:Name>\n <sld:UserStyle>\n <sld:Name>People affected by more than 1m of inundation</sld:Name>\n <sld:Title>People Affected By More Than 1m Of Inundation</sld:Title>\n <sld:Abstract>People Affected By More Than 1m Of Inundation</sld:Abstract>\n <sld:FeatureTypeStyle>\n <sld:Name>People affected by more than 1m of inundation</sld:Name>\n <sld:Rule>\n <sld:RasterSymbolizer>\n <sld:Geometry>\n <ogc:PropertyName>geom</ogc:PropertyName>\n </sld:Geometry>\n <sld:ChannelSelection>\n <sld:GrayChannel>\n <sld:SourceChannelName>1</sld:SourceChannelName>\n </sld:GrayChannel>\n </sld:ChannelSelection>\n <sld:ColorMap>\n <sld:ColorMapEntry color=\"#ffffff\" opacity=\"0\" quantity=\"-9999.0\"/>\n <sld:ColorMapEntry color=\"#38A800\" opacity=\"0\" quantity=\"2\"/>\n <sld:ColorMapEntry color=\"#38A800\" quantity=\"5\"/>\n <sld:ColorMapEntry color=\"#79C900\" quantity=\"10\"/>\n <sld:ColorMapEntry color=\"#CEED00\" quantity=\"20\"/>\n <sld:ColorMapEntry color=\"#FFCC00\" quantity=\"50\"/>\n <sld:ColorMapEntry color=\"#FF6600\" quantity=\"100\"/>\n <sld:ColorMapEntry color=\"#FF0000\" quantity=\"200\"/>\n <sld:ColorMapEntry color=\"#7A0000\" quantity=\"300\"/>\n </sld:ColorMap>\n </sld:RasterSymbolizer>\n </sld:Rule>\n </sld:FeatureTypeStyle>\n </sld:UserStyle>\n </sld:NamedLayer>\n</sld:StyledLayerDescriptor>\n\n \"\"\"\n\n return s", "def test_learning_curves():\n\n p = pipeline.Pipeline(\n FX_TRAIN,\n FX_TEST,\n FX_LOOKUP,\n RESULTS_DIR\n )\n\n data = p.learning_curves()", "def semigroup_generators(self):", "def create_dataset_iterator(pathes, batch_size=64):\n path_ds = tf.data.Dataset.from_tensor_slices(pathes)\n image_ds = path_ds.map(\n load_and_preprocess_image, num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n dataset = image_ds.cache() # Especially performant if the data fits in memory.\n dataset = dataset.shuffle(buffer_size=len(pathes))\n dataset = dataset.repeat()\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n return tf.compat.v1.data.make_one_shot_iterator(dataset)", "def __iter__(self):\n return iter(self.vertices.values())", "def make_data_iterator(dataset, batch_size):\n \n if dataset == '8gaussians':\n scale = 2.\n centers = [\n (1, 0),\n (-1, 0),\n (0, 1),\n (0, -1),\n (1. / np.sqrt(2), 1. / np.sqrt(2)),\n (1. / np.sqrt(2), -1. / np.sqrt(2)),\n (-1. / np.sqrt(2), 1. / np.sqrt(2)),\n (-1. / np.sqrt(2), -1. 
/ np.sqrt(2))\n ]\n centers = [(scale * x, scale * y) for x, y in centers]\n while True:\n dataset = []\n for i in range(batch_size):\n point = np.random.randn(2) * .2\n center = random.choice(centers)\n point[0] += center[0]\n point[1] += center[1]\n dataset.append(point)\n dataset = torch.Tensor(dataset)\n dataset /= 1.414 # stdev\n yield dataset\n \n elif dataset == 'sine':\n while True:\n noise = 0.2\n x = torch.linspace(-4, 4, batch_size, dtype=torch.float32)\n y = np.sin(x) + noise*np.random.randn(*x.shape)\n yield torch.stack([x, y], dim=1)\n \n elif dataset == 'heteroscedastic':\n theta = torch.linspace(0, 2, batch_size)\n x = np.exp(theta)*np.tan(0.1*theta)\n while True:\n b = (0.001 + 0.5 * np.abs(x)) * np.random.normal(1, 1, batch_size)\n y = np.exp(theta)*np.sin(0.1*theta) + b\n yield torch.stack([x, y], dim=1)\n \n elif dataset == 'moon':\n noise = 0.1\n while True:\n data, _ = sklearn.datasets.make_moons(n_samples=batch_size,\n noise=noise)\n yield torch.Tensor(data)\n \n elif dataset == 'helix':\n noise = 0.2\n while True:\n t = torch.linspace(0, 20, batch_size)\n x = np.cos(t)\n x2 = np.sin(t) + noise * np.random.randn(*x.shape)\n \n yield torch.stack([x, x2, t], dim=1)\n \n elif dataset == 'circle':\n while True:\n t = np.random.random(batch_size) * 2 * np.pi - np.pi\n length = 1 - np.random.random(batch_size)*0.4\n x = torch.Tensor(np.multiply(np.cos(t), length))\n y = torch.Tensor(np.multiply(np.sin(t), length))\n \n yield torch.stack([x, y], dim=1)\n\n elif dataset == '2spirals':\n while True:\n z = torch.randn(batch_size, 2)\n n = torch.sqrt(torch.rand(batch_size // 2)) * 540 * (2 * math.pi) / 360\n d1x = - torch.cos(n) * n + torch.rand(batch_size // 2) * 0.5\n d1y = torch.sin(n) * n + torch.rand(batch_size // 2) * 0.5\n x = torch.cat([torch.stack([ d1x, d1y], dim=1),\n torch.stack([-d1x, -d1y], dim=1)], dim=0) / 3\n yield x + 0.1*z", "def draw_schematic(self):\n # start off with all the component instances\n for inst in self.design.component_instances:\n comp = self.design.components.components[inst.library_id]\n for body, attr in zip(comp.symbols[inst.symbol_index].bodies,\n inst.symbol_attributes):\n # draw the appropriate body, at the position in attr\n pos = Point(attr.x, attr.y)\n self.draw_symbol(body, pos, attr.rotation, attr.flip)\n # draw in any annotations\n for ann in attr.annotations:\n if ann.visible:\n pos = self.base_xform.chain(Point(ann.x, ann.y))\n self.canvas.text((pos.x, pos.y), ann.value,\n fill=self.options.style['annot'])\n\n for shape in self.design.shapes:\n draw_method = getattr(self, 'draw_shape_%s' % shape.type)\n draw_method(shape, self.base_xform, self.options.style['annot'])\n\n for net in self.design.nets:\n self.draw_net(net)\n\n for ann in self.design.design_attributes.annotations:\n if ann.visible:\n pos = self.base_xform.chain(Point(ann.x, ann.y))\n self.canvas.text((pos.x, pos.y), ann.value,\n fill=self.options.style['annot'])", "def getCurve(self, attr: Union[int, Str], view=...) 
-> AnimCurve:\n ...", "def __iter__(self):\n for datum in self.data[self.name]:\n yield datum", "def gen_sc_descriptors(points, window_sizes, nbins_r, nbins_theta, r_inner, r_outer):\n max_window_size = max(window_sizes)\n\n descriptors = shape_context(points, window=window_sizes[0], max_window_size=max_window_size, nbins_r=nbins_r,\n nbins_theta=nbins_theta, r_inner=r_inner, r_outer=r_outer)\n\n for window in window_sizes[1::]:\n desc = shape_context(points, window=window, max_window_size=max_window_size, nbins_r=nbins_r,\n nbins_theta=nbins_theta, r_inner=r_inner, r_outer=r_outer)\n descriptors = np.hstack((descriptors, desc))\n\n return descriptors", "def _draw_layer_geometry(self, layer, plane, pval, ax_hand):\n for sname, (shape, material) in layer.shapes.items():\n if isinstance(shape, Circle):\n ax = self._draw_layer_circle(layer, shape, material, plane, pval, ax_hand)\n else:\n self.log.warning('Drawing of shape {} not '\n 'supported'.format(data['type']))\n return ax", "def get_pipelines() -> Iterable[DataPipeline]:\n for pipeline_name in get_pipeline_names():\n yield DataPipeline.load(pipeline_name)", "def generate_curves(self, seed=None):\n num_context = tf.random_uniform(\n shape=[], minval=3, maxval=self._max_num_context, dtype=tf.int32, seed=seed)\n\n # If we are testing we want to have more targets and have them evenly\n # distributed in order to plot the function.\n if self._testing:\n num_target = self._x_data.get_shape().as_list()[0]\n num_total_points = num_target\n # During training the number of target points and their x-positions are\n # selected at random\n else:\n num_target = tf.random_uniform(shape=(), minval=0, \n maxval=self._max_num_context - num_context,\n dtype=tf.int32, seed=seed)\n num_total_points = num_context + num_target\n\n # idx for x vals in target\n idxs = []\n # which instance to get y data from\n insts = []\n for i in range(self._batch_size):\n idxs.append( tf.random_shuffle(tf.range(self._num_pts_per_inst), seed=seed) )\n insts.append( tf.random_uniform(shape=[], minval=0, maxval=self._num_inst-1, dtype=tf.int32, seed=seed) )\n \n idxs = tf.stack(idxs)\n insts = tf.stack(insts)\n \n # batchsize x numtotalpoints x size (xsize or ysize)\n x_values = tf.stack([tf.expand_dims(tf.gather(self._x_uniq, idxs[tf.cast(i,tf.int32)][:tf.cast(num_total_points,tf.int32)]), axis=-1) for i in range(self._batch_size)])\n y_values = tf.stack([tf.expand_dims(tf.gather(self._y_data[insts[i]*self._num_pts_per_inst:(insts[i]+1)*self._num_pts_per_inst], idxs[i][:num_total_points]), axis=-1) for i in range(self._batch_size)])\n \n \n \n if self._testing:\n # Select the targets\n target_x = x_values\n target_y = y_values\n\n # Select the observations\n idx_ctxt = tf.random_shuffle(tf.range(num_target), seed=seed)\n context_x = tf.gather(x_values, idx_ctxt[:num_context], axis=1)\n context_y = tf.gather(y_values, idx_ctxt[:num_context], axis=1)\n\n else:\n # Select the targets which will consist of the context points as well as\n # some new target points\n target_x = x_values[:, :num_target + num_context, :]\n target_y = y_values[:, :num_target + num_context, :]\n\n # Select the observations\n context_x = x_values[:, :num_context, :]\n context_y = y_values[:, :num_context, :]\n \n context_x = tf.squeeze(context_x,-1)\n target_x = tf.squeeze(target_x,-1)\n\n query = ((context_x, context_y), target_x)\n\n return NPRegressionDescription(\n query=query,\n target_y=target_y,\n num_total_points=tf.shape(target_x)[1],\n num_context_points=num_context)", "def 
create_svgs(self):\n\n principal = self.app.principal\n types = {\n 'majorz': (\n 'candidates',\n ),\n 'proporz': (\n 'candidates', 'lists', 'connections', 'lists-panachage',\n 'party-strengths', 'parties-panachage',\n ),\n 'compound': (\n 'seat-allocation', 'list-groups', 'party-strengths',\n 'parties-panachage',\n ),\n 'compound-part': (\n 'party-strengths',\n ),\n 'ballot': (\n 'entities-map', 'districts-map'\n ) if principal.has_districts else ('entities-map',)\n }\n\n # Read existing SVGs\n fs = self.app.filestorage\n if not fs.exists(self.svg_dir):\n fs.makedir(self.svg_dir)\n existing = fs.listdir(self.svg_dir)\n\n # Generate the SVGs\n created = 0\n filenames = []\n for election in self.session.query(Election):\n last_modified = election.last_modified\n for locale in self.app.locales:\n for type_ in types[election.type]:\n filename = svg_filename(\n election, type_, locale, last_modified\n )\n filenames.append(filename)\n if filename not in existing:\n created += self.generate_svg(\n election, type_, filename, locale\n )\n\n for compound in self.session.query(ElectionCompound):\n last_modified = compound.last_modified\n for locale in self.app.locales:\n for type_ in types['compound']:\n filename = svg_filename(\n compound, type_, locale, last_modified\n )\n filenames.append(filename)\n if filename not in existing:\n created += self.generate_svg(\n compound, type_, filename, locale\n )\n for segment in principal.get_superregions(compound.date.year):\n compound_part = ElectionCompoundPart(\n compound, 'superregion', segment\n )\n for type_ in types['compound-part']:\n filename = svg_filename(\n compound_part, type_, locale, last_modified\n )\n filenames.append(filename)\n if filename not in existing:\n created += self.generate_svg(\n compound_part, type_, filename, locale\n )\n\n if principal.use_maps:\n for ballot in self.session.query(Ballot):\n if principal.is_year_available(ballot.vote.date.year):\n last_modified = ballot.vote.last_modified\n for locale in self.app.locales:\n for type_ in types['ballot']:\n filename = svg_filename(\n ballot, type_, locale, last_modified\n )\n filenames.append(filename)\n if filename not in existing:\n created += self.generate_svg(\n ballot, type_, filename, locale\n )\n\n # Delete obsolete SVGs\n obsolete = set(existing) - set(filenames)\n self.remove(self.svg_dir, obsolete)\n\n return created, len(obsolete)", "def __iter__(self) -> Iterator:\n return iter(self.get_data_loader())", "def efficiency_curve_names(self):\n return list(self._efficiency_curves)", "def draw(iiter):\n from matplotlib import pyplot as plt\n fig = plt.gcf()\n fig.canvas.draw()", "def draw(self, **kwargs):\n for o in sorted(self._drawables, key=default_itemgetter(\"z\", default=0)):\n o.draw(**kwargs)", "def get_iterator(self) -> Iterator[KeypointLabelPair]:\n for i in range(len(self._ids)):\n yield self[i]" ]
[ "0.625494", "0.62062955", "0.61237484", "0.5620857", "0.5553316", "0.54564095", "0.5426019", "0.5304748", "0.5274277", "0.5261457", "0.525136", "0.5238986", "0.521688", "0.5185847", "0.5158221", "0.5131699", "0.51237047", "0.5082937", "0.5075545", "0.5069475", "0.5063264", "0.505663", "0.5034377", "0.5006482", "0.5004874", "0.49992245", "0.49929115", "0.49888667", "0.49870312", "0.49743384", "0.49591702", "0.4950899", "0.4949019", "0.49485508", "0.4941738", "0.49353406", "0.4892069", "0.48805314", "0.48789597", "0.48701948", "0.48601097", "0.48542747", "0.48501572", "0.4841304", "0.48275802", "0.48106822", "0.48039272", "0.47892687", "0.47892305", "0.47775558", "0.4776538", "0.47711912", "0.4770262", "0.4770208", "0.475815", "0.47542256", "0.47481692", "0.47466433", "0.47446796", "0.47402787", "0.47364095", "0.47360435", "0.473439", "0.4724528", "0.47227588", "0.4722661", "0.47184527", "0.4717615", "0.4713628", "0.47114375", "0.47105497", "0.47097516", "0.47095516", "0.47055608", "0.46973872", "0.46956033", "0.46951237", "0.46934035", "0.4692322", "0.46895504", "0.46839896", "0.4683825", "0.46805125", "0.4675198", "0.46729404", "0.46727765", "0.46653458", "0.4664856", "0.46643656", "0.46593943", "0.465872", "0.46573767", "0.46566063", "0.46560848", "0.46528658", "0.4652025", "0.46497902", "0.4646397", "0.46458334", "0.46439573" ]
0.6866871
0
Show a legend. obj can be an Axes or Figure (in that case, also pass handles and labels arguments).
def legend(obj, ncol=3, **kwargs):
    # Font size handling here is a bit weird. We specify fontsize=6
    # in legend constructor since that affects spacing. However, we
    # need to manually override with 'small' later, because the original
    # specification did not take effect on whole-figure legends (and for
    # actual text, 6 is a wee bit small). We get a specific cramped
    # appearance and correct behavior for whole-figure legends this way.
    l = obj.legend(ncol=ncol, fancybox=True, markerscale=0.66, fontsize=6, **kwargs)
    plt.setp(l.get_texts(), fontsize='small')
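An illustrative call pattern for this helper, assuming Matplotlib objects created elsewhere; the plotted data and the handle/label extraction are placeholders, not taken from the original source.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [1, 4, 9], label='example')

# Axes case: handles and labels are picked up from the axes itself.
legend(ax, ncol=2)

# Figure case: pass handles and labels explicitly, as the docstring asks.
handles, labels = ax.get_legend_handles_labels()
legend(fig, handles=handles, labels=labels)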
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def legend (self, **kwargs):\n axes = self.twin_axes or self.axes\n self.mpl_legend = axes.legend (self.mpl_lines, self.labels, **kwargs)", "def legend_extras(\n self, handles=None, labels=None, *, loc=None,\n frame=None, frameon=None, ncol=None, ncols=None,\n center=None, order='C', label=None, title=None,\n fontsize=None, fontweight=None, fontcolor=None,\n **kwargs\n):\n # Parse input args\n # TODO: Legend entries for colormap or scatterplot objects! Idea is we\n # pass a scatter plot or contourf or whatever, and legend is generated by\n # drawing patch rectangles or markers using data values and their\n # corresponding cmap colors! For scatterplots just test get_facecolor()\n # to see if it contains more than one color.\n # TODO: It is *also* often desirable to label a colormap object with\n # one data value. Maybe add a legend option for the *number of samples*\n # or the *sample points* when drawing legends for colormap objects.\n # Look into \"legend handlers\", might just want to add own handlers by\n # passing handler_map to legend() and get_legend_handles_labels().\n if order not in ('F', 'C'):\n raise ValueError(\n f'Invalid order {order!r}. Choose from '\n '\"C\" (row-major, default) and \"F\" (column-major).'\n )\n ncol = _not_none(ncols=ncols, ncol=ncol)\n title = _not_none(label=label, title=title)\n frameon = _not_none(frame=frame, frameon=frameon, default=rc['legend.frameon'])\n if handles is not None and not np.iterable(handles): # e.g. a mappable object\n handles = [handles]\n if labels is not None and (not np.iterable(labels) or isinstance(labels, str)):\n labels = [labels]\n if title is not None:\n kwargs['title'] = title\n if frameon is not None:\n kwargs['frameon'] = frameon\n fontsize = kwargs.get('fontsize', None) or rc['legend.fontsize']\n if fontsize is None:\n pass\n elif fontsize in mfonts.font_scalings:\n kwargs['fontsize'] = rc._scale_font(fontsize)\n else:\n kwargs['fontsize'] = units(fontsize, 'pt')\n\n # Handle and text properties that are applied after-the-fact\n # NOTE: Set solid_capstyle to 'butt' so line does not extend past error bounds\n # shading in legend entry. This change is not noticable in other situations.\n kw_text = {}\n for key, value in (('color', fontcolor), ('weight', fontweight)):\n if value is not None:\n kw_text[key] = value\n kw_handle = _pop_props(kwargs, 'lines')\n kw_handle['solid_capstyle'] = 'butt'\n\n # Get axes for legend handle detection\n # TODO: Update this when no longer use \"filled panels\" for outer legends\n axs = [self]\n if self._panel_hidden:\n if self._panel_parent: # axes panel\n axs = list(self._panel_parent._iter_axes(hidden=False, children=True))\n else:\n axs = list(self.figure._iter_axes(hidden=False, children=True))\n\n # Handle list of lists (centered row legends)\n # NOTE: Avoid very common plot() error where users draw individual lines\n # with plot() and add singleton tuples to a list of handles. If matplotlib\n # gets a list like this but gets no 'labels' argument, it raises error.\n list_of_lists = False\n if handles is not None:\n handles = [h[0] if isinstance(h, tuple) and len(h) == 1 else h for h in handles]\n list_of_lists = any(isinstance(h, (list, np.ndarray)) for h in handles)\n if list_of_lists:\n if any(not np.iterable(_) for _ in handles):\n raise ValueError(f'Invalid handles={handles!r}.')\n if not labels:\n labels = [None] * len(handles)\n elif not all(np.iterable(_) and not isinstance(_, str) for _ in labels):\n # e.g. 
handles=[obj1, [obj2, obj3]] requires labels=[lab1, [lab2, lab3]]\n raise ValueError(f'Invalid labels={labels!r} for handles={handles!r}.')\n\n # Parse handles and legends with native matplotlib parser\n if not list_of_lists:\n if isinstance(handles, np.ndarray):\n handles = handles.tolist()\n if isinstance(labels, np.ndarray):\n labels = labels.tolist()\n handles, labels, *_ = mlegend._parse_legend_args(\n axs, handles=handles, labels=labels,\n )\n pairs = list(zip(handles, labels))\n else:\n pairs = []\n for ihandles, ilabels in zip(handles, labels):\n if isinstance(ihandles, np.ndarray):\n ihandles = ihandles.tolist()\n if isinstance(ilabels, np.ndarray):\n ilabels = ilabels.tolist()\n ihandles, ilabels, *_ = mlegend._parse_legend_args(\n axs, handles=ihandles, labels=ilabels,\n )\n pairs.append(list(zip(ihandles, ilabels)))\n\n # Manage pairs in context of 'center' option\n center = _not_none(center, list_of_lists)\n if not center and list_of_lists: # standardize format based on input\n list_of_lists = False # no longer is list of lists\n pairs = [pair for ipairs in pairs for pair in ipairs]\n elif center and not list_of_lists:\n list_of_lists = True\n ncol = _not_none(ncol, 3)\n pairs = [pairs[i * ncol:(i + 1) * ncol] for i in range(len(pairs))]\n ncol = None\n if list_of_lists: # remove empty lists, pops up in some examples\n pairs = [ipairs for ipairs in pairs if ipairs]\n\n # Bail if no pairs\n if not pairs:\n return mlegend.Legend(self, [], [], loc=loc, ncol=ncol, **kwargs)\n # Multiple-legend pseudo-legend\n elif center:\n objs = _multiple_legend(self, pairs, loc=loc, ncol=ncol, order=order, **kwargs)\n # Individual legend\n else:\n objs = [_single_legend(self, pairs, loc=loc, ncol=ncol, order=order, **kwargs)]\n\n # Add legends manually so matplotlib does not remove old ones\n for obj in objs:\n if isinstance(obj, mpatches.FancyBboxPatch):\n continue\n if hasattr(self, 'legend_') and self.legend_ is None:\n self.legend_ = obj # set *first* legend accessible with get_legend()\n else:\n self.add_artist(obj)\n\n # Apply legend box properties\n outline = rc.fill({\n 'linewidth': 'axes.linewidth',\n 'edgecolor': 'axes.edgecolor',\n 'facecolor': 'axes.facecolor',\n 'alpha': 'legend.framealpha',\n })\n for key in (*outline,):\n if key != 'linewidth':\n if kwargs.get(key, None):\n outline.pop(key, None)\n for obj in objs:\n if isinstance(obj, mpatches.FancyBboxPatch):\n obj.update(outline) # the multiple-legend bounding box\n else:\n obj.legendPatch.update(outline) # no-op if frame is off\n\n # Apply *overrides* to legend elements\n # WARNING: legendHandles only contains the *first* artist per legend because\n # HandlerBase.legend_artist() called in Legend._init_legend_box() only\n # returns the first artist. Instead we try to iterate through offset boxes.\n # TODO: Remove this feature? Idea was this lets users create *categorical*\n # legends in clunky way, e.g. entries denoting *colors* and entries denoting\n # *markers*. But would be better to add capacity for categorical labels in a\n # *single* legend like seaborn rather than multiple legends.\n for obj in objs:\n try:\n children = obj._legend_handle_box._children\n except AttributeError: # older versions maybe?\n children = []\n for obj in _iter_legend_children(children):\n # Account for mixed legends, e.g. 
line on top of error bounds shading\n if isinstance(obj, mtext.Text):\n obj.update(kw_text)\n else:\n for key, value in kw_handle.items():\n getattr(obj, 'set_' + key, lambda value: None)(value)\n\n # Append attributes and return, and set clip property!!! This is critical\n # for tight bounding box calcs!\n for obj in objs:\n obj.set_clip_on(False)\n if isinstance(objs[0], mpatches.FancyBboxPatch):\n objs = objs[1:]\n return objs[0] if len(objs) == 1 else tuple(objs)", "def plot2d(self, obj, options=\"\", label=None, labelfmt=None, **kwargs):\n self._pad.cd()\n self._pad.Update() # Updating the pad prevents spontaneous seg faults...\n\n # Apply formatting (if any) before calling `Draw()`\n root_helpers.set_graphics_attributes(obj, **kwargs)\n\n # Draw the object, depending on its type\n if isinstance(obj, root.TH2):\n if isinstance(self._frame, root.TH1F):\n if not self._is_empty:\n warnings.warn(\"plot2d: overwriting non-empty axes\")\n\n self._frame.Delete()\n self._frame = obj\n\n elif \"SAME\" not in options.upper():\n self._frame = obj\n\n obj.Draw(options)\n\n else:\n try:\n warnings.warn(\n \"plot2d: attempting to plot an object that is not a TH2.\\n\"\n \"This may result in unexpected behaviour.\"\n )\n obj.Draw(options)\n\n except AttributeError:\n raise TypeError(\"Attempting to plot an object with no Draw() method\")\n\n # Add object to list of legend entries if label was provided\n if label is not None:\n self._legend_entries.append((obj, label, labelfmt))\n\n self._is_empty = False # Record that the axes are no longer empty", "def decorate(**options):\n ax = plt.gca()\n ax.set(**options)\n\n handles, labels = ax.get_legend_handles_labels()\n if handles:\n ax.legend(handles, labels)\n\n plt.tight_layout()", "def _patch_legend(obj, draw_options, legend_type):\n legend = \"\"\n if _is_in_legend(obj):\n # Unfortunately, patch legend entries need \\addlegendimage in Pgfplots.\n do = \", \".join([legend_type] + draw_options) if draw_options else \"\"\n legend += \"\\\\addlegendimage{{{}}}\\n\\\\addlegendentry{{{}}}\\n\\n\".format(\n do, obj.get_label()\n )\n\n return legend", "def plot_legend(ax):\n\tlines = 4 * [None]\n\tcolors = [\"black\", \"deepskyblue\", \"lime\", \"crimson\"]\n\tlabels = [r\"Constant $y_\\text{Sr}^\\text{CC}$\",\n\t\tr\"$y_\\text{Sr}^\\text{CC} \\propto 1 - e^{-kZ}$\",\n\t\tr\"$y_\\text{Sr}^\\text{CC} \\propto Z$\",\n\t\tr\"$y_\\text{Sr}^\\text{CC}$ = 0\"]\n\tfor i in range(4):\n\t\tlines[i] = ax.plot([1, 2], [1, 2], c = visuals.colors()[\"white\"],\n\t\t\tlabel = labels[i])[0]\n\tleg = ax.legend(loc = visuals.mpl_loc()[\"upper left\"], ncol = 1,\n\t\tbbox_to_anchor = (0.0, 0.99), frameon = False, handlelength = 0)\n\tfor i in range(4):\n\t\tlines[i].remove()\n\t\tleg.get_texts()[i].set_color(colors[i])", "def legend(colors, labels, shapes='box', loc='best', layout='vertical', reverse_vertical=True, ax=None):\n if ax is None:\n ax = plt.gca()\n\n handles = get_handles(shapes, colors, labels)\n if not all(len(handles) == l for l in [len(colors), len(labels)]):\n warnings.warn('Lengths of one or more of colors, labels, and shapes did not match.', UserWarning)\n\n if layout == 'horizontal' or layout == 'h':\n ncol = len(labels)\n else:\n ncol = 1\n if reverse_vertical: #Reverse so that it goes from bottom to top\n handles = handles[-1::-1]\n\n return ax.legend(handles=handles, loc=loc, ncol=ncol, frameon=False)", "def legend(self):\n if self.nplots == 1:\n lax = self.ax\n loff = 0.2\n else:\n lax = self.ax1\n loff = 0.4\n box = lax.get_position()\n\n 
lax.figure.subplots_adjust(bottom=loff) # make space on bottom for legend\n lax.legend(self.plots, self.labels, loc='upper center', bbox_to_anchor=(0.5, -loff), fancybox=True, shadow=True, ncol=3, prop={'size': 8})", "def legend(colors, labels, **kwds):\n proxies = [pylab.Rectangle((0, 0), 1, 1, fc=color) for color in colors]\n nl = min(len(proxies), len(labels))\n pylab.legend(proxies[:nl], labels[:nl], **kwds)", "def add_legend(\n self,\n labels=None,\n bcolor=(0.5, 0.5, 0.5),\n border=False,\n size=(0.2, 0.2),\n name=None,\n loc='upper right',\n face='triangle',\n ):\n if self.legend is not None:\n self.remove_legend()\n self._legend = _vtk.vtkLegendBoxActor()\n\n if labels is None:\n # use existing labels\n if not self._labels:\n raise ValueError(\n 'No labels input.\\n\\n'\n 'Add labels to individual items when adding them to'\n 'the plotting object with the \"label=\" parameter. '\n 'or enter them as the \"labels\" parameter.'\n )\n\n self._legend.SetNumberOfEntries(len(self._labels))\n for i, (vtk_object, text, color) in enumerate(self._labels.values()):\n if face is None:\n # dummy vtk object\n vtk_object = pyvista.PolyData([0.0, 0.0, 0.0])\n\n self._legend.SetEntry(i, vtk_object, text, color.float_rgb)\n\n else:\n self._legend.SetNumberOfEntries(len(labels))\n\n legend_face = make_legend_face(face)\n for i, (text, color) in enumerate(labels):\n self._legend.SetEntry(i, legend_face, text, Color(color).float_rgb)\n\n if loc is not None:\n if loc not in ACTOR_LOC_MAP:\n allowed = '\\n'.join([f'\\t * \"{item}\"' for item in ACTOR_LOC_MAP])\n raise ValueError(f'Invalid loc \"{loc}\". Expected one of the following:\\n{allowed}')\n x, y, size = map_loc_to_pos(loc, size, border=0.05)\n self._legend.SetPosition(x, y)\n self._legend.SetPosition2(size[0], size[1])\n\n if bcolor is None:\n self._legend.SetUseBackground(False)\n else:\n self._legend.SetUseBackground(True)\n self._legend.SetBackgroundColor(Color(bcolor).float_rgb)\n\n self._legend.SetBorder(border)\n\n self.add_actor(self._legend, reset_camera=False, name=name, pickable=False)\n return self._legend", "def show_legend(self, show_legend):\n\n self.container['show_legend'] = show_legend", "def plot(self, obj, options=\"\", expand=True, label=None, labelfmt=None, **kwargs):\n self._pad.cd()\n self._pad.Update() # Updating the pad prevents spontaneous seg faults...\n\n # Apply formatting (if any) before calling `Draw()`\n root_helpers.set_graphics_attributes(obj, **kwargs)\n\n # Get current axis limits\n old_left, old_right = self.get_xlim()\n old_bottom, old_top = self.get_ylim()\n\n # Draw the object, depending on its type\n if isinstance(obj, root.TH1):\n # Histogram\n obj.Draw(\"HIST SAME \" + options)\n\n # Get new axis limits (to expand if needed)\n left, right = obj.GetXaxis().GetXmin(), obj.GetXaxis().GetXmax()\n bottom, top = root_helpers.hist_min(obj), root_helpers.hist_max(obj)\n\n elif isinstance(obj, root.THStack):\n # Stacked Histogram\n obj.Draw(\"SAME HIST\" + options)\n\n # Get new axis limits (to expand if needed)\n top_hist = obj.GetStack().Last()\n bottom_hist = obj.GetStack().First()\n left, right = top_hist.GetXaxis().GetXmin(), top_hist.GetXaxis().GetXmax()\n bottom, top = root_helpers.hist_min(bottom_hist), root_helpers.hist_max(top_hist)\n\n elif isinstance(obj, root.TGraph):\n # Graph\n obj.Draw(options)\n\n # Get new axis limits (to expand if needed)\n left, right = root_helpers.graph_xmin(obj), root_helpers.graph_xmax(obj)\n bottom, top = root_helpers.graph_ymin(obj), root_helpers.graph_ymax(obj)\n\n elif 
isinstance(obj, root.TMultiGraph):\n # Multigraph\n obj.Draw(options)\n\n # Get new axis limits (to expand if needed)\n left, right = root_helpers.multigraph_xmin(obj), root_helpers.multigraph_xmax(obj)\n bottom, top = root_helpers.multigraph_ymin(obj), root_helpers.multigraph_ymax(obj)\n\n elif isinstance(obj, root.TLine):\n # Line\n obj.Draw(options)\n\n # Get new axis limits (to expand if needed)\n left, right = obj.GetX1(), obj.GetX2()\n bottom, top = obj.GetY1(), obj.GetY2()\n\n else:\n try:\n obj.Draw(\"SAME \" + options)\n\n # Do not expand axis if we don't know what we're plotting\n left, right = old_left, old_right\n bottom, top = old_bottom, old_top\n\n except AttributeError:\n raise TypeError(\"Attempting to plot an object with no Draw() method\")\n\n # Add object to list of legend entries if label was provided\n if label is not None:\n self._legend_entries.append((obj, label, labelfmt))\n\n # Adjust axis limits\n if expand:\n if self._is_empty:\n # Axes are empty: expand or shrink to the object being drawn\n if left == 0 and right == 0:\n new_left = -0.01\n new_right = 0.01\n elif left == right:\n new_left = left * 0.99\n new_right = right * 1.01\n else:\n new_left = left\n new_right = right\n\n if bottom == 0 and top == 0:\n new_bottom = -0.01\n new_top = 0.01\n elif bottom == top:\n new_bottom = bottom * 0.99\n new_top = top * 1.01\n else:\n new_bottom = bottom\n new_top = top\n\n else:\n # Axes are not empty: expand or leave unaltered\n new_left = left if left < old_left else old_left\n new_right = right if right > old_right else old_right\n new_bottom = bottom if bottom < old_bottom else old_bottom\n new_top = top if top > old_top else old_top\n\n self.set_xlim(new_left, new_right)\n self.set_ylim(new_bottom, new_top)\n\n self._pad.RedrawAxis() # Redraw so axes appear above colour-filled areas\n\n self._is_empty = False # Record that the axes are no longer empty", "def set_legend(ax):\n l = ax.legend()\n plt.setp(l.get_texts(), fontsize=8)", "def legend(self, legend):\n\n self.container['legend'] = legend", "def add_legend(self,\n ax: Optional[Union[Axes, str, int]] = None,\n loc=None,\n labels: Optional[Sequence[str]] = None,\n **kwargs):\n target: Union[Figure, Axes]\n if ax is None:\n # automatic: figure legend or the (unique) axes\n if self.n_plots >= 2:\n target = self.fig\n else:\n target = self.axes_active[0]\n else:\n if isinstance(ax, (int, str)): # see __getitem__\n target = self[ax] # type: ignore\n else:\n target = ax\n\n # TODO: Customize how to sort legend items.\n legend_handles, legend_labels = zip(\n *[(h, l) for (l, h) in sorted(self._collect_legend().items())])\n if labels is not None:\n if len(labels) != len(legend_labels):\n raise ValueError(\n f\"labels {labels} should have length {len(legend_labels)} \"\n f\"but was given {len(labels)}\")\n legend_labels = list(labels)\n legend = target.legend(legend_handles, legend_labels, loc=loc, **kwargs)\n\n if isinstance(target, Axes) and not target.lines:\n target.axis('off')\n\n return legend", "def _LegendAndSave(Fig,SaveName,loc=\"upper right\",frameon=True,close=False,\n tight=True,use_legend=True,handlelength=1,**kwargs):\n if use_legend and legend_is_useable():\n legend(loc=loc,frameon=frameon,handlelength=handlelength)\n savefig(Fig,SaveName,close=close,tight=tight,**kwargs)", "def add_legend(ax, sf=16, loc='upper right'):\n ax.autoscale(False)\n #CONUS\n #leg_s = np.array([0.1, 0.5, 1.0, 5.0, 10.0])\n #HMA\n leg_s = np.array([0.1, 1.0, 10.0, 100.0])\n leg_x = np.full(leg_s.size, -999999999)\n leg_y = 
np.full(leg_s.size, -999999999)\n #leg_sc = ax.scatter(leg_x, leg_y, c='0.8', s=leg_s)\n #ax.legend(leg_sc, ['%0.1f km^2' % s for s in leg_s], scatterpoints=1, loc='upper right')\n for i, s in enumerate(leg_s):\n lbl = r'$%0.1f\\/km^2$' % s\n ax.scatter(leg_x[i], leg_y[i], s=s*sf, c='gray', label=lbl)\n legend = ax.legend(title='Glacier Area', scatterpoints=1, loc=loc, prop={'size':7})\n legend.get_title().set_fontsize('8')\n return legend", "def _draw_legend(self, labels, title=None):\n\n if len(self.pos) < 1:\n print 'Legend can not be plotted for Gleckler, as no data available!'\n return\n\n pmax = max(self.pos.values())\n\n # generate separate figure for legend\n f = plt.figure()\n ax = f.add_subplot(111, frameon=True, aspect='equal', axisbg='grey')\n f.subplots_adjust(bottom=0.25, top=0.75, left=0.25, right=0.75)\n\n for k in labels.keys():\n if k == 1:\n pos = 'top'\n elif k == 2:\n pos = 'bottom'\n elif k == 3:\n pos = 'left'\n elif k == 4:\n pos = 'right'\n else:\n raise ValueError('Can not draw Gleckler legend! Invalid position value! %s' % str(k))\n\n oldval = self.show_value\n self.show_value = False\n self.__plot_triangle(ax, np.random.random(), pos=pos)\n self.show_value = oldval\n ax.set_xticks([])\n ax.set_yticks([])\n\n fontsize = 16\n linewidth = 3\n\n for k in labels.keys():\n if k == 1: # top\n ax.annotate(labels[k], xy=(0.5, 0.9), xycoords='axes fraction', xytext=(0., 1.2), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n elif k == 2:\n ax.annotate(labels[k], xy=(0.5, 0.1), xycoords='axes fraction', xytext=(0., -0.3), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n elif k == 3:\n ax.annotate(labels[k], xy=(0.1, 0.5), xycoords='axes fraction', xytext=(-0.6, 0.2), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n elif k == 4:\n ax.annotate(labels[k], xy=(0.9, 0.5), xycoords='axes fraction', xytext=(1.1, 0.8), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n\n if title is not None:\n f.suptitle(title, size=fontsize)\n\n return f", "def legend(self, legend):\n\n self._legend = legend", "def add_to_legend(axes, text, **kwargs):\n text = mpatches.Patch(color='none', label=text)\n handles, labels = axes.get_legend_handles_labels()\n if 'handles' in kwargs:\n handles.append(kwargs.pop('handles'))\n handles.append(text)\n axes.legend(\n handles=handles,\n prop=kwargs.pop('prop', {'family': 'monospace'}),\n **kwargs\n )", "def finish (self, legend=None):\n if legend is True:\n kwargs = {}\n else:\n kwargs = legend\n if legend:\n self.legend (**kwargs)", "def set_legend(self, **lgdkwargs):\n\n if 'loc' not in lgdkwargs.keys(): \n lgdkwargs['loc'] = 'upper right'\n \n if 'scatterpoints' not in lgdkwargs.keys(): \n lgdkwargs['scatterpoints'] = 1 \n\n self.sub.legend(**lgdkwargs) \n \n return None", "def draw_legend(\n data: pd.Series[Any], da: DrawingArea, lyr: Layer\n ) -> DrawingArea:\n msg = \"The geom should implement this method.\"\n raise NotImplementedError(msg)", "def fl_draw_object_label(ptr_flobject):\n _fl_draw_object_label = library.cfuncproto(\n 
library.load_so_libforms(), \"fl_draw_object_label\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT)], \\\n \"\"\"void fl_draw_object_label(FL_OBJECT * ob)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n _fl_draw_object_label(ptr_flobject)", "def modify_legend_handles(ax, **kwargs):\r\n hndls, labls = ax.get_legend_handles_labels()\r\n _hndls = []\r\n for h in hndls:\r\n _h = copy(h)\r\n _h.update(kwargs)\r\n _hndls.append(_h)\r\n return _hndls, labls", "def add_legend(self, mode='image', label=None, color='none', alpha=1,\n size=15, family='sans-serif', properties=None, **kwargs):\n if properties is None:\n properties = {}\n properties = {'size': size, 'family': family, **properties}\n\n # get legend that already exists\n legend = self.ax.get_legend()\n old_handles = getattr(legend, 'legendHandles', [])\n handler_map = getattr(legend, '_custom_handler_map', {})\n\n # make new handles\n new_handles = []\n labels = to_list(label)\n colors = [color] * len(labels) if isinstance(color, str) else color\n alphas = [alpha] * len(labels) if isinstance(alpha, Number) else alpha\n\n for label_item, label_color, label_alpha in zip(labels, colors, alphas):\n if label_item is None:\n continue\n\n if isinstance(label_item, str):\n if mode in ('image', 'histogram'):\n if is_color_like(label_color):\n handle = Patch(color=label_color, alpha=label_alpha, label=label_item)\n else:\n handle = PatchCollection(patches=[], cmap=label_color, label=label_item)\n handler_map[PatchCollection] = ColorMappingHandler()\n elif mode in ('curve', 'loss'):\n handle = Line2D(xdata=[0], ydata=[0], color=label_color, alpha=label_alpha, label=label_item)\n new_handles.append(handle)\n elif not label_item.get_label().startswith('_'):\n new_handles.append(label_item)\n\n if len(new_handles) > 0:\n # extend existing handles and labels with new ones\n handles = old_handles + new_handles\n legend = self.ax.legend(prop=properties, handles=handles, handler_map=handler_map, **kwargs)\n\n return legend", "def legend(self, loc, options=\"\", **kwargs):\n self._pad.cd()\n\n if self._legend is not None and isinstance(self._legend, root.TLegend):\n warnings.warn(\"These axes already have a legend, will overwrite\", stacklevel=2)\n self._legend.Delete()\n\n self._legend = root.TLegend(*loc)\n\n # Default formatting options: use transparent background\n # Do this here since this option is not available in the `TStyle` class\n self._legend.SetFillColorAlpha(0, 0)\n\n # Set graphics attributes\n root_helpers.set_graphics_attributes(self._legend, **kwargs)\n\n # Columns\n if \"ncol\" in kwargs:\n self._legend.SetNColumns(kwargs[\"ncol\"])\n\n # Legend border size\n if \"bordersize\" in kwargs:\n self._legend.SetBorderSize(kwargs[\"bordersize\"])\n\n for obj, label, option in self._legend_entries:\n if option is not None:\n self._legend.AddEntry(obj, label, option)\n else:\n self._legend.AddEntry(obj, label)\n\n self._legend.Draw(options)\n\n return self._legend", "def draw_legend(self, *drawables):\n # Check if we already have a legend\n if hasattr(self, '_legend'):\n raise RuntimeError('legend already exists on this plot')\n\n # Switch to the context of the main plot\n self._plot.cd()\n\n # Create the legend\n if self._atlas_label_drawn:\n self._legend = TLegend(self.PLOT_LEGEND_LEFT,\n (self.PLOT_LEGEND_BOTTOM_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_LEGEND_BOTTOM),\n self.PLOT_LEGEND_RIGHT,\n (self.PLOT_LEGEND_TOP_WITH_RATIO\n if self._ratio_plot\n else 
self.PLOT_LEGEND_TOP))\n else:\n # WJF may need customisation with ratio\n self._legend = TLegend(0.15, 0.7, 0.5, 0.88)\n\n SetOwnership(self._legend, False)\n\n # Style it\n self._legend.SetTextSize((\n self.PLOT_LEGEND_TEXT_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_LEGEND_TEXT_SIZE\n ))\n self._legend.SetBorderSize(0)\n self._legend.SetFillStyle(0) # transparent\n self._legend.SetNColumns(self.PLOT_LEGEND_N_COLUMNS)\n\n # Create a chained list of all drawables. We decompose THStack\n # objects in reverse order, i.e. top-to-bottom.\n drawables = \\\n list(chain(*(drawable_iterable(h, True, True)\n for h\n in drawables)))\n\n # Add anything to this list that we created internally\n drawables.extend(self._legend_extras)\n\n # Because ROOT draws legend entries from left-to-right across rows and\n # not top-to-bottom along columns, we need to do a bit of a pivot on\n # the list so that the histograms appear in the vertical order of the\n # stack\n n_entries = len(drawables)\n n_col = self.PLOT_LEGEND_N_COLUMNS\n n_row = int(ceil(float(n_entries) / n_col))\n legend_order = []\n for r in xrange(0, n_row):\n for c in xrange(0, n_col):\n if (r * n_col + c) == n_entries:\n # Don't need an outer break, this would only happen on the\n # last row if n_row * n_col != n_entries\n break\n legend_order.append(drawables[r + c * n_row])\n\n # Add the drawables\n for drawable in legend_order:\n SetOwnership(drawable, False)\n title = drawable.GetTitle()\n # HACK: Convention: legend for drawables with a non-default\n # marker style (data) to be drawn as point, and with\n # empty fill (signal) to be drawn as line\n #print 'Adding plottable {0} to legend. Has MarkerStyle {1} and fill colour {2}'.format(drawable.GetName(), drawable.GetMarkerStyle(), drawable.GetFillColor())\n #self._legend.AddEntry(drawable, title, 'f')\n this_marker = drawable.GetMarkerStyle()\n if this_marker == 20:\n self._legend.AddEntry(drawable, title, 'p')\n #self._legend.AddEntry(drawable, title, 'l')\n elif drawable.GetTitle() == 'Total Background' or drawable.GetTitle() == 'Total background':\n self._legend.AddEntry(drawable, title, 'lf')\n elif drawable.GetFillColor() == 0:\n self._legend.AddEntry(drawable, title, 'l')\n elif this_marker == 21 or this_marker == 3 or this_marker == 22:\n self._legend.AddEntry(drawable, title, 'lp')\n else:\n self._legend.AddEntry(drawable, title, 'f')\n\n # Draw the legend\n self._legend.Draw()", "def legend(self, marks, bounds=None, rect=None, corner=None, grid=None, gutter=50, style=None, label_style=None, id=None):\n gutter = _require_scalar(gutter)\n style = _combine_styles(_require_style(style))\n label_style = _combine_styles(_require_style(label_style))\n id = _require_optional_id(id)\n\n xmin, xmax, ymin, ymax = _region(0, self._width, 0, self._height, bounds=bounds, rect=rect, corner=corner, grid=grid, gutter=gutter)\n self._children.append(LegendMark(xmin, xmax, ymin, ymax, marks, style, label_style, id))\n return self._children[-1]", "def add_plot_legend(fig, labright='M.', lableft='S.'):\n #............................................\n _leg = fig.add_axes([0.92, 0.865, 0.055, 0.085])\n _leg.fill((0, 0.5, 0.5, 0), (0, 0, 1, 1), fc=ENSOpolygons['W'])\n _leg.text(0.05, 0.5, 'EN', fontsize='smaller')\n _leg.fill((0.5, 1, 1, 0.5), (0, 0, 1, 1), fc=ENSOpolygons['C'])\n _leg.text(0.6, 0.5, 'LN', fontsize='smaller')\n _leg.set_xticks([])\n _leg.set_yticks([])\n #............................................\n _leg = fig.add_axes([0.92, 0.75, 0.055, 0.085])\n _leg.plot((0, 1,), (0, 1), 
ls='-', c='k', marker='')\n _leg.set_xticks([])\n _leg.set_yticks([])\n _leg.text(0.6, 0.15, labright, fontsize='smaller')\n _leg.text(0.1, 0.5, lableft, fontsize='smaller')", "def test_manual_legend(self):\n # Draw a random scatter plot\n random = np.random.RandomState(42)\n\n Ax, Ay = random.normal(50, 2, 100), random.normal(50, 3, 100)\n Bx, By = random.normal(42, 3, 100), random.normal(44, 1, 100)\n Cx, Cy = random.normal(20, 10, 100), random.normal(30, 1, 100)\n\n _, ax = plt.subplots()\n ax.scatter(Ax, Ay, c=\"r\", alpha=0.35, label=\"a\")\n ax.scatter(Bx, By, c=\"g\", alpha=0.35, label=\"b\")\n ax.scatter(Cx, Cy, c=\"b\", alpha=0.35, label=\"c\")\n\n # Add the manual legend\n manual_legend(\n ax, (\"a\", \"b\", \"c\"), (\"r\", \"g\", \"b\"), frameon=True, loc=\"upper left\"\n )\n\n # Assert image similarity\n self.assert_images_similar(ax=ax, tol=0.5)", "def legend(self, marks, bounds=None, rect=None, corner=None, grid=None, gutter=50, style=None, label_style=None, id=None):\n gutter = _require_scalar(gutter)\n style = _combine_styles(_require_style(style))\n label_style = _combine_styles(_require_style(label_style))\n id = _require_optional_id(id)\n\n xmin, xmax, ymin, ymax = _region(self._xmin_range, self._xmax_range, self._ymin_range, self._ymax_range, bounds=bounds, rect=rect, corner=corner, grid=grid, gutter=gutter)\n self._children.append(LegendMark(xmin, xmax, ymin, ymax, marks, style, label_style, id))\n return self._children[-1]", "def test_legend():\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n\n for seed in [1, 9, 6, 8]:\n ax.scatter(*get_scatter_points(11, seed=seed), alpha=0.5, label=seed)\n\n ax.legend()", "def make_legend_fig(legend: matplotlib.legend.Legend) -> Figure:\n\n # Get the dimensions (in inches) of the legend's bounding box\n legend_inches = legend.get_window_extent().transformed(\n cast(Figure, legend.figure).dpi_scale_trans.inverted())\n\n fig = Figure(\n figsize=(\n legend_inches.width + 0.05,\n legend_inches.height + 0.05,\n ))\n fig.add_axes([0, 0, 1, 1]).axis('off')\n\n fig.legend(\n legend.legendHandles,\n [t.get_text() for t in legend.texts],\n ncol=legend._ncols,\n loc='center',\n bbox_to_anchor=(0.5, 0.5),\n )\n return fig", "def SetLegendDrawOption(self, option):\n self._legenddrawoption = option", "def config_ax(ax, xylabels=None, title=None, loc=None):\n\n ax.grid(True, color='0.9')\n ax.set_frame_on(False)\n ax.tick_params(color='0.9')\n\n if xylabels is not None:\n ax.set_xlabel(xylabels[0])\n ax.set_ylabel(xylabels[1])\n\n if title is not None:\n ax.set_title(title)\n\n if loc is not None:\n ax.legend(loc=loc)", "def plot_one_axes(self, fig_num: int, title: str, y_label: str, labeled: np.ndarray, filled: np.ndarray,\n smoothed: np.ndarray, legend_entries: Dict[str, str]) -> matplotlib.figure.Figure:\n fig = plt.figure(fig_num)\n ax = fig.subplots(1, 1)\n labeled_lines = kine_graph_init(ax, labeled, y_label, self.frame_nums, [{'ls': '', 'marker': 'o', 'ms': 2,\n 'fillstyle': 'none', 'mew': 0.5}] * 3)\n ax.set_prop_cycle(None)\n filled_lines = kine_graph_add(ax, filled, self.frame_nums, [{'ls': '-', 'lw': 0.75}] * 3)\n ax.set_prop_cycle(None)\n smoothed_lines = kine_graph_add(ax, smoothed, self.frame_nums, [{'ls': '-'}] * 3)\n plt.tight_layout()\n fig.suptitle(title, x=0.7)\n fig.legend((labeled_lines[0], smoothed_lines[2], filled_lines[1]),\n (legend_entries['labeled'], legend_entries['smoothed'], legend_entries['filled']),\n ncol=2, handlelength=0.75, handletextpad=0.25, columnspacing=0.5, loc='upper left')\n 
make_interactive()\n return fig", "def show_legend(self):\n return self.container['show_legend']", "def fl_fit_object_label(ptr_flobject, xmargin, ymargin):\n _fl_fit_object_label = library.cfuncproto(\n library.load_so_libforms(), \"fl_fit_object_label\",\\\n None, [cty.POINTER(xfdata.FL_OBJECT), xfdata.FL_Coord,\n xfdata.FL_Coord],\\\n \"\"\"void fl_fit_object_label(FL_OBJECT * obj, FL_Coord xmargin,\n FL_Coord ymargin)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n i_xmargin = library.convert_to_intc(xmargin)\n i_ymargin = library.convert_to_intc(ymargin)\n library.keep_elem_refs(ptr_flobject, xmargin, i_xmargin, \\\n ymargin, i_ymargin)\n _fl_fit_object_label(ptr_flobject, i_xmargin, i_ymargin)", "def show_legend(self):\n if self.axes.legend() is not None:\n # set visible to be True and re-draw\n # self.axes.legend().set_visible(True)\n self._setup_legend(font_size=self._legendFontSize)\n self.draw()\n\n # set flag on\n self._isLegendOn = True\n\n return", "def make_final_legend():\n fig = plt.figure(figsize=(10, 1))\n me.get_final_graph_legend(fig)\n fig.savefig(\"cumul_shuttle_leg.pdf\")", "def _hr_mean_add_legend(**kwargs):\n ax: plt.Axes = kwargs.get(\"ax\")\n legend_loc = kwargs.get(\"legend_loc\", \"upper left\")\n # get handles\n handles, labels = ax.get_legend_handles_labels()\n # remove the errorbars\n handles = [h[0] for h in handles]\n # use them in the legend\n if legend_loc == \"upper left\":\n bbox_to_anchor = (0.01, 0.90)\n elif legend_loc == \"upper right\":\n bbox_to_anchor = (0.99, 0.90)\n else:\n bbox_to_anchor = None\n ax.legend(\n handles,\n labels,\n loc=legend_loc,\n bbox_to_anchor=bbox_to_anchor,\n numpoints=1,\n )", "def plot(self):\n fig, ax = plt.subplots()\n\n T=self.storage.T\n #print self.storage.S\n #print T\n for statename in self.v:\n i=self.rv[statename]\n s=self.storage.S[:, i]\n #print s\n ax.plot(T, s, label=statename)\n\n legend = ax.legend(loc='upper right', shadow=True)\n\n frame = legend.get_frame()\n frame.set_facecolor('0.90')\n\n # Set the fontsize\n for label in legend.get_texts():\n label.set_fontsize('large')\n\n for label in legend.get_lines():\n label.set_linewidth(1.5) # the legend line width\n plt.show()", "def TurnOnLegend(self, pos):\n self.bShowLegend = True\n self.sLegendPos = pos\n return self", "def trace_gui(obj, **kwargs): # pragma: no cover\n\n gui = create_trace_gui(obj, **kwargs)\n gui.show()\n run_app()\n gui.close()", "def legend(self, *args, loc=None, width=None, space=None, **kwargs):\n if loc != '_fill':\n loc = self._loc_translate(loc, rc['legend.loc'])\n if isinstance(loc, np.ndarray):\n loc = loc.tolist()\n\n # Generate panel\n if loc in ('left', 'right', 'top', 'bottom'):\n ax = self.panel_axes(loc, width=width, space=space, filled=True)\n return ax.legend(*args, loc='_fill', **kwargs)\n\n # Fill\n if loc == '_fill':\n # Hide content\n for s in self.spines.values():\n s.set_visible(False)\n self.xaxis.set_visible(False)\n self.yaxis.set_visible(False)\n self.patch.set_alpha(0)\n self._panel_filled = True\n # Try to make handles and stuff flush against the axes edge\n kwargs.setdefault('borderaxespad', 0)\n frameon = _notNone(kwargs.get('frame', None), kwargs.get(\n 'frameon', None), rc['legend.frameon'])\n if not frameon:\n kwargs.setdefault('borderpad', 0)\n # Apply legend location\n side = self._panel_side\n if side == 'bottom':\n loc = 'upper center'\n elif side == 'right':\n loc = 'center left'\n elif side == 'left':\n loc = 'center right'\n elif side == 'top':\n loc 
= 'lower center'\n else:\n raise ValueError(f'Invalid panel side {side!r}.')\n\n # Draw legend\n return legend_wrapper(self, *args, loc=loc, **kwargs)", "def visualise(self, obj):\n self.clear()\n self.draw(obj)\n self.show()", "def multi_plot(x, y, y_legend=[] ,title=\"Title\", xlab=\"x-axis\", ylab=\"y-axis\"):\n\n if y_legend==[]:\n for i in range(0, np.size(y,0)):\n plt.plot(x, y[i][:], linewidth=2)\n else:\n for i in range(0, np.size(y,0)):\n plt.plot(x, y[i][:], label=y_legend[i], linewidth=2)\n plt.legend(prop={'size': 12}) #legend details\n\n plt.title(title)\n plt.xlabel(xlab)\n plt.ylabel(ylab)", "def fl_set_object_label(ptr_flobject, label):\n _fl_set_object_label = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_object_label\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT), xfdata.STRING], \\\n \"\"\"void fl_set_object_label(FL_OBJECT * ob, const char * label) \"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n s_label = library.convert_to_bytestrc(label)\n library.keep_elem_refs(ptr_flobject, label, s_label)\n _fl_set_object_label(ptr_flobject, s_label)", "def renderLegend(self, plot, painter, rect):\r\n if plot.legend():\r\n fillBackground = not self.__data.discardFlags & self.DiscardBackground\r\n plot.legend().renderLegend(painter, rect, fillBackground)", "def _legend(ax: mpl.axes.Subplot, graph: nx.classes.Graph,\n nodes: list) -> mpl.legend.Legend:\n legend_kwargs = {'fancybox': True,\n 'fontsize': 14,\n 'bbox_to_anchor': (1.02, 1.0)}\n\n labels = [r'$f_c = {:>9.3f}$ Hz'.format(key) for key in graph.nodes.keys()]\n legend = ax.legend(nodes.values(), labels, **legend_kwargs, borderaxespad=0)\n return legend", "def addLegend(ax, lines, impls, legendPos):\n\n # If there's only one piece of data being plotted, there's no need for a legend\n # since all the parameters will be in the title.\n # Compute the length (in characters) of the longest implementation.\n legendLen = max(list(map(len, impls)))\n if legendLen == 0:\n return\n legendItems = len(impls)\n fontSize = 10 if legendLen < 20 and legendItems <= 4 else 8\n prop = matplotlib.font_manager.FontProperties(size=fontSize)\n if legendPos in (\n \"best\",\n \"upper right\",\n \"upper left\",\n \"lower right\",\n \"lower left\",\n \"right\",\n \"center right\",\n \"center left\",\n \"lower center\",\n \"upper center\",\n \"center\",\n ):\n ax.legend(lines, impls, prop=prop, loc=legendPos)\n elif legendPos == \"below\":\n # Place the legend below the x-axis\n axisShrink = 0.15 if legendItems < 7 else 0.2\n box = ax.get_position()\n newHeight = box.height * (1 - axisShrink)\n ax.set_position([box.x0, box.y0 + box.height - newHeight, box.width, newHeight])\n ax.legend(\n lines,\n impls,\n prop=prop,\n bbox_to_anchor=(0, -0.1),\n borderaxespad=0.0,\n loc=\"upper left\",\n )\n else:\n # Place the legend on the right\n # Shink current axis by 15% to make room for the legend on the right.\n # If we were smarter we'd work out how much we need to shrink based on the\n # size of the legend box and so on, but this is OK for now.\n # See how much we think we need to shrink to fit in the legend\n axisShrink = 0.15 if legendLen < 20 else 0.2\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * (1 - axisShrink), box.height])\n ax.legend(\n lines,\n impls,\n prop=prop,\n bbox_to_anchor=(1.02, 1),\n borderaxespad=0.0,\n loc=\"upper left\",\n )", "def plot_3d_object(object_):\n \n # Initialize renderer instance\n r = Renderer()\n\n # Add surfaces and goal regions to the 
renderer instance\n for surf in object_:\n r.add((object_[surf][0],'b',1))\n if len(object_[surf])>2:\n r.add((object_[surf][2],'r',1))\n r.add((gPoint(-15,-15,-15),'k',1))\n r.show()", "def label_fig(ax, x_label=None, y_label=None, title=None,\n label_fontsize=18, title_fontsize=16, legend_fontsize=18, tick_size=14,\n legend=True, loc=0, ncol=1, tight=True, legbox=True):\n # ax.set(ylim=(ax.get_ylim()[0] * 0.9, ax.get_ylim()[1] * 1.1))\n ax.set_xlabel(x_label, fontsize=label_fontsize)\n ax.set_ylabel(y_label, fontsize=label_fontsize)\n ax.set_title(title, fontsize=title_fontsize)\n ax.xaxis.set_tick_params(labelsize = tick_size)\n ax.yaxis.set_tick_params(labelsize = tick_size)\n if legend:\n ax.legend(fontsize=legend_fontsize, loc=loc, ncol=ncol, frameon=legbox)\n if tight:\n plt.tight_layout()", "def get_legend(self, label):\n try:\n return next(leg for leg in self.legends if leg.label == label)\n except StopIteration:\n return None", "def plot_3D(self, title=None, fig_size=None, close=True):\r\n # TODO ajouter des titres\r\n combs = list(itertools.combinations(np.arange(self.features.shape[1]), 3))\r\n idx_plot = 1\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n if len(combs) % 2 == 1:\r\n n_col, n_row = (int((len(combs) + 1) / 2), int(len(combs) / 2))\r\n else:\r\n n_col, n_row = (int(len(combs) / 2), int(len(combs) / 2))\r\n for x, y, z in combs:\r\n ax = fig.add_subplot(n_row, n_col, idx_plot, projection='3d')\r\n for target in self.targets:\r\n idx = np.where(self.labels == target)\r\n ax.scatter(self.features[idx, x], self.features[idx, y], self.features[idx, z], label=str(target))\r\n if self.features_names is not None:\r\n ax.set_xlabel(str(self.features_names[x]))\r\n ax.set_ylabel(str(self.features_names[y]))\r\n ax.set_zlabel(str(self.features_names[z]))\r\n if title is not None:\r\n ax.set_title(title[idx_plot - 1])\r\n idx_plot += 1\r\n plt.legend(fontsize='small')\r\n if close:\r\n plt.show()\r\n else:\r\n return fig", "def _set_legend(legend):\n if isinstance(legend, dict):\n return Legend(legend)\n elif isinstance(legend, Legend):\n return legend\n else:\n return Legend()", "def test_legend_entries():\n fig = Figure()\n fig.basemap(projection=\"x1i\", region=[0, 7, 3, 7], frame=True)\n fig.plot(\n data=\"@Table_5_11.txt\",\n style=\"c0.15i\",\n fill=\"lightgreen\",\n pen=\"faint\",\n label=\"Apples\",\n )\n fig.plot(data=\"@Table_5_11.txt\", pen=\"1.5p,gray\", label=\"My lines\")\n fig.plot(data=\"@Table_5_11.txt\", style=\"t0.15i\", fill=\"orange\", label=\"Oranges\")\n fig.legend(position=\"JTR+jTR\")\n\n return fig", "def saliva_plot_combine_legend(fig: plt.Figure, ax: plt.Axes, saliva_types: Sequence[str], **kwargs):\n legend_loc = kwargs.get(\"legend_loc\", \"upper center\")\n legend_size = kwargs.get(\"legend_size\", \"small\")\n rect = kwargs.get(\"rect\", (0, 0, 1.0, 0.95))\n labels = [ax.get_legend_handles_labels()[1] for ax in fig.get_axes()]\n\n if all(len(label) == 1 for label in labels):\n # only one group\n handles = [ax.get_legend_handles_labels()[0] for ax in fig.get_axes()]\n handles = [h[0] for handle in handles for h in handle]\n labels = [_saliva_plot_params.get(\"legend_title\")[b] for b in saliva_types]\n ncol = len(handles)\n fig.legend(\n handles,\n labels,\n loc=legend_loc,\n ncol=ncol,\n prop={\"size\": legend_size},\n )\n else:\n handles = [ax.get_legend_handles_labels()[0] for ax in fig.get_axes()]\n handles = [h[0] for handle in handles for h in handle]\n labels = 
[ax.get_legend_handles_labels()[1] for ax in fig.get_axes()]\n labels = [\n \"{}: {}\".format(_saliva_plot_params.get(\"legend_title\")[b], \" - \".join(label))\n for b, label in zip(saliva_types, labels)\n ]\n ncol = len(handles)\n\n fig.legend(\n list(zip(handles[::2], handles[1::2])),\n labels,\n loc=legend_loc,\n ncol=ncol,\n numpoints=1,\n handler_map={tuple: HandlerTuple(ndivide=None)},\n prop={\"size\": legend_size},\n )\n ax.legend().remove()\n fig.tight_layout(pad=1.0, rect=rect)", "def plot_one_axes(self, fig_num: int, title: str, y_label: str, raw: np.ndarray, smoothed: np.ndarray,\n legend_entries: Sequence[str]) -> matplotlib.figure.Figure:\n fig = plt.figure(fig_num)\n ax = fig.subplots(1, 1)\n raw_lines = kine_graph_init(ax, raw, y_label, self.frame_nums, [{'ls': ':', 'lw': 2}] * 3)\n ax.set_prop_cycle(None)\n smoothed_lines = kine_graph_add(ax, smoothed, self.frame_nums, [{'ls': '-'}] * 3)\n plt.tight_layout()\n fig.suptitle(title, x=0.7)\n legend_text = ('Raw (' + legend_entries[0] + ')', 'Smoothed (' + legend_entries[1] + ')',\n 'Smoothed (' + legend_entries[2] + ')')\n fig.legend((raw_lines[0], smoothed_lines[1], smoothed_lines[2]), legend_text, ncol=3, handlelength=0.75,\n handletextpad=0.25, columnspacing=0.5, loc='lower left')\n make_interactive()\n return fig", "def legend(self, include: bool = None):\n \n if include == None:\n if self.hasLegend.get() == True:\n include = True\n else:\n include = False\n \n if include == True:\n labels = []\n for line in self.lines.values():\n labels.append(line.name)\n self.ax.legend(labels).set_draggable(True)\n self.hasLegend.set(True)\n else:\n self.ax.legend().remove() # This line complains to the console if no legend exists when it's removed\n self.hasLegend.set(False)\n self.canvas.draw()", "def _createLegend(legendMap, collection, size=9, shape=Hexagon):\n\n class AssemblyLegend:\n \"\"\"\n Custom Legend artist handler.\n\n Matplotlib allows you to define a class that implements ``legend_artist`` to give you\n full control over how the legend keys and labels are drawn. 
This is done here to get\n Hexagons with Letters in them on the legend, which is not a built-in legend option.\n\n See: http://matplotlib.org/users/legend_guide.html#implementing-a-custom-legend-handler\n \"\"\"\n\n def legend_artist(self, _legend, orig_handle, _fontsize, handlebox):\n letter, index = orig_handle\n x0, y0 = handlebox.xdescent, handlebox.ydescent\n width, height = handlebox.width, handlebox.height\n x = x0 + width / 2.0\n y = y0 + height / 2.0\n normVal = collection.norm(index)\n cmap = collection.get_cmap()\n colorRgb = cmap(normVal)\n if shape == Hexagon:\n patch = matplotlib.patches.RegularPolygon(\n (x, y),\n 6,\n height,\n orientation=math.pi / 2.0,\n facecolor=colorRgb,\n transform=handlebox.get_transform(),\n )\n elif shape == Rectangle:\n patch = matplotlib.patches.Rectangle(\n (x - height / 2, y - height / 2),\n height * 2,\n height,\n facecolor=colorRgb,\n transform=handlebox.get_transform(),\n )\n else:\n patch = matplotlib.patches.Circle(\n (x, y),\n height,\n facecolor=colorRgb,\n transform=handlebox.get_transform(),\n )\n\n luminance = numpy.array(colorRgb).dot(LUMINANCE_WEIGHTS)\n dark = luminance < 0.5\n if dark:\n color = \"white\"\n else:\n color = \"black\"\n handlebox.add_artist(patch)\n txt = mpl_text.Text(\n x=x, y=y, text=letter, ha=\"center\", va=\"center\", size=7, color=color\n )\n handlebox.add_artist(txt)\n return (patch, txt)\n\n ax = plt.gca()\n keys = []\n labels = []\n for value, label, description in legendMap:\n keys.append((label, value))\n labels.append(description)\n\n legend = ax.legend(\n keys,\n labels,\n handler_map={tuple: AssemblyLegend()},\n loc=\"center left\",\n bbox_to_anchor=(1.0, 0.5),\n frameon=False,\n prop={\"size\": size},\n )\n return legend", "def create_figure(result_obj, dpi) -> Figure:\n # plot the data\n # set the figure size to tall_fig_size\n figure = Figure(figsize=result_obj.tall_fig_size)\n ax = figure.subplots()\n # call creating heat map\n result_obj.create_error_bar(ax=ax)\n return figure", "def plot_model(voi, states, algebraic):\n import pylab\n (legend_states, legend_algebraic, legend_voi, legend_constants) = createLegends()\n pylab.figure(1)\n pylab.plot(voi,vstack((states,algebraic)).T)\n pylab.xlabel(legend_voi)\n# pylab.legend(legend_states + legend_algebraic, loc='best')\n pylab.show()", "def lazyLabel(xlab,ylab,titLab,\n axis_kwargs=dict(),\n tick_kwargs=dict(add_minor=True),\n legend_kwargs=dict(frameon=False,loc='best'),\n title_kwargs=dict(),\n useLegend=None,zlab=None,ax=None):\n ax = gca(ax)\n if useLegend is None and legend_is_useable():\n # then we should do a legend (default behavior)\n useLegend = True\n elif useLegend is None:\n # no point in having a legend; no handles\n useLegend = False\n # set the labels and title\n xlabel(xlab,ax=ax,**axis_kwargs)\n ylabel(ylab,ax=ax,**axis_kwargs)\n title(titLab,ax=ax,**title_kwargs)\n # set the font\n tickAxisFont(ax=ax,**tick_kwargs)\n # if we have a z or a legemd, set those too.\n if (zlab is not None):\n zlabel(zlab,ax=ax,**axis_kwargs)\n if (useLegend):\n leg = legend(ax=ax,**legend_kwargs)\n else:\n leg = None\n return leg", "def toggle_minutni_legend(self, x):\r\n self.konfig.minutni.set_legend(x)\r\n self.minutniGraf.toggle_legend(x)", "def make_plot(self, conts, xlabel=None, ylabel=None, legend=None):\n\n # preparations\n assert not ((xlabel is None) and (ylabel is None)), \"Set xlabel and ylabel\"\n\n if legend is None:\n for i, element in enumerate(conts):\n plt.plot(*element.T, linewidth=1.5)\n else:\n for i, element in 
enumerate(conts):\n plt.plot(*element.T, label=legend[i], linewidth=1.5)\n plt.legend(fontsize=self.medium_fontsize)\n\n plt.grid(True)\n plt.ticklabel_format(style='sci', scilimits=self.scilimits)\n plt.tick_params(labelsize=self.medium_fontsize)\n plt.xlabel(xlabel, fontsize=self.big_fontsize)\n plt.ylabel(ylabel, fontsize=self.big_fontsize)\n plt.show()", "def plot_coefs(data, x_label, y_label, title, kind = 'barh', style = 'seaborn-darkgrid',\n figsize = (10, 8)):\n\n with plt.style.context(style):\n \n ax = data.plot(kind=kind, figsize = figsize, rot=45)\n \n if kind == 'barh':\n \n ax.xaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('${x:,.0f}'))\n ax.set_yticklabels(ax.get_yticklabels(), ha='right')\n ax.axvline(color='k')\n ax.set(xlabel = x_label, ylabel = y_label, title = title)\n \n else:\n ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('${x:,.0f}'))\n ax.set_xticklabels(ax.get_xticklabels(), ha='right')\n ax.axhline(color='k')\n ax.set(xlabel = x_label, ylabel = y_label, title = title)\n\n return ax", "def plot_colour(self, label):\n label = label.lower()\n pretty_colours = {}\n # SPIce HD\n pretty_colours['544'] = 'maroon'\n pretty_colours['545'] = 'goldenrod'\n pretty_colours['548'] = 'blueviolet'\n pretty_colours['549'] = 'forestgreen'\n # H2\n ## DOM Efficiency Sets\n pretty_colours['551'] = 'cornflowerblue'\n pretty_colours['552'] = 'cornflowerblue'\n pretty_colours['553'] = 'cornflowerblue'\n pretty_colours['554'] = 'mediumseagreen'\n pretty_colours['555'] = 'mediumseagreen'\n pretty_colours['556'] = 'mediumseagreen'\n ## Hole Ice Sets\n pretty_colours['560'] = 'olive'\n pretty_colours['561'] = 'olive'\n pretty_colours['564'] = 'darkorange'\n pretty_colours['565'] = 'darkorange'\n pretty_colours['572'] = 'teal'\n pretty_colours['573'] = 'teal'\n ## Dima Hole Ice Set without RDE\n pretty_colours['570'] = 'mediumvioletred'\n ## Baseline\n pretty_colours['585'] = 'slategrey'\n # Systematics\n pretty_colours['aeff_scale'] = 'maroon'\n pretty_colours['atm_muon_scale'] = 'goldenrod'\n pretty_colours['deltam31'] = 'blueviolet'\n pretty_colours['theta23'] = 'forestgreen'\n pretty_colours['hole_ice_fwd'] = 'mediumvioletred'\n pretty_colours['dom_eff'] = 'cornflowerblue'\n pretty_colours['genie_ma_qe'] = 'mediumseagreen'\n pretty_colours['genie_ma_res'] = 'olive'\n pretty_colours['hole_ice'] = 'darkorange'\n pretty_colours['nue_numu_ratio'] = 'teal'\n pretty_colours['theta13'] = 'fuchsia'\n pretty_colours['barr_nu_nubar'] = 'thistle'\n pretty_colours['barr_uphor'] = 'orchid'\n pretty_colours['delta_index'] = 'navy'\n # Mass ordering\n pretty_colours['no'] = 'r'\n pretty_colours['io'] = 'b'\n # Asimov fits\n pretty_colours['th_to_wh'] = 'darkviolet'\n pretty_colours['wh_to_th'] = 'deepskyblue'\n colourlabel = None\n for colourkey in pretty_colours.keys():\n if (colourkey in label) or (colourkey == label):\n colourlabel = pretty_colours[colourkey]\n if colourlabel is None:\n logging.debug(\"I do not have a colour scheme for your label %s. 
\"\n \"Returning black.\"%label)\n colourlabel = 'k'\n return colourlabel", "def sn1979c(ax, col, legend):\n d = 5.341805643483106e+25\n nu = 1.4E9 # 20cm\n t = np.array(\n [437,594,631,663,679,684,727,747,786,822,839,876,882,\n 914,937,973,995,\n 1026,1071,1091,1127,1156,1168,1212,1243,1277,1314,1358,1390,\n 1415,1435,1466,1513,1565,1600,1634,1659,1698,1714,1750,1771,\n 1931,2027])\n flux = np.array(\n [0.2,2.1,2.5,2.7,2.8,2.8,4.4,4.8,6.0,7.1,7.1,7.6,8.6,\n 9.8,6.5,8.6,9.5,\n 10.2,10.8,10.3,10.4,12.2,10.1,10.2,11.5,11.2,13.0,11.3,10.2,\n 9.6,11.2,13.2,11.1,9.1,8.5,9.1,8.8,10.1,9.7,9.1,8.9,\n 7.0,7.7])\n lum = plot_line(ax, d, t, nu*flux, 'SN1979c', 'SN', col, legend)\n #ax.text(t[0]/1.05, lum[0], 'SN1979C', fontsize=11,\n # verticalalignment='center',\n # horizontalalignment='right')", "def legend(self):\n return self.container['legend']", "def plot_cf(self, **options):\n n = len(self.hs)\n xs = np.arange(-n//2, n//2)\n hs = np.roll(self.hs, len(self.hs) // 2)\n plt.plot(xs, hs.real, label='real', **options)\n plt.plot(xs, hs.imag, label='imag', **options)\n plt.legend()", "def show_plot(self):\n # Tight layout\n plt.tight_layout()\n # Remove whitespace between upper and lower plots\n plt.subplots_adjust(hspace=0, wspace=0.3) \n # Tick marks on all sides of each plot and show legend\n for j in range(2):\n axes=self.ax[j]\n axes.tick_params(axis='both', which='both', direction='in',\n top=True, right=True)\n legend=axes.legend(framealpha=0)\n # Save and show\n plt.savefig('CMB_lensing_potential_LCDM_MG.pdf', format='pdf')\n plt.show()", "def PlotGraph(obj):\n\n generated_text = \"\\n\\n\\nclass PlotGraph():\"\n\n # get the parameters needed from the object\n expression = obj[\"expression\"]\n title = obj[\"name\"] + \" Graph\"\n graphColor = \"b\"\n scatter = False\n\n # optional parameters\n if obj[\"title\"]:\n title = obj[\"title\"] # should be written more concisely in python 3.8\n\n if obj[\"graphColor\"]:\n graphColor = obj[\"graphColor\"] # should be written more concisely in python 3.8\n\n if obj[\"scatter\"]:\n scatter = obj[\"scatter\"] # should be written more concisely in python 3.8\n\n # CONSTRUCTOR\n # def __init__(self, start, stop, num_samples, title=\"example\"):\n generated_text += \"\\n\\tdef __init__(self, start, stop, num_samples, title=\\\"{}\\\"): \".format(title)\n generated_text += \"\\n\\t\\tself.function = \\\"\\\"\"\n generated_text += \"\\n\\t\\tself.title = title\"\n generated_text += \"\\n\\t\\tself.X = np.linspace(start, stop, num_samples)\"\n generated_text += \"\\n\\t\\tself.Y = []\"\n\n # f()\n generated_text += \"\\n\\n\\tdef f(self):\"\n generated_text += \"\\n\\t\\tself.Y = [self.compute(x) for x in self.X]\"\n\n # compute()\n generated_text += \"\\n\\n\\tdef compute(self, x):\"\n generated_text += \"\\n\\t\\treturn np.sin(x)\"\n\n # plot()\n generated_text += \"\\n\\n\\tdef plot(self, scatter=False, color='{}'):\".format(graphColor)\n generated_text += \"\\n\\t\\tplt.figure(1)\\n\\t\\tplt.title(self.title)\"\n generated_text += \"\\n\\t\\tif scatter:\"\n generated_text += \"\\n\\t\\t\\tplt.scatter(self.X, self.Y, c=color)\\n\\t\\t\\treturn\"\n generated_text += \"\\n\\t\\tplt.plot(self.X, self.Y, c=color)\"\n\n # show()\n generated_text += \"\\n\\n\\tdef show(self):\"\n generated_text += \"\\n\\t\\tplt.show()\"\n\n # call()\n generated_text += \"\\n\\n\\tdef call(self):\"\n generated_text += \"\\n\\t\\tself.f()\"\n generated_text += \"\\n\\t\\tself.plot()\"\n generated_text += \"\\n\\t\\tself.show()\"\n\n #print(generated_text)\n return 
generated_text", "def render(self):\n # TODO: this is when the backing store should be swapped in.\n from matplotlib.font_manager import FontProperties\n self.subplot.legend(prop=FontProperties(size=10))\n #self.subplot.legend()\n pass", "def plot(x, y, car_type, size, leg_names):\n\n plt.title('City MPG / HP for each type of car in relation to the car size')\n\n color_map = {1: 'green', 2: 'orange', 3: 'teal', 4: 'maroon', 5: 'yellow', 6: 'red', 7: 'silver'}\n colors = []\n for index, type in enumerate(car_type):\n colors.append(color_map[type])\n car_type[index] = color_map[type]\n\n plt.scatter(x, y, color=colors, s=size, marker='s', edgecolors='black')\n plt.xlabel('HP')\n plt.ylabel('City MPG')\n\n # scale steps\n plt.yticks(np.arange(10, 65, 5))\n plt.xticks(np.arange(min(x), max(x) + 42.7, 42.7))\n\n # Add legend\n recs = []\n for i in color_map.values():\n recs.append(mpatches.Rectangle((0, 0), 1, 1, fc=i))\n plt.legend(recs, leg_names, loc=1)\n\n # plt.show()\n plt.savefig(sys.argv[2])", "def basic_plot_polishing(ax, **kwargs):\r\n\t\t# Title\r\n\t\tax.set_title(kwargs.get('title'), **kwargs.get('title_kw', {}))\r\n\r\n\t\t# ............................................... X- and Y-axes\r\n\t\t# Axes Labels\r\n\t\tax.set_xlabel(kwargs.get('xlabel'), **kwargs.get('xlabel_kw', {}))\r\n\t\tax.set_ylabel(kwargs.get('ylabel'), **kwargs.get('ylabel_kw', {}))\r\n\t\t# Limits\r\n\t\tax.set_xlim(kwargs.get('xlims'))\r\n\t\tax.set_ylim(kwargs.get('ylims'))\r\n\t\t# Ticks\r\n\t\tplt.xticks(**kwargs.get('xticks_kw', {}))\r\n\t\tplt.yticks(**kwargs.get('yticks_kw', {}))\r\n\r\n\t\t# ............................................... Grid, legend\r\n\t\t#if kwargs.get('grid', True):\r\n\t\t\t#ax.grid(True)\r\n\t\tif kwargs.get('grid'):\r\n\t\t\tif kwargs['grid'] is True:\r\n\t\t\t\tax.grid()\r\n\t\t\telse:\r\n\t\t\t\tax.grid(**kwargs['grid'])\r\n\r\n\t\t# todo recommend to interpret legend as the kw params\r\n\t\t#if kwargs.get('legend'):\r\n\t\t#\tax.legend(**kwargs.get('legend_kw', {}))\r\n\r\n\t\tif kwargs.get('legend'):\r\n\t\t\t# backwards compatibility and allows default call\r\n\t\t\tif kwargs['legend'] is True:\r\n\t\t\t\tax.legend()\r\n\t\t\telse:\r\n\t\t\t\tax.legend(**kwargs.get('legend'))\r\n\r\n\t\treturn ax", "def plot_target(target, dataframe, hist_bins=20, target_label=None):\n # Create figure\n target_fig = figure(figsize=(16, 16))\n\n # Create boxplot\n box_ax = target_fig.add_subplot(2, 1, 1)\n target_box = boxplot(\n dataframe[target], flierprops={\"markerfacecolor\": \"white\"}, ax=box_ax\n )\n\n # Create histogram\n hist_ax = target_fig.add_subplot(2, 1, 2)\n target_hist = histplot(\n dataframe[target], bins=hist_bins, kde=True, ax=hist_ax\n )\n\n # Change the x-axis labels\n if target_label is not None:\n for axes in [target_box, target_hist]:\n axes.set(xlabel=target_label)", "def graph(x, y, xlabel = \"\", ylabel = \"\", legend = \"\", color = \"\"):\n plt.plot(x, y, color, label = legend)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.legend(loc = 'best')\n plt.grid()", "def fl_draw_object_label_outside(ptr_flobject):\n _fl_draw_object_label_outside = library.cfuncproto(\n library.load_so_libforms(), \"fl_draw_object_label_outside\",\n None, [cty.POINTER(xfdata.FL_OBJECT)], \\\n \"\"\"void fl_draw_object_label_outside(FL_OBJECT * ob)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n _fl_draw_object_label_outside(ptr_flobject)", "def _single_legend(self, pairs, ncol=None, order=None, 
**kwargs):\n # Optionally change order\n # See: https://stackoverflow.com/q/10101141/4970632\n # Example: If 5 columns, but final row length 3, columns 0-2 have\n # N rows but 3-4 have N-1 rows.\n ncol = _not_none(ncol, 3)\n if order == 'C':\n split = [pairs[i * ncol:(i + 1) * ncol] for i in range(len(pairs) // ncol + 1)]\n pairs = []\n nrows_max = len(split) # max possible row count\n ncols_final = len(split[-1]) # columns in final row\n nrows = [nrows_max] * ncols_final + [nrows_max - 1] * (ncol - ncols_final)\n for col, nrow in enumerate(nrows): # iterate through cols\n pairs.extend(split[row][col] for row in range(nrow))\n\n # Draw legend\n return mlegend.Legend(self, *zip(*pairs), ncol=ncol, **kwargs)", "def _create_legend(self, hue, cmap):\n if cmap:\n if hue in self.categorical_columns:\n mapping = self.feature_mapping[hue]\n categories = cmap[\"transform\"].factors\n colors = cmap[\"transform\"].palette\n text = \"\"\n template = self._legend_template_html\n\n for category, color in zip(categories, colors):\n mapped_category = mapping[float(category)] # float as keys in mapping dicts are numerical\n text += template.format(\n color=color,\n category=mapped_category\n )\n legend = Div(text=text, css_classes=[self._legend, self._legend_categorical])\n\n else:\n\n colorbar = ColorBar(color_mapper=cmap[\"transform\"],\n ticker=BasicTicker(desired_num_ticks=4),\n formatter=NumeralTickFormatter(format=\"0.[0000]\"),\n label_standoff=7,\n border_line_color=None,\n bar_line_color=self.plot_design.text_color,\n major_label_text_font_size=\"14px\",\n major_label_text_color=self.plot_design.text_color,\n major_tick_line_color=self.plot_design.text_color,\n major_tick_in=0,\n location=(-100, 0), # by default ColorBar is placed to the side of the Figure\n width=30\n )\n legend = default_figure(\n {\n \"height\": 120,\n \"width\": 120,\n \"css_classes\": [self._legend]\n }\n )\n legend.add_layout(colorbar, \"right\")\n\n else:\n legend = Div(\n text=self._legend_no_hue_html,\n css_classes=[self._legend]\n )\n\n return legend", "def add_legend_scale(\n self,\n corner_offset_factor=2.0,\n bottom_border_offset=30,\n top_border_offset=30,\n left_border_offset=30,\n right_border_offset=30,\n bottom_axis_visibility=True,\n top_axis_visibility=True,\n left_axis_visibility=True,\n right_axis_visibility=True,\n legend_visibility=True,\n xy_label_mode=False,\n render=True,\n color=None,\n font_size_factor=0.6,\n label_size_factor=1.0,\n label_format=None,\n number_minor_ticks=0,\n tick_length=5,\n minor_tick_length=3,\n show_ticks=True,\n tick_label_offset=2,\n ):\n color = Color(color, default_color=self._theme.font.color)\n\n legend_scale = _vtk.vtkLegendScaleActor()\n legend_scale.SetCornerOffsetFactor(corner_offset_factor)\n legend_scale.SetLegendVisibility(legend_visibility)\n if xy_label_mode:\n legend_scale.SetLabelModeToXYCoordinates()\n else:\n legend_scale.SetLabelModeToDistance()\n legend_scale.SetBottomAxisVisibility(bottom_axis_visibility)\n legend_scale.SetBottomBorderOffset(bottom_border_offset)\n legend_scale.SetLeftAxisVisibility(left_axis_visibility)\n legend_scale.SetLeftBorderOffset(left_border_offset)\n legend_scale.SetRightAxisVisibility(right_axis_visibility)\n legend_scale.SetRightBorderOffset(right_border_offset)\n legend_scale.SetTopAxisVisibility(top_axis_visibility)\n legend_scale.SetTopBorderOffset(top_border_offset)\n\n for text in ['Label', 'Title']:\n prop = getattr(legend_scale, f'GetLegend{text}Property')()\n if color != Color('white'):\n # This property turns black if 
set\n prop.SetColor(*color.int_rgb)\n prop.SetFontSize(\n int(font_size_factor * 20)\n ) # hack to avoid multiple font size arguments\n\n for ax in ['Bottom', 'Left', 'Right', 'Top']:\n axis = getattr(legend_scale, f'Get{ax}Axis')()\n axis.GetProperty().SetColor(*color.int_rgb)\n if color != Color('white'):\n # This label property turns black if set\n axis.GetLabelTextProperty().SetColor(*color.int_rgb)\n axis.SetFontFactor(font_size_factor)\n axis.SetLabelFactor(label_size_factor)\n if label_format:\n axis.SetLabelFormat(label_format)\n axis.SetNumberOfMinorTicks(number_minor_ticks)\n axis.SetTickLength(tick_length)\n axis.SetMinorTickLength(minor_tick_length)\n axis.SetTickVisibility(show_ticks)\n axis.SetTickOffset(tick_label_offset)\n\n return self.add_actor(\n legend_scale,\n reset_camera=False,\n name='_vtkLegendScaleActor',\n culling=False,\n pickable=False,\n render=render,\n )", "def one_data_figure_shaded(obs, axobj, color='Blue', facecolor='Blue',\n **kwargs):\n \n x, y, e = obs['wavelength'], obs['spectrum'], obs['unc']\n axobj.fill_between(x, y-e, y+e, facecolor='grey', alpha=0.3)\n axobj.plot(x, y, color = color, linewidth = 0.5,**kwargs)\n\n return axobj", "def plot_loss(losses: Union[Iterable, dict], ylabel: str = 'Losses') -> plt.Figure:\n fig, ax = plt.subplots()\n if isinstance(losses, np.ndarray) or isinstance(losses, list):\n ax.plot(losses)\n elif isinstance(losses, dict):\n for key, loss in losses.items():\n ax.plot(loss, label=key)\n ax.legend(loc='best')\n else:\n raise TypeError\n\n ax.set_xlabel('Iterations')\n ax.set_ylabel(ylabel)\n\n return fig", "def _labels_contour(self, obj, *args, fmt=None, **kwargs):\n # Parse input args\n text_kw = {}\n for key in (*kwargs,): # allow dict to change size\n if key not in (\n 'levels', 'colors', 'fontsize', 'inline', 'inline_spacing',\n 'manual', 'rightside_up', 'use_clabeltext',\n ):\n text_kw[key] = kwargs.pop(key)\n kwargs.setdefault('inline_spacing', 3)\n kwargs.setdefault('fontsize', rc['text.labelsize'])\n fmt = _not_none(fmt, pticker.SimpleFormatter())\n\n # Draw hidden additional contour for filled contour labels\n cobj = obj\n colors = None\n if _getattr_flexible(obj, 'filled'): # guard against changes?\n cobj = self.contour(*args, levels=obj.levels, linewidths=0)\n lums = (to_xyz(obj.cmap(obj.norm(level)), 'hcl')[2] for level in obj.levels)\n colors = ['w' if lum < 50 else 'k' for lum in lums]\n kwargs.setdefault('colors', colors)\n\n # Draw labels\n labs = cobj.clabel(fmt=fmt, **kwargs)\n if labs is not None: # returns None if no contours\n for lab in labs:\n lab.update(text_kw)\n\n return labs", "def plot_average(*args, **kwargs):\n pyplot.figure(figsize=(15, 5))\n plot_average_impl(*args, **kwargs)\n pyplot.legend()", "def __init__(self, ax, labels, active=0, activecolor='blue', size=49,\r\n orientation=\"vertical\", **kwargs):\r\n AxesWidget.__init__(self, ax)\r\n self.activecolor = activecolor\r\n axcolor = ax.get_facecolor()\r\n self.value_selected = None\r\n\r\n ax.set_xticks([])\r\n ax.set_yticks([])\r\n ax.set_navigate(False)\r\n\r\n circles = []\r\n for i, label in enumerate(labels):\r\n if i == active:\r\n self.value_selected = label\r\n facecolor = activecolor\r\n else:\r\n facecolor = axcolor\r\n p = ax.scatter([],[], s=size, marker=\"o\", edgecolor='black',\r\n facecolor=facecolor)\r\n circles.append(p)\r\n if orientation == \"horizontal\":\r\n kwargs.update(ncol=len(labels), mode=\"expand\")\r\n kwargs.setdefault(\"frameon\", False) \r\n self.box = ax.legend(circles, labels, loc=\"center\", 
**kwargs)\r\n self.labels = self.box.texts\r\n self.circles = self.box.legendHandles\r\n for c in self.circles:\r\n c.set_picker(5)\r\n self.cnt = 0\r\n self.observers = {}\r\n\r\n self.connect_event('pick_event', self._clicked)", "def legend(self) -> Optional[str]:\n return pulumi.get(self, \"legend\")", "def draw_plot(ax, dfs, legend, x, y, xscale, yaxis_max):\n xticks = dfs_all_values(dfs, x)\n # loop over all pandas.DataFrame objects\n for df in dfs:\n # setting the x-column as an index is required to draw the y-column\n # as a function of x argument\n df = df.set_index(x)\n # plot line on the subplot\n df[y].plot.line(ax=ax, rot=45, marker='.')\n\n if xscale == \"linear\":\n ax.set_xscale(xscale)\n else:\n ax.set_xscale(xscale, base=2)\n ax.xaxis.set_major_formatter(ScalarFormatter())\n\n ax.set_xticks(xticks)\n ax.set_xlabel(get_label(x))\n ax.set_ylabel(get_label(y))\n ax.set_ylim(bottom=0)\n if yaxis_max is not None:\n ax.set_ylim(top=float(yaxis_max))\n ax.legend(legend, fontsize=6)\n ax.grid(True)", "def plot_2D(self, title=None, fig_size=None, close=True):\r\n # TODO add possibility to change title parameter\r\n\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n for target in self.targets:\r\n idx = np.where(self.labels == target)\r\n ax.scatter(self.features[idx, 0], self.features[idx, 1], label=str(target))\r\n if self.features_names is not None:\r\n plt.xlabel(str(self.features_names[0]))\r\n plt.ylabel(str(self.features_names[1]))\r\n else:\r\n plt.xlabel('axe 1')\r\n plt.ylabel('axe 2')\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n if title is not None:\r\n plt.title(title)\r\n if close:\r\n plt.show()\r\n else:\r\n return fig", "def figsetup(title, xlab, ylab, fname, show=False):\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.title(fname)\n plt.tight_layout()\n plt.title(title)\n plt.legend()\n plt.savefig(\"../figs/\" + fname + \".png\", dpi=250)\n if show is False:\n plt.close()\n else:\n plt.show()\n return", "def figsetup(title, xlab, ylab, fname, show=False):\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.title(fname)\n plt.tight_layout()\n plt.title(title)\n plt.legend()\n plt.savefig(\"../figs/\" + fname + \".png\", dpi=250)\n if show is False:\n plt.close()\n else:\n plt.show()\n return", "def plot_regressions(r2_df_weighted, describer, color_dict, cols_classes):\n alpha=0.5\n ax = plt.gca()\n r2_df_weighted.sort_values().plot(ax=ax, kind=\"barh\", color=\"#4C72B0\",zorder=3)\n fig = plt.gcf()\n fig.tight_layout()\n fig.set_size_inches(10, 11.7)\n plt.xlim(0,1)\n for tick_label in ax.axes.get_yticklabels():\n tick_text = tick_label.get_text()\n tick_label.set_fontsize(14)\n tick_label.set_color(color_dict[tick_text])\n ax.xaxis.grid(color=\"grey\", zorder=0)\n ax.set_facecolor(\"white\")\n plt.xlabel(\"Weigthed Coefficient of Determination\", alpha=alpha, fontsize=14)\n plt.ylabel(\"Catchment Attributes\", alpha=alpha, fontsize=14)\n plt.setp(ax.get_xticklabels(), alpha=alpha)\n # Remove the borders\n for spine in ax.spines.values():\n spine.set_visible(False)\n ax.tick_params(axis=u'both', which=u'both',length=0)\n\n \n # Create the legend\n handles = []\n for att, color in cols_classes.items():\n handles.append(mpatches.Patch(color=color, label=att))\n legend = ax.legend(handles=handles, frameon=True, fancybox=True, 
facecolor=\"white\", edgecolor=\"grey\", fontsize=14)\n for text in legend.get_texts():\n text.set_color(\"grey\")\n fig.set_size_inches(15,10)\n plt.savefig(\"r2_scores_ \" + describer + \".png\", bbox_inches=\"tight\", dpi=300)", "def plot_obj_func():\n X1 = [i for i in range(-63, 65, 1)]\n Y1 = [8 * math.sin(0.06 * x) + 8 * math.cos(0.14 * x) + 8 * math.exp(math.cos(0.2*x)) for x in X1]\n plt.plot(X1, Y1)\n plt.show()", "def DrawHistograms(Histograms, ran, title, xlabel, ylabel, Save=False,Normalize=True,DrawTitle=False, t_sleep=0):\n canvas = rt.TCanvas('canvas','canvas',600,600)\n\tif DrawTitle: \n\t\tcanvas.SetTitle(title)\n\telse:\n\t\trt.gStyle.SetOptTitle(0)\n\t\n\thistlist = []\n if len(Histograms) > 1:\n rt.gStyle.SetOptStat(0)#something is wrong with this\n legend = rt.TLegend(0.9,0.9,0.65,0.75)\n for nr, Histogram in enumerate(Histograms):\n\t\thistlist.append(Histogram[0])\n\t\tif len(Histogram)>2:\n\t\t\thistlist[nr].SetLineColor(Histogram[2])\n\t\telse:\n\t\t\tif nr < 3:\n \t\t \thistlist[nr].SetLineColor(nr+2)\n\t\t\telse:\n\t\t\t\thistlist[nr].SetLineColor(nr+3)\n if nr == 0:\n\t\t\tif DrawTitle: histlist[nr].SetTitle(title)\n histlist[nr].GetXaxis().SetTitle(xlabel)\n histlist[nr].GetYaxis().SetTitle(ylabel)\n histlist[nr].GetYaxis().SetTitleOffset(1.5)\n if Normalize:\n \thistlist[nr].DrawNormalized()\n else:\n histlist[nr].Draw()\n else:\n if Normalize:\n histlist[nr].DrawNormalized(\"SAME\")\n else:\n histlist[nr].Draw(\"SAME\")\n if len(Histograms)>1:\n legend.AddEntry(histlist[nr],Histogram[1])\n if len(Histograms)>1: \n\t\t#rt.gStyle.SetOptStat(0)#something is wrong with this\n\t\tlegend.Draw()\n if Save: canvas.SaveAs(\"Thesis_Plots/\"+title+\".png\")\n sleep(t_sleep)", "def H_perform_plot(performance, hurricane):\n fig = plt.figure(figsize = (15, 10))\n for i in range(len(performance)):\n temp1 = performance[i]\n temp2 = hurricane[i]\n plt.plot(np.arange(0, len(temp1), 1), temp1, color = temp2.c, label = temp2.name)\n plt.xlabel('Time Step')\n plt.xticks(np.arange(0, len(temp1), 30))\n plt.ylabel('Performance')\n plt.legend(bbox_to_anchor=(1, 1), loc='upper left', ncol=1, frameon = 0)\n plt.grid(True)", "def confusion_plot(lab, pred, name, new_plot=False, save=False):\n plt.scatter(lab, lab, label=\"truth\", s=2, color=\"black\")\n plt.scatter(lab, pred, label=name, s=2)\n handles, labels = plt.gca().get_legend_handles_labels()\n by_label = dict(zip(labels, handles))\n plt.legend(by_label.values(), by_label.keys())\n plt.xlabel(\"Truth\")\n plt.ylabel(\"Prediction\")", "def multi_line_plot(x_data, y_data, title, x_label, y_label):\n plt.figure(1, (18, 8)) # something, plot size\n plt.subplot(111)\n legend = []\n for i in range(len(x_data)):\n plt.plot(x_data[i], y_data[i])\n legend.append((i+1))\n plt.title(title)\n plt.xlabel(x_label, fontsize=12)\n plt.ylabel(y_label, fontsize=12)\n plt.legend(legend, loc='upper left')\n plt.show()", "def paint_graph(x_label, y_label, df, th_user, label_user, th_calc, label_calc):\n import seaborn as sns\n import matplotlib.pyplot as plt\n\n df = df.copy()\n df['target'] = df['target'].map({True: 'target', False: 'non-target'})\n title = '{} vs {}'.format(x_label, y_label)\n\n plt.figure(figsize=(1920 / 96, 1103 / 96), dpi=96)\n\n sns.stripplot(x=x_label, y=y_label, hue='target', data=df,\n palette={'target': 'blue', 'non-target': 'red'}, alpha=0.5, jitter=False, dodge=True)\n plt.axvline(x=th_user, label=label_user, c='g')\n plt.axvline(x=th_calc, label=label_calc, c='k')\n plt.title(title)\n plt.legend()\n\n 
plt.savefig('{}.png'.format(title))\n\n if plt.get_backend() == 'TkAgg':\n mng = plt.get_current_fig_manager()\n mng.window.state('zoomed')\n plt.show()" ]
[ "0.649047", "0.61699635", "0.6117448", "0.6017658", "0.59494585", "0.59489495", "0.59252846", "0.5908187", "0.58153063", "0.58062863", "0.5786078", "0.5747794", "0.5728206", "0.5683173", "0.56398714", "0.56154585", "0.5608667", "0.5580261", "0.5554319", "0.5549609", "0.55420744", "0.5499324", "0.5493276", "0.54257244", "0.5386864", "0.5367721", "0.53475153", "0.5302455", "0.529229", "0.5288217", "0.5226085", "0.52230984", "0.52227455", "0.52121735", "0.51930565", "0.51919913", "0.5185203", "0.51706773", "0.51705915", "0.5165323", "0.5150632", "0.5139926", "0.5135701", "0.51296544", "0.512269", "0.51197517", "0.5091793", "0.5054467", "0.50513536", "0.5045963", "0.50322455", "0.5022119", "0.50128573", "0.50121254", "0.5011897", "0.5005317", "0.5001383", "0.49772334", "0.49579614", "0.4957348", "0.49406835", "0.4920882", "0.49018803", "0.48856854", "0.48814508", "0.48759735", "0.4872125", "0.4870036", "0.4865039", "0.4864064", "0.48630568", "0.48577812", "0.48516715", "0.48358715", "0.48330116", "0.48144352", "0.48113978", "0.4802716", "0.48012817", "0.4789607", "0.47861025", "0.47732162", "0.47703534", "0.4769263", "0.47642034", "0.47611052", "0.47594082", "0.47541744", "0.47370663", "0.47368458", "0.47289228", "0.47272113", "0.47272113", "0.47270665", "0.4715414", "0.4712556", "0.47068626", "0.47043157", "0.47028056", "0.4701731" ]
0.723461
0
Plot a classic "convergence plot" that shows how the function value approaches the optimum as time passes, in terms of raw performance. groupby is the method of aggregating results of multiple instances: a callable, stringable object, GroupByMedian by default. By default, raw function values (as the difference to the optimum) are shown, but values relative to some baseline dataset can be shown instead.
def fval_by_budget(ax, pds, baseline_ds=None, baseline_label="", dim=None, funcId=None, groupby=None):
    if groupby is None:
        groupby = GroupByMedian()
    pfsize = len(pds.algds.keys())

    if baseline_ds:
        baseline_budgets = baseline_ds.funvals[:, 0]
        baseline_funvals = groupby(baseline_ds.funvals[:, 1:], axis=1)
        baseline_safefunvals = np.maximum(baseline_funvals, 10**-8)  # eschew zeros
        # fvb is a matrix with each row being [budget, funval]
        baseline_fvb = np.transpose(np.vstack([baseline_budgets, baseline_safefunvals]))

    for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
        #print name, ds
        budgets = ds.funvals[:, 0]
        funvals = groupby(ds.funvals[:, 1:], axis=1)

        # Throw away funvals after ftarget reached
        try:
            limit = np.nonzero(funvals < 10**-8)[0][0] + 1
        except IndexError:
            limit = np.size(budgets) + 1
        budgets = budgets[:limit]
        funvals = funvals[:limit]
        fvb = np.transpose(np.vstack([budgets[:limit], funvals[:limit]]))

        if baseline_ds:
            # Relativize by baseline
            fvba = ra.alignArrayData(ra.VArrayMultiReader([fvb, baseline_fvb]))
            budgets = fvba[:, 0]
            funvals = fvba[:, 1] / fvba[:, 2]

        style['markevery'] = 16
        ax.loglog(budgets, funvals, label=name, basex=pfsize, **style)

    if baseline_ds:
        ax.set_yticks([1], minor=True)
    ax.set_xlabel('Budget')
    ax.set_ylabel(_fval_label(baseline_ds, baseline_label, str(groupby)))
    ax.grid()
    if baseline_ds:
        ax.yaxis.grid(True, which='minor')
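A minimal usage sketch of the helper above, assuming a cocopf-style pds performance-data container and the GroupByMedian aggregator referenced in the code; some_reference_ds is a hypothetical placeholder for whichever loaded dataset serves as the baseline.

import matplotlib.pyplot as plt

# Assumed setup: pds already holds the loaded algorithm datasets
# (it must expose algds and per-dataset funvals as used above).
fig, ax = plt.subplots(figsize=(6, 4))

# Raw view: aggregated (median) function-value error versus budget
# for one dimension/function pair.
fval_by_budget(ax, pds, dim=10, funcId=1, groupby=GroupByMedian())

# Relative view: pass one of the loaded datasets as the baseline so the
# curves are divided by its per-budget values (hypothetical baseline name).
# fval_by_budget(ax, pds, baseline_ds=some_reference_ds,
#                baseline_label="reference", dim=10, funcId=1)

ax.legend(loc="best")
plt.show()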
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_bias(clf_list = ['test_small','rt_small','test2_small'],return_df = False,XKCD = False):\n if XKCD = True:\n plt.xkcd()\n print('damn')\n df = load_all_dfs(clf_list)\n df = df.swaplevel(0,1)\n del df['std']\n df.hist()\n plt.figure()\n\n for clf in clf_list:\n df.ix[clf].mean().plot(label = clf,figsize=(16, 4))\n plt.legend(loc='upper right')\n plt.title('mean')\n plt.figure()\n \n # c = df.columns\n for clf in clf_list:\n #df[c[1:]].ix[clf].max().plot(label = clf,figsize=(16, 4))\n df.ix[clf].max().plot(label = clf,figsize=(16, 4))\n plt.legend(loc='upper right')\n plt.title('max')\n \n plt.figure()\n for clf in clf_list:\n df.ix[clf].std().plot(label = clf,figsize=(16, 4))\n\n \n plt.legend(loc='upper right')\n plt.title('std')\n plt.figure()\n used_list = []\n for clf in clf_list:\n for clf2 in clf_list:\n if (clf != clf2) and ({clf,clf2} not in used_list):\n diff = ((df.ix[clf] - df.ix[clf2])**2)**(1/2)\n diff.mean().plot(label = clf+' - ' +clf2,figsize=(16, 4))\n used_list.append({clf,clf2})\n \n \n \n \n \n plt.legend(loc='upper right')\n plt.title('difference')\n print('damnover')\n if return_df == True:\n return df", "def plot_metric(df_metrics, name, batch_size=10, epochs=10):\n\n # One groupplot\n fig, axarr = plt.subplots(3, 4, sharey=True, sharex=True)\n plotname = 'apfd'\n subplot_labels = ['(a)', '(b)', '(c)']\n\n for column, nr in enumerate(sorted(df_metrics['negative_ratio'].unique())):\n for row, emb_size in enumerate(df_metrics['emb_size'].unique()):\n for agidx, (labeltext, task, linestyle) in enumerate(\n [('Classification', 'True', '-'), ('Regression', 'False', '-.')]):\n rel_df = df_metrics[\n (df_metrics['emb_size'] == str(emb_size)) & (df_metrics['negative_ratio'] == str(nr)) &\n (df_metrics['batch_size'] == str(batch_size)) & (df_metrics['epochs'] == str(epochs))]\n\n # rel_df[rel_df['agent'] == agent].plot(x='step', y='napfd', label=labeltext, ylim=[0, 1], linewidth=0.8,\n # style=linestyle, color=sns.color_palette()[agidx], ax=axarr[row,column])\n\n apfd = rel_df.loc[rel_df['classification'] == task, 'apfd']\n miu = np.round(np.mean(apfd), 2)\n sigma = np.round(np.std(apfd), 2)\n label = labeltext + '\\n $\\mu$ - ' + str(miu) + ' $\\sigma$ - ' + str(sigma)\n\n # sns.displot(data=rel_df, x=\"apfd\", hue='classification', kde=True, ax=axarr[row, column])\n\n sns.distplot(apfd, kde=True,\n bins=int(180 / 5), color=sns.color_palette()[agidx],\n hist_kws={'edgecolor': 'black'},\n kde_kws={'linewidth': 4, 'clip': (0.0, 1.0)}, label=label, ax=axarr[row, column])\n\n axarr[row, column].xaxis.grid(True, which='major')\n\n axarr[row, column].set_title('Emb_size - %s - Neg_Ratio - %s' % (emb_size, nr), fontsize=10)\n\n if row == 2:\n axarr[row, column].set_xlabel('APFD')\n if column == 0:\n axarr[row, column].set_ylabel('Density')\n\n axarr[row, column].legend(frameon=True, prop={'size': 6})\n\n # Tweak spacing to prevent clipping of ylabel\n fig.suptitle('APFD Parameter Tuning - %d Epochs and batch-size - %d' % (epochs, batch_size))\n fig.tight_layout()\n plt.savefig(name, bbox_inches='tight')\n plt.show()", "def plot_convergence(self, x, y, **kwargs):\n self.plot(x, y, **kwargs)", "def plot_convergence(\n optimizers: list = [\"COBYLA\", \"SLSQP\", \"L-BFGS-B\", \"NELDER-MEAD\"],\n g2N: float = 0.2,\n maxit: int = 10000,\n varform: list = [\"ry\"],\n depth: int = 3,\n nrep: int = 10,\n dataprefix: str = \"data/miniBMN\",\n datasuffix: str = \"h5\",\n figprefix: str = \"figures/miniBMN\",\n ht: float = 0.0,\n up: int = 1000,\n):\n # setup parameters\n params = 
dict()\n params[\"l\"] = str(g2N).replace(\".\", \"\")\n params[\"d\"] = depth\n params[\"v\"] = \"-\".join(varform)\n params[\"m\"] = maxit\n params[\"n\"] = nrep\n params[\"f\"] = dataprefix\n params[\"s\"] = datasuffix\n assert type(optimizers).__name__ == \"list\"\n # collect data\n result = collect_data(optimizers, params)\n # get best runs\n gs = dict()\n for r in optimizers:\n gs[r] = result.loc[r].groupby(\"rep\").apply(min).energy\n gsdf = pd.DataFrame.from_dict(gs, dtype=float)\n print(gsdf.describe().T[[\"min\", \"max\", \"mean\", \"std\"]])\n # Plot\n # select the best runs for each optimizer\n fig, ax = plt.subplots()\n for o in optimizers:\n result.loc[o, gsdf[o].idxmin()].plot(\n x=\"counts\", y=\"energy\", xlim=[0, up], label=o, ax=ax\n )\n ax.axhline(ht, c=\"k\", ls=\"--\", lw=\"2\", label=\"HT\")\n ax.set_xlabel(\"iterations\")\n ax.set_ylabel(\"VQE energy\")\n ax.legend(loc=\"upper right\")\n filename = f\"{figprefix}_l{params['l']}_convergence_{params['v']}_depth{params['d']}_nr{params['n']}_max{params['m']}_xlim{up}\"\n plt.savefig(f\"{filename}.pdf\")\n plt.savefig(f\"{filename}.png\")\n plt.savefig(f\"{filename}.svg\")\n plt.close()", "def plot_cross_validation_metric(\n df_cv, metric, rolling_window=0.1, ax=None, figsize=(10, 6)\n):\n if ax is None:\n fig = plt.figure(facecolor='w', figsize=figsize)\n ax = fig.add_subplot(111)\n else:\n fig = ax.get_figure()\n # Get the metric at the level of individual predictions, and with the rolling window.\n df_none = performance_metrics(df_cv, metrics=[metric], rolling_window=0)\n df_h = performance_metrics(df_cv, metrics=[metric], rolling_window=rolling_window)\n\n # Some work because matplotlib does not handle timedelta\n # Target ~10 ticks.\n tick_w = max(df_none['horizon'].astype('timedelta64[ns]')) / 10.\n # Find the largest time resolution that has <1 unit per bin.\n dts = ['D', 'h', 'm', 's', 'ms', 'us', 'ns']\n dt_names = [\n 'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds',\n 'nanoseconds'\n ]\n dt_conversions = [\n 24 * 60 * 60 * 10 ** 9,\n 60 * 60 * 10 ** 9,\n 60 * 10 ** 9,\n 10 ** 9,\n 10 ** 6,\n 10 ** 3,\n 1.,\n ]\n for i, dt in enumerate(dts):\n if np.timedelta64(1, dt) < np.timedelta64(tick_w, 'ns'):\n break\n\n x_plt = df_none['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])\n x_plt_h = df_h['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])\n\n ax.plot(x_plt, df_none[metric], '.', alpha=0.5, c='gray')\n ax.plot(x_plt_h, df_h[metric], '-', c='b')\n ax.grid(True)\n\n ax.set_xlabel('Horizon ({})'.format(dt_names[i]))\n ax.set_ylabel(metric)\n return fig", "def boxplot_from_data_frame(df,\n group_by=\"Method\",\n metric=\"Precision\",\n hue=None,\n y_min=0.0,\n y_max=1.0,\n plotf=violinplot,\n color='grey',\n color_palette=None,\n label_rotation=45):\n\n sns.set_style(\"whitegrid\")\n ax = violinplot(x=group_by, y=metric, hue=hue, data=df, color=color,\n palette=color_palette, order=sorted(df[group_by].unique()))\n ax.set_ylim(bottom=y_min, top=y_max)\n ax.set_ylabel(metric)\n ax.set_xlabel(group_by)\n for lab in ax.get_xticklabels():\n lab.set_rotation(label_rotation)\n\n plt.show()\n\n return ax", "def plot_results(outputs, x, e, t, a, folds, groups,\n quantiles, strat='quantile', adj='KM', plot=True):\n if plot:\n mpl.rcParams['hatch.linewidth'] = 2.0\n\n fig, big_axes = plt.subplots(\n figsize=(8 * (len(groups) + 2), 6 * len(quantiles)),\n nrows=len(quantiles),\n ncols=1)\n\n plt.subplots_adjust(hspace=0.4)\n\n i = 0\n for _, 
big_ax in enumerate(big_axes, start=1):\n big_ax.set_title(\n 'Receiver Operator Characteristic and Calibration at t=' +\n str(quantiles[i]) + '\\n',\n fontsize=16)\n big_ax.tick_params(\n labelcolor=(1., 1., 1., 0.0),\n top='off',\n bottom='off',\n left='off',\n right='off')\n i += 1\n \n eces = {}\n metrics = {}\n\n for quant in quantiles:\n eces[quant] = {}\n \n for i in range(len(quantiles)):\n\n scores = outputs[quantiles[i]]\n for j in range(len(groups) + 2):\n\n pt = (i * (len(groups) + 2) + j + 1)\n if plot:\n ax = fig.add_subplot(len(quantiles), len(groups) + 2, pt)\n else:\n ax = None\n \n if (j==1):\n eces[quantiles[i]]['all'] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n None,\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot) \n \n if (j>1):\n eces[quantiles[i]][groups[j - 2]] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups[j - 2],\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot)\n \n if (j==0):\n metrics[quantiles[i]] = plot_roc_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups,\n quantiles[i],\n plot=plot)\n\n for quant in quantiles:\n metrics[quant] = metrics[quant] + (eces[quant], )\n \n if plot: \n plt.show()\n return metrics", "def PlotComparison(result_values, descrete, continuous, jitter=100):\n df = result_values.copy()\n np.random.seed(0)\n df[continuous] = df[continuous] + np.random.randint(low=-jitter, high=jitter, size=len(df))\n base = alt.Chart(df).transform_calculate(\n ymin=\"datum.mean-2*datum.std\",\n ymax=\"datum.mean+2*datum.std\",\n ).properties(\n title = '[Interactive] Accuracy by Params'\n )\n \n points = base.mark_point(\n filled=True,\n size=10\n ).encode(\n x=continuous,\n y=alt.Y('mean:Q'),#, scale=alt.Scale(domain=(0.55, 0.7))),\n color=descrete,\n tooltip=['mean','std']\n )\n\n errorbars = base.mark_errorbar().encode(\n x=continuous,\n y=alt.Y(\"ymin:Q\",title='Accuracy'),\n y2=\"ymax:Q\",\n color=descrete,\n )\n\n return(points + errorbars)", "def plot_series(groups, series):\n fig, ax = plt.subplots()\n ax.set_xlabel(\"Iterations\")\n ax.set_ylabel(series)\n\n for gkey, gval in groups.items():\n args = dict(gkey)\n\n series_values = get_series(gval, series)\n interval_size = args['test_interval']\n interval_count = series_values.shape[1] - 1\n\n x = np.arange(0, interval_size * interval_count + 1, step=interval_size)\n mean = np.mean(series_values, axis=0)\n std = np.std(series_values, axis=0)\n\n ax.plot(x, mean, label=format_group_key(gkey))\n ax.fill_between(x, mean + std, mean - std, alpha=0.2)\n\n ax.legend()\n return fig, ax", "def get_convergence_plot(self):\n fig, ax = plt.subplots()\n first_episode = self.get_convergence_episode()\n\n values = self.stats['return_stats']['episode_totals']\n ax.plot(np.arange(len(values)), values, color='steelblue', lw=2, alpha=.9,\n label='Return')\n ax.axvline(first_episode, color='seagreen', lw=2, label='Converged')\n ax.set_xlim(left=0, right=first_episode * 2)\n\n ax.set_title('Normalized regret = {:.3f}'.format(\n self.get_normalized_regret()))\n ax.legend()\n ax.set_ylabel('Return')\n ax.set_xlabel('Episode')\n return fig", "def plot_associative_learning_progress(ax, df):\n\n num_objects_list = sorted(df.curr_num_objects.unique())\n legend_list = []\n for idx in num_objects_list:\n ax.plot(df[df.curr_num_objects == idx].groupby('objects_iter').rewards.mean())\n legend_list.append(f'ns={idx}')\n ax.set_xlabel('Stimulus iteration')\n ax.set_ylabel('P(correct)')\n ax.set_ylim([0.4, 1])\n ax.legend(legend_list)", "def evals_by_target(ax, pds, 
baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline_ds:\n baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs = groupby(ds.detEvals(targets), axis=1)\n if baseline_ds:\n fevs /= baseline_fevs\n style['markevery'] = 64\n ax.loglog(targets, fevs, label=name, basey=pfsize, **style)\n ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))\n if baseline_ds:\n ax.set_yticks([2, 3.5], minor=True)\n ax.set_xlabel('Function Value Targets')\n ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')", "def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label=\"\", baseline2_ds=None, baseline2_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline1_ds:\n baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))\n if baseline2_ds:\n baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs1 = groupby(ds.detEvals(targets), axis=1)\n if baseline1_ds:\n fevs1 /= baseline1_fevs\n fevs2 = groupby(ds.detEvals(targets), axis=1)\n if baseline2_ds:\n fevs2 /= baseline2_fevs\n\n infsx = np.nonzero(fevs1 == inf)\n infs = infsx[0]\n if np.size(infs) > 0:\n #print infs\n fevs1 = fevs1[:infs[0]-1]\n fevs2 = fevs2[:infs[0]-1]\n\n #print name, fevs1, fevs2\n style['markevery'] = 64\n ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)\n ax.grid()\n ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. 
log(runlengths) + 1\n ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))\n ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby)))", "def summaryPlot(df):\n import datetime as dt\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n import numpy as np\n import pandas as pd\n from numpy import array\n import matplotlib.patches as mpatches\n import seaborn as sns\n from matplotlib.pyplot import figure\n\n class color:\n # Allows for bolded and underlined text\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n END = \"\\033[0m\"\n\n # Reads df and fills empty values\n df.index = pd.to_datetime(df.date)\n df = df.drop(\"date\", axis=1)\n df_all = df.resample(\"1D\")\n df_all = df_all.fillna(method=\"ffill\")\n\n dataPoints = [\"pm25\", \"co\", \"so2\", \"pm10\", \"o3\", \"no2\", \"nox\", \"wd\", \"ws\"]\n\n i = 0\n sub = 1\n while i < 9:\n # Plots line and histogram plots for ecery polutant\n # in the correct location based on subplot\n plt.figure(1, figsize=(50, 50))\n plt.subplot(9, 2, sub)\n sub = sub + 1\n a = df_all[dataPoints[i]].plot.line(color=\"gold\")\n a.axes.get_xaxis().set_visible(False)\n a.yaxis.set_label_position(\"left\")\n plt.ylabel(dataPoints[i], fontsize=75, bbox=dict(facecolor=\"whitesmoke\"))\n # print(df['pm25'].max())\n\n plt.subplot(9, 2, sub)\n sub = sub + 1\n plt.hist(df_all[dataPoints[i]], bins=50, color=\"green\")\n i = i + 1\n i = 0\n while i < 9:\n # Calculates statistics\n nDf = df[dataPoints[i]]\n missing = nDf.isna().sum() + sum(n < 0 for n in nDf)\n minVal = nDf.min()\n maxVal = nDf.max()\n meanVal = nDf.mean()\n medianVal = nDf.median()\n percentile = nDf.quantile(0.95)\n print(\"---------------\")\n print(color.BOLD + color.UNDERLINE + dataPoints[i] + color.END)\n print(\"min = \" + str(0))\n print(\"max = \" + str(maxVal))\n print(\"missing = \" + str(missing))\n print(\"mean = \" + str(meanVal))\n print(\"median = \" + str(medianVal))\n print(\"95th percentile = \" + str(percentile))\n i = i + 1", "def plot_test(y_test, y_pred, title = None, xlabel = 'Measured $Y = \\log_2(MIC)$', ylabel = 'Predicted $Y = \\log_2(MIC)$', legend = ['Ideal', 'Result'], groups = None):\n \n fig, ax = plt.subplots(1,1)\n fig.set_figheight(5)\n fig.set_figwidth(5)\n if groups is not None:\n groups_obj = pd.concat([y_test,y_pred], axis=1).groupby(np.array(groups))\n cmap=plt.get_cmap('tab10')\n for name, group in groups_obj:\n # Works only for groups with numeric names that are max cmap length:\n ax.plot(group.iloc[:,0], group.iloc[:,1], marker=\"o\", linestyle=\"\", label=int(name), color = cmap.colors[int(name)])\n ax.legend()\n else:\n ax.scatter(y_test,y_pred, color = 'red')\n ax_max = 10\n if np.max(y_test.values)>ax_max:\n ax_max = np.max(y_test).values\n ax_min = 0\n if np.min(y_test.values)<ax_min:\n ax_min = np.min(y_test.values)\n ax.plot([ax_min, ax_max], [ax_min, ax_max], '--', color='black')\n ax.set_aspect('equal', 'box')\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n #plt.savefig(title+'.pdf')\n plt.savefig(title+'.svg')\n #plt.savefig(title+'.png')#, dpi=600)\n #plt.show()", "def calculate_performance(controls_f, df, pop_type, neg, output):\n\n print('\\n Calculating performance...')\n log_write(output['log'], '\\n\\nPerformance based on results per %s\\n\\n' % pop_type)\n\n # Remove strains with missing penetrance and few cell count\n df = df.iloc[df['Penetrance'].isna().values == 0, :]\n df = df[df['Num_cells'] >= 15].reset_index(drop=True)\n\n # Plot penetrance 
distribution\n plt.figure(figsize=(6, 6))\n sns.set(font_scale=1.25)\n sns.set_style('white')\n sns.kdeplot(df['Penetrance'].values, color='mediumblue', shade=True)\n plt.xlabel('Penetrance (%)')\n mean_penetrance = df['Penetrance'].mean()\n plt.title('Mean penetrance: %.4f' % mean_penetrance)\n fig = plt.gcf()\n fig.savefig(output['Penetrance'].replace('.', '_%s.' % pop_type), dpi=150, bbox_inches='tight')\n fig.clf()\n plt.close(fig)\n log_write(output['log'], 'Mean penetrance: %.2f\\n' % mean_penetrance)\n log_write(output['log'], 'Mean WT penetrance: %.2f\\n' % df[df['Strain ID'].isin(neg)]['Penetrance'].mean())\n\n # Plot WT percentile distribution\n plt.figure(figsize=(6, 6))\n sns.set(font_scale=1.25)\n sns.set_style('white')\n sns.kdeplot(df['WT_percentile_at_threshold'].values, color='mediumblue', shade=True)\n plt.xlabel('WT Percentile at the score of maximum difference')\n mean_percentile = df['WT_percentile_at_threshold'].mean()\n plt.title('Mean percentile: %.4f' % mean_percentile)\n fig = plt.gcf()\n fig.savefig(output['WT_Percentile'].replace('.', '_%s.' % pop_type), dpi=150, bbox_inches='tight')\n fig.clf()\n plt.close(fig)\n log_write(output['log'], 'Mean WT percentile at threshold: %.2f\\n' % mean_percentile)\n\n # Plot correlation\n plt.figure(figsize=(6, 6))\n sns.set(font_scale=1.25)\n sns.set_style('white')\n limits = [-0.1, 100.1]\n ticks = [0, 20, 40, 60, 80, 100]\n sns.lineplot(x=limits, y=limits, color='k', dashes=True, linewidth=1)\n sns.scatterplot(x='Penetrance', y='KS_Penetrance', color='mediumblue', data=df, s=60, alpha=0.5, linewidth=0)\n plt.xlim(limits)\n plt.xticks(ticks)\n plt.ylim(limits)\n plt.yticks(ticks)\n plt.xlabel('Penetrance')\n plt.ylabel('KS Penetrance')\n correlation = stats.pearsonr(df['Penetrance'], df['KS_Penetrance'])[0]\n plt.title('Correlation: %.4f' % correlation)\n fig = plt.gcf()\n fig.savefig(output['KS_Correlation'].replace('.', '_%s.' % pop_type), dpi=150, bbox_inches='tight')\n fig.clf()\n plt.close(fig)\n log_write(output['log'], 'KS correlation: %.4f\\n' % correlation)\n\n # Get positive controls\n if controls_f:\n df_cont = pd.read_csv(controls_f, low_memory=False)\n pos = df_cont['Strain ID'].values\n\n # Calculate performance with maximum difference\n aupr, aupr_b, auroc = calculate_auc(df, neg, pos)\n correlation = plot_penetrance_agreement(df, df_cont, neg,\n output['PeneAgreement'].replace('.', '_%s.' % pop_type))\n log_write(output['log'], 'AUPR: %.4f\\n' % aupr)\n log_write(output['log'], 'AUPR balanced: %.4f\\n' % aupr_b)\n log_write(output['log'], 'AUROC: %.4f\\n' % auroc)\n log_write(output['log'], 'Correlation: %.4f\\n' % correlation)\n\n # Plot penetrance of controls\n plt.figure(figsize=(12, 4))\n sns.set(font_scale=1.25)\n sns.set_style('white')\n plt.scatter(x=df[df['Strain ID'].isin(neg)].index.values,\n y=df[df['Strain ID'].isin(neg)]['Penetrance'].values,\n color='dodgerblue', alpha=0.3, label='Negative control', s=20)\n plt.scatter(x=df[df['Strain ID'].isin(pos)].index.values,\n y=df[df['Strain ID'].isin(pos)]['Penetrance'].values,\n color='red', alpha=0.7, label='Positive control', s=20)\n plt.xticks([])\n plt.yticks([0, 25, 50, 75, 100])\n plt.xlabel('Genes')\n plt.ylabel('Penetrance (%)')\n plt.legend(loc='upper right')\n plt.savefig(output['PenetranceControls'].replace('.', '_%s.' 
% pop_type),\n dpi=150, bbox_inches='tight')", "def plot_coupling_grid(baseline_group, fits_groups, metrics, fax=None):\n n_algorithms = len(fits_groups)\n n_metrics = len(metrics)\n\n if fax is None:\n fig, axes = plt.subplots(n_metrics, n_algorithms,\n figsize=(3 * n_algorithms, 3 * n_metrics))\n else:\n fig, axes = fax\n\n # iterate over metrics\n for row_idx, metric in enumerate(metrics):\n if metric == 'selection_ratio':\n baseline_coefs = baseline_group['coupling_coefs'][:]\n baseline_selection_ratio = \\\n calculate_selection_ratio(baseline_coefs).mean(axis=0)\n\n # iterate over algorithms\n for col_idx, algorithm in enumerate(fits_groups):\n if metric == 'selection_ratio':\n # calculate selection ratio for algorithm\n coefs = algorithm['coupling_coefs'][:]\n selection_ratio = calculate_selection_ratio(coefs).mean(axis=0)\n\n # plot direct comparison\n axes[row_idx, col_idx].scatter(\n baseline_selection_ratio,\n selection_ratio,\n alpha=0.5,\n color='k',\n edgecolor='w')\n else:\n axes[row_idx, col_idx].scatter(\n baseline_group[metric][:].mean(axis=0),\n algorithm[metric][:].mean(axis=0),\n alpha=0.5,\n color='k',\n edgecolor='w')\n\n return fig, axes", "def plot_convergence_(OptimizeResult, fig_savepath, figsize=(5.51, 3.54), format='PNG', dpi=300): # (5.51,3.54)\n fig = plt.figure(figsize=figsize)\n ax0 = fig.add_subplot(111)\n plot_convergence(OptimizeResult, ax=ax0, true_minimum=0.0, )\n plt.title(\"\")\n ax0.set_ylabel(r\"minimum MSE after $n$ calls\")\n # plt.tight_layout()\n plt.subplots_adjust(left=0.12, bottom=0.12, right=0.96, top=0.94, hspace=0.1, wspace=0.2)\n plt.savefig(fig_savepath, format=format, dpi=dpi)\n # plt.show()", "def dichotomize_plot(args):\n # Read the files.\n df = _parse_data(args)\n df[\"group\"] = np.nan\n df[\"intercept\"] = 1\n\n df = df[[\"group\", \"intercept\", \"grs\", args.phenotype]]\n\n # Init the statistical test.\n test = model_map[args.test]()\n\n qs = []\n upper_ci = []\n lower_ci = []\n ns = []\n betas = []\n\n for q in np.linspace(0.05, 0.5, 200):\n low, high = df[[\"grs\"]].quantile([q, 1 - q]).values.T[0]\n\n df[\"group\"] = np.nan\n df.loc[df[\"grs\"] <= low, \"group\"] = 0\n df.loc[df[\"grs\"] >= high, \"group\"] = 1\n\n cur = df.dropna()\n\n stats = test.fit(\n cur[[args.phenotype]], cur[[\"group\", \"intercept\"]]\n )\n\n qs.append(q)\n betas.append(stats[\"group\"][\"coef\"])\n ns.append(df.dropna().shape[0])\n upper_ci.append(stats[\"group\"][\"upper_ci\"])\n lower_ci.append(stats[\"group\"][\"lower_ci\"])\n\n fig, ax1 = plt.subplots()\n\n beta_line, = ax1.plot(qs, betas)\n ci_line, = ax1.plot(qs, upper_ci, \"--\", color=\"gray\", linewidth=0.2)\n ax1.plot(qs, lower_ci, \"--\", color=\"gray\", linewidth=0.2)\n ax1.set_ylabel(r\"$\\beta$\")\n ax1.set_xlabel(\"Quantile used to form groups (0.5 is median)\")\n\n ax2 = ax1.twinx()\n ax2.grid(False, which=\"both\")\n n_line, = ax2.plot(qs, ns, \"-\", linewidth=0.2)\n ax2.set_ylabel(\"effective n\")\n\n plt.legend(\n (beta_line, ci_line, n_line),\n (r\"$\\beta$\", \"95% CI\", \"$n$\"),\n loc=\"upper center\"\n )\n\n if args.out:\n plt.savefig(args.out)\n else:\n plt.show()", "def fit_and_plot(self, max_iter):\n from matplotlib import pyplot as plt\n from matplotlib import cm\n\n colours = cm.rainbow(np.linspace(0, 1, self.num_classes)) # FIXME: rainbow list -> array\n\n def plot_data(d):\n for c in range(self.num_classes):\n for n in range(self.num_nuisances):\n plt.scatter(*d[c][n].T, c=colours[c])\n plt.waitforbuttonpress()\n\n def plot_mean(th):\n for c in 
range(self.num_classes):\n for n in range(self.num_nuisances):\n plt.scatter(*th[c][n].mean.T, c=colours[c], marker=\"x\")\n plt.waitforbuttonpress()\n\n plt.ion()\n plt.scatter(*self.data.T)\n plt.waitforbuttonpress()\n\n split_data = self.initialise_clusters_with_kmeans()\n plot_data(split_data)\n thetas = self.maximization(split_data)\n plot_mean(thetas)\n\n for i in range(max_iter):\n plt.clf()\n split_data = self.expectation(thetas)\n plot_data(split_data)\n thetas = self.maximization(split_data)\n plot_mean(thetas)\n return split_data, thetas", "def _plot_experiment(df, axes, metric_name, isTrain):\n # colors: https://stackoverflow.com/questions/42086276/get-default-line-colour-cycle\n ldf = metric_short_to_long(df)\n plotted = \"Train\" if isTrain else \"Val\"\n m = ldf.query(\"stat == 'mse' and metric == @metric_name\")[[\"trial\",\"state\",\"value\"]].rename({\"value\":\"mse\"},axis=1)\n # aggregated\n ax = sns.barplot(x=\"trial\", y=\"mse\", data=m, palette=[u'#1f77b4'], ci=\"sd\", ax=axes[0])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"Aggregated State Errors ({plotted})\")\n ax.set_xlabel(\"Trial Number\")\n\n # individual state plots\n ax = sns.barplot(x=\"trial\", y=\"mse\", hue=\"state\",data=m, ci=\"sd\", ax=axes[1])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"State Error by Trial ({plotted})\")\n ax.set_xlabel(\"Trial Number\")", "def convergence():\n fig, axes = plt.subplots(nrows=2, figsize=figsize(aspect=1.2))\n\n # label names\n label1 = str(league.lambda1)\n label2_list = [str(lambda2) for lambda2 in league.lambda2_list]\n\n # point spread and point total subplots\n subplots = [\n (False, [-0.5, 0.5], league.spreads, 'probability spread > 0.5'),\n (True, [200.5], league.totals, 'probability total > 200.5'),\n ]\n\n for ax, (commutes, lines, values, ylabel) in zip(axes, subplots):\n\n # train margin-dependent Elo model\n melo = Melo(lines=lines, commutes=commutes, k=1e-4)\n melo.fit(league.times, league.labels1, league.labels2, values)\n\n line = lines[-1]\n\n for label2 in label2_list:\n\n # evaluation times and labels\n times = np.arange(league.times.size)[::1000]\n labels1 = times.size * [label1]\n labels2 = times.size * [label2]\n\n # observed win probability\n prob = melo.probability(times, labels1, labels2, lines=line)\n ax.plot(times, prob)\n\n # true (analytic) win probability\n if ax.is_first_row():\n prob = skellam.sf(line, int(label1), int(label2))\n ax.axhline(prob, color='k')\n else:\n prob = poisson.sf(line, int(label1) + int(label2))\n ax.axhline(prob, color='k')\n\n # axes labels\n if ax.is_last_row():\n ax.set_xlabel('Iterations')\n ax.set_ylabel(ylabel)\n\n set_tight(w_pad=.5)", "def plot(self, plot_cmd=None, tf=lambda y: y):\r\n if not plot_cmd:\r\n plot_cmd = self.plot_cmd\r\n colors = 'bgrcmyk'\r\n pylab.hold(False)\r\n res = self.res\r\n\r\n flatx, flatf = self.flattened()\r\n minf = np.inf\r\n for i in flatf:\r\n minf = min((minf, min(flatf[i])))\r\n addf = 1e-9 - minf if minf <= 0 else 0\r\n for i in sorted(res.keys()): # we plot not all values here\r\n if type(i) is int:\r\n color = colors[i % len(colors)]\r\n arx = sorted(res[i].keys())\r\n plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')\r\n pylab.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)\r\n pylab.hold(True)\r\n plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')\r\n pylab.ylabel('f + ' + str(addf))\r\n pylab.draw()\r\n show()\r\n # raw_input('press return')\r\n return self", "def 
visualization(obj_value):\n for n in range(3):\n plt.loglog(obj_value[n],\".\");\n\n plt.ylabel('objective values');\n plt.xlabel('iteration counter');\n plt.title('objective values for each pair against iterations');\n plt.legend();\n plt.show();", "def make_tuning_plot_rmse(df, error_col_name=\"rmse\",\n error_title = \"Top 10% RMSE\",\n cutoff = 0.10):\n\n df = df.copy()\n\n # Get the regularizer and reset coeff\n coeff = [float(i.split(\"evidence_new_reg_\")[1]) if \"evidence\" in i else i for i in df['method_name']]\n df[\"method_name\"] = coeff\n df[\"Data\"] = convert_dataset_names(df[\"dataset\"])\n df[\"Method\"] = df[\"method_name\"]\n\n # Get appropriate datasets\n trials = 'trial_number'\n methods = 'Method'\n\n # Make area plot\n uniq_methods = set(df[\"Method\"].values)\n method_order = sorted(uniq_methods,\n key=lambda x : x if isinstance(x, float) else -1)\n method_df = []\n datasets = set()\n for data, sub_df in df.groupby(\"Data\"):\n # Add datasets\n datasets.add(data)\n rmse_sub = sub_df[error_col_name]\n methods_sub = sub_df[\"Method\"]\n trials_sub= sub_df['trial_number']\n for method_idx, method in enumerate(method_order):\n # Now summarize these lines\n bool_select = (methods_sub == method)\n\n rmse_method = rmse_sub[bool_select]\n trials_temp = trials_sub[bool_select]\n areas = []\n # create area!\n for trial, rmse_trial in zip(trials_sub, rmse_method):\n num_tested = len(rmse_trial)\n cutoff_index = int(cutoff * num_tested) - 1\n rmse_val = rmse_trial[-cutoff_index]\n to_append = {error_title: rmse_val,\n \"Regularizer Coeff, $\\lambda$\": method,\n \"method_name\": method,\n \"Data\": data,\n \"Trial\" : trial}\n method_df.append(to_append)\n method_df = pd.DataFrame(method_df)\n\n # Filter out dropout\n method_df = method_df[[i != \"dropout\" for i in\n method_df['method_name']]].reset_index()\n\n # Normalize by dataset\n for dataset in datasets:\n # Make a divison vector of ones and change it to a different value only\n # for the correct dataset of interest to set max rmse to 1\n division_factor = np.ones(len(method_df))\n indices = (method_df[\"Data\"] == dataset)\n\n # Normalize with respect to the ensemble so that this is 1\n max_val = method_df[indices].query(\"method_name == 'ensemble'\").mean()[error_title]\n\n # Take the maximum of the AVERAGE so it's normalized to 1\n division_factor[indices] = max_val\n method_df[error_title] = method_df[error_title] / division_factor\n\n method_df_evidence = method_df[[isinstance(i, float) for i in\n method_df['method_name']]].reset_index()\n method_df_ensemble = method_df[[\"ensemble\" in str(i) for i in\n method_df['method_name']]].reset_index()\n\n data_colors = {\n dataset : sns.color_palette()[index]\n for index, dataset in enumerate(datasets)\n }\n\n min_x = np.min(method_df_evidence[\"Regularizer Coeff, $\\lambda$\"])\n max_x= np.max(method_df_evidence[\"Regularizer Coeff, $\\lambda$\"])\n\n sns.lineplot(x=\"Regularizer Coeff, $\\lambda$\", y=error_title,\n hue=\"Data\", alpha=0.8, data=method_df_evidence,\n palette = data_colors)\n\n for data, subdf in method_df_ensemble.groupby(\"Data\"):\n\n color = data_colors[data]\n area = subdf[error_title].mean()\n std = subdf[error_title].std()\n plt.hlines(area, min_x, max_x, linestyle=\"--\", color=color, alpha=0.8)\n\n # Add ensemble baseline\n ensemble_line = plt.plot([], [], color='black', linestyle=\"--\",\n label=\"Ensemble\")\n # Now make ensemble plots\n plt.legend(bbox_to_anchor=(1.1, 1.05))", "def test_convergence(self, nMax=500000, withPlots=True, 
overStep=100):\n\n def _mad(vin):\n med = np.median(vin)\n return np.median(np.abs(vin - med))\n\n self.convergenceSearchFlag = True\n self.needReset = False\n self._reset_limits()\n mStart = 10\n mStep = 1\n statStep = 5\n m = 0\n k = mStart\n converged = False\n postConv = 0\n pltout = []\n dIout = []\n Iold = 0\n sE = self.E_max * np.ones(1)\n sTheta_max = self.Theta_max * np.ones(1)\n sPsi_max = self.Psi_max * np.ones(1)\n\n statOut = []\n dIOut = []\n xm = []\n\n outQuad = 0\n outInt = 0\n if withPlots:\n from matplotlib import pyplot as plt\n fig = plt.figure(figsize=(8, 6))\n\n ax0 = fig.add_axes([0.1, 0.65, 0.8, 0.3])\n ax0.xaxis.set_visible(False)\n ax0.set_ylabel('Relative intensity $I$', color='C0')\n ampLine, = ax0.semilogy([], [], 'C0')\n\n ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.55])\n ax1.set_xlabel('Number of nodes')\n ax1.set_ylabel('Median absolute deviation of $I$', color='C1')\n madLine, = ax1.semilogy([], [], 'C1')\n\n ax2 = ax1.twinx()\n ax2.set_ylabel('Median $dI/I$', color='C2')\n relmadLine, = ax2.semilogy([], [], 'C2')\n else:\n fig = None\n\n while True:\n m += 1\n if m % 1000 == 0:\n mStep *= 2\n if True: # raycing._VERBOSITY_ > 10:\n # print(\"INSUFFICIENT CONVERGENCE RANGE:\", k, \"NODES\")\n print(\"INCREASING CONVERGENCE STEP. NEW STEP\", mStep)\n\n k += mStep\n self.quadm = k\n self._build_integration_grid()\n xm.append(k*self.gIntervals)\n Inew = self.build_I_map(sE, sTheta_max, sPsi_max)[0]\n pltout.append(Inew)\n dIout.append(np.abs(Inew-Iold)/Inew)\n if m == 1:\n Iold = Inew\n continue\n Iold = Inew\n\n if withPlots:\n ampLine.set_xdata(xm)\n relInt = np.array(pltout)\n relInt /= relInt.max()\n ampLine.set_ydata(relInt)\n new_y_min = np.floor(np.log10(relInt.min()))\n ax0.set_xlim([0, xm[-1]+5])\n ax0.set_ylim([10**(new_y_min+0.1), 1.1])\n\n if converged:\n postConv += 1\n if m > statStep:\n mad = _mad(np.abs(np.array(pltout))[m-statStep:m])\n dIMAD = np.median(dIout[m-statStep:m])\n\n statOut.append(mad)\n dIOut.append(dIMAD)\n\n if ((dIMAD < self.gp) or (mad < self.gp)) and not converged:\n convPoint = k*self.gIntervals\n outQuad = k\n outInt = self.gIntervals\n if True: # raycing._VERBOSITY_ > 10:\n print(\"CONVERGENCE THRESHOLD REACHED AT \"\n \"{0} NODES, {1} INTERVALS.\".format(\n k, self.gIntervals))\n print(\"INTEGRATION GRID LENGTH IS {} POINTS\".format(\n convPoint))\n converged = True\n if withPlots:\n label = 'True convergence: {0} nodes, {1} interval{2}'\\\n .format(self.quadm, self.gIntervals,\n '' if self.gIntervals == 1 else 's')\n axvlineDict = dict(x=convPoint, color='r', label=label)\n ax0.axvline(**axvlineDict)\n ax1.axvline(**axvlineDict)\n if withPlots:\n new_y_max = np.ceil(np.log10(max(statOut)))\n new_y_min = np.floor(np.log10(min(statOut)))\n ax1.set_xlim([0, xm[-1]+5])\n ax1.set_ylim([10**new_y_min, 10**(new_y_max-0.1)])\n madLine.set_xdata(xm[statStep:])\n madLine.set_ydata(statOut)\n relmadLine.set_xdata(xm[statStep:])\n relmadLine.set_ydata(dIOut)\n new_y_max = np.ceil(np.log10(max(dIOut)))\n new_y_min = np.floor(np.log10(min(dIOut)))\n ax2.set_xlim([0, xm[-1]+5])\n ax2.set_ylim([10**new_y_min, 10**new_y_max])\n fig.canvas.draw()\n plt.pause(0.001)\n\n if xm[-1] > nMax:\n if not converged:\n print(\"PROBLEM WITH CONVERGENCE. 
INCREASE nMax.\")\n break\n\n if overStep is not None:\n if postConv > overStep:\n break\n\n convRes, stats = self._find_convergence_mixed()\n print(\"CONVERGENCE TEST COMPLETED.\")\n self.needReset = True\n if withPlots:\n label = 'Auto-finder: {0} nodes, {1} interval{2}'.format(\n self.quadm, self.gIntervals,\n '' if self.gIntervals == 1 else 's')\n axvlineDict = dict(x=self.quadm*self.gIntervals, color='m',\n linestyle='--', label=label)\n ax0.axvline(**axvlineDict)\n ax1.axvline(**axvlineDict)\n ax1.legend()\n fig.canvas.draw()\n plt.pause(0.1)\n return converged, outQuad, outInt, fig", "def plot_trends(group, country=\"US\", state=None, place=None, predictive_method=\"ARIMA\"):\n print(f\"* Plotting Google Trends of `{group}` for {country} - {state or 'All'}\")\n group_queries = get_group_queries(group, only_root=True)\n\n n_queries = len(group_queries)\n n_cols = 3\n n_rows = int(n_queries / n_cols) + (1 if n_queries % n_cols else 0)\n\n # Annotations\n annotations = []\n\n # Initialize figure with subplots\n subplot_titles = [\"%s...\" % t[:22] if len(t) >= 22 else t for t in group_queries]\n fig = make_subplots(\n rows=n_rows, cols=n_cols, subplot_titles=subplot_titles,\n shared_yaxes=True,\n print_grid=True\n )\n\n # Marked Dates\n covid_start_date = COVID_START_DATE\n reopen_date = REOPEN_DATE\n reopen_date_minus_1 = REOPEN_DATE_MINUS_1\n data_start_date = DATA_START_DATE\n data_end_date = DATA_END_DATE\n\n # Figure variable\n baseline = 0\n value_range = [0, 100]\n\n # Model params\n model_params = []\n\n for idx, query in enumerate(group_queries):\n row = int(idx / n_cols) + 1\n col = idx % n_cols + 1\n showlegend = idx == 0\n\n query_file_path = get_data_filename(group, query, country=country, state=state, full=True)\n df = pd.read_csv(query_file_path, parse_dates=True)\n count = df[\"date\"].count()\n\n # ARIMA Model\n if query in df.columns:\n print(\"Query: \", query)\n # get_arima_params(df[query])\n df, model = arima_predict(df, from_date=PREDICT_FROM_DATE, value_col=query)\n params = model.get_params()\n model_params.append([query, str(params[\"order\"])])\n # return False\n \n # No data\n if count == 0:\n continue\n\n # Process\n stayhome_order_date = place.get(\"ClosedFrom\") if place else SOCIAL_DISTANCE_ORDER_DATE\n\n df = df[(df[\"date\"] >= data_start_date) & (df[\"date\"] <= data_end_date)]\n df_before = df[(df[\"date\"] <= reopen_date)]\n df_after = df[(df[\"date\"] >= reopen_date_minus_1)]\n df_prediction = df[df[\"is_predicted\"] == 1]\n\n # Normalize\n if config.TRENDS_APPLY_NORMALIZATION:\n max_value = df[query].max()\n baseline = df_before[query].median()\n df[\"value\"] = df[query].apply(lambda x: (x - baseline) / max_value)\n df_before[\"value\"] = df_before[query].apply(lambda x: (x - baseline) / max_value)\n df_after[\"value\"] = df_after[query].apply(lambda x: (x - baseline) / max_value)\n baseline = 0\n value_range = [-1, 1]\n else:\n max_value = df[query].max()\n baseline = df_before[query].median()\n df[\"value\"] = df[query]\n df_before[\"value\"] = df_before[query]\n df_after[\"value\"] = df_after[query]\n\n # Compute difference\n query_text = query.split(\"+\")[0].strip() + \" + ...\" if \"+\" in query else query\n actual_mean, actual_meanCI95min, actual_meanCI95max = mean_confidence_interval(df_prediction[query])\n predict_mean = df_prediction[\"prediction\"].mean()\n diff = round(100 * (actual_mean - predict_mean) / predict_mean, 1)\n diffCI95min = round(100 * (actual_meanCI95min - predict_mean) / predict_mean, 1)\n diffCI95max = round(100 * 
(actual_meanCI95max - predict_mean) / predict_mean, 1)\n x_date = list(df['date'])[int(df[\"date\"].count()/2)]\n diff_annot = go.layout.Annotation(\n text=f'<b>{query_text}</b><br><sub><b style=\"color:{config.COLOR_UPTREND if diff >= 0 else config.COLOR_DOWNTREND}\">{diff}%</b>; 95%CI, [{diffCI95min}%, {diffCI95max}%]</sub>',\n showarrow=False, xanchor=\"center\", yanchor=\"top\", \n x=x_date,\n y=0.0,\n xshift=0,\n yshift=-5,\n xref=f\"x{'' if idx == 0 else idx + 1}\",\n yref=f\"y{'' if idx == 0 else idx + 1}\"\n )\n annotations.append(diff_annot)\n\n # Lockdown period\n max_y = max(df[query].max(), abs(df[query].min()))\n min_y = -max_y\n shape_lockdown = go.layout.Shape(**{\"type\": \"rect\",\"y0\":100,\"y1\": -100,\"x0\":COVID_START_DATE, \n \"x1\":REOPEN_DATE,\"xref\":\"x1\",\"yref\":\"y1\",\"layer\":\"below\",\n \"fillcolor\":\"#eeeeee\", \"line\":dict(width=0), \"line_width\": 0})\n fig.add_shape(shape_lockdown, row=row, col=col)\n\n # Horizontal line \n shape = go.layout.Shape(**{\"type\": \"line\",\"y0\":baseline,\"y1\": baseline,\"x0\":str(df[\"date\"].values[0]), \n \"x1\":str(df[\"date\"].values[-1]),\"xref\":\"x1\",\"yref\":\"y1\",\"layer\":\"below\",\n \"line\": {\"color\": \"rgb(200, 200, 200)\",\"width\": 1.5}})\n fig.add_shape(shape, row=row, col=col)\n\n # Stay home order\n if stayhome_order_date:\n shape_stayhome_order = go.layout.Shape(**{\"type\": \"line\",\"y0\":-0.25,\"y1\": 0.25,\"x0\":stayhome_order_date, \n \"x1\":stayhome_order_date,\"xref\":\"x1\",\"yref\":\"y1\",\n \"line\": {\"color\": \"blue\",\"width\": 1.5, \"dash\": \"dot\"}})\n fig.add_shape(shape_stayhome_order, row=row, col=col)\n\n # Plot\n subplot_before = go.Scatter(x=df_before[\"date\"], y=df_before[\"value\"], \n mode=\"lines\", name=\"Before Lockdown\",\n line=dict(width=1, color=config.LINE_COLOR_BEFORE), \n line_shape=\"linear\", showlegend=False) # linear or spline \n subplot_after = go.Scatter(x=df_after[\"date\"], y=df_after[\"value\"], \n mode=\"lines\", name=\"Actual Queries\",\n line=dict(width=1.5, color=config.LINE_COLOR_AFTER), \n line_shape=\"linear\", showlegend=showlegend) # linear or spline \n subplot_prediction = go.Scatter(x=df_prediction[\"date\"], y=df_prediction[\"prediction\"], \n mode=\"lines\", name=\"Expected Queries\",\n line=dict(width=2, color=config.LINE_COLOR_BEFORE, dash=\"dot\"), \n line_shape=\"linear\", showlegend=showlegend) # linear or spline \n subplot_lockdown_legend = go.Bar(x=[reopen_date,], y=[0,], \n name=\"Early Lockdown Phase\", \n showlegend=showlegend,\n marker_color=\"#eeeeee\")\n fig.add_trace(subplot_before, row=row, col=col)\n fig.add_trace(subplot_after, row=row, col=col)\n fig.add_trace(subplot_prediction, row=row, col=col)\n if idx == 0:\n fig.add_trace(subplot_lockdown_legend, row=row, col=col)\n\n # break\n\n # Caption\n # caption = go.layout.Annotation(\n # showarrow=False,\n # text=\"\",\n # xanchor=\"center\",\n # x=0.5,\n # yanchor=\"top\",\n # y=0.0,\n # yshift=0,\n # )\n\n # Layout\n # location = f\"{country}.{state}\" if state else country\n # fig_title = f\"\"\"Term: {group}. 
Location: {location}<br>\n # <span style=\"font-size: 14px;line-height:1\">Period: {data_start_date} - {data_end_date}\n # <br>Lockdown Period: {covid_start_date} - {PREDICT_FROM_DATE}</span>\"\"\"\n fig_title = \"\"\n fig.update_layout(title={\"text\": fig_title, \"x\":0.5, \"xanchor\": \"center\"}, \n title_font=dict(size=12),\n height=50 + n_rows * 175, width=250 * n_cols, coloraxis=dict(colorscale=\"Bluered_r\"), \n showlegend=True, plot_bgcolor=\"rgb(255,255,255)\", titlefont={\"size\": 30},\n margin={\"t\": 50},\n annotations=annotations,\n legend=dict(\n orientation=\"v\",\n yanchor=\"bottom\",\n y=0,\n xanchor=\"right\",\n x=1,\n bgcolor=\"white\",\n bordercolor=\"#333\",\n borderwidth=1\n )\n )\n fig.update_xaxes(showgrid=False, showticklabels=False, showline=False)\n fig.update_yaxes(showgrid=False, showticklabels=False, showline=True, range=value_range)\n\n # Store model parameters\n mkdir_if_not_exist(config.TRENDS_OUTPUT_DIR)\n df_params = pd.DataFrame(model_params, columns=[\"Query\", \"Order\"])\n df_params.to_csv(\"%s/ARIMA_orders_%s.csv\" % (config.TRENDS_OUTPUT_DIR, group), index=False)\n\n # Create online URL\n url = py.iplot(fig, filename=group, file_id=group)\n print(\"URL:\", url.src)\n\n if config.TRENDS_EXPORT_FIGURES:\n # Save\n mkdir_if_not_exist(config.TRENDS_FIGURES_DIR)\n fig.write_image(\"%s/%s_%s_%s.jpg\" % (config.TRENDS_FIGURES_DIR, country, state or \"All\", group))\n # fig.show()\n else:\n # Show\n fig.show()", "def update_plotmon_adaptive_cma(self, force_update=False):\n\n if self._live_plot_enabled():\n try:\n if (time.time() - self.time_last_ad_plot_update >\n self.plotting_interval() or force_update):\n ##########################################\n # Main plotmon\n ##########################################\n i = 0\n nr_sweep_funcs = len(self.sweep_function_names)\n\n # best_idx -1 as we count from 0 and best eval\n # counts from 1.\n best_index = int(self.opt_res_dset[-1, -1] - 1)\n\n for j in range(len(self.detector_function.value_names)):\n y_ind = nr_sweep_funcs + j\n\n ##########################################\n # Main plotmon\n ##########################################\n for x_ind in range(nr_sweep_funcs):\n\n x = self.dset[:, x_ind]\n y = self.dset[:, y_ind]\n\n self.curves[i]['config']['x'] = x\n self.curves[i]['config']['y'] = y\n\n best_x = x[best_index]\n best_y = y[best_index]\n self.curves_best_ever[i]['config']['x'] = [best_x]\n self.curves_best_ever[i]['config']['y'] = [best_y]\n mean_x = self.opt_res_dset[:, 2+x_ind]\n # std_x is needed to implement errorbars on X\n # std_x = self.opt_res_dset[:, 2+nr_sweep_funcs+x_ind]\n # to be replaced with an actual mean\n mean_y = self.opt_res_dset[:, 2+2*nr_sweep_funcs]\n mean_y = get_generation_means(\n self.opt_res_dset[:, 1], y)\n # TODO: turn into errorbars\n self.curves_distr_mean[i]['config']['x'] = mean_x\n self.curves_distr_mean[i]['config']['y'] = mean_y\n i += 1\n ##########################################\n # Secondary plotmon\n ##########################################\n # Measured value vs function evaluation\n y = self.dset[:, y_ind]\n x = range(len(y))\n self.iter_traces[j]['config']['x'] = x\n self.iter_traces[j]['config']['y'] = y\n\n # generational means\n gen_idx = self.opt_res_dset[:, 1]\n self.iter_mean_traces[j]['config']['x'] = gen_idx\n self.iter_mean_traces[j]['config']['y'] = mean_y\n\n # This plots the best ever measured value vs iteration\n # number of evals column\n best_evals_idx = (\n self.opt_res_dset[:, -1] - 1).astype(int)\n best_func_val = y[best_evals_idx]\n 
self.iter_bever_traces[j]['config']['x'] = best_evals_idx\n self.iter_bever_traces[j]['config']['y'] = best_func_val\n\n self.main_QtPlot.update_plot()\n self.secondary_QtPlot.update_plot()\n\n self.time_last_ad_plot_update = time.time()\n\n except Exception as e:\n log.warning(traceback.format_exc())", "def summarize(group, fs=None, include_source=True):\n _line_break = '{0:-<120}\\n'.format('')\n tests = sorted(ComparisonBenchmark.groups[group], key=lambda t: getattr(t, 'time_average_seconds'))\n log = StringIO.StringIO()\n log.write('Call statement:\\n\\n')\n log.write('\\t' + tests[0].stmt)\n log.write('\\n\\n\\n')\n fmt = \"{0: <8} {1: <35} {2: <12} {3: <15} {4: <15} {5: <14}\\n\"\n log.write(fmt.format('Rank', 'Function Name', 'Time', '% of Slowest', 'timeit_repeat', 'timeit_number'))\n log.write(_line_break)\n log.write('\\n')\n\n for i, t in enumerate(tests):\n func_name = \"{}.{}\".format(t.classname, t.callable.__name__) if t.classname else t.callable.__name__\n if i == len(tests)-1:\n time_percent = 'Slowest'\n else:\n time_percent = \"{:.1f}\".format(t.time_average_seconds / tests[-1].time_average_seconds * 100)\n log.write(fmt.format(i+1,\n func_name,\n convert_time_units(t.time_average_seconds),\n time_percent,\n t.timeit_repeat,\n t.timeit_number))\n log.write(_line_break)\n\n if include_source:\n log.write('\\n\\n\\nSource Code:\\n')\n log.write(_line_break)\n for test in tests:\n log.write(test.log.getvalue())\n log.write(_line_break)\n\n if isinstance(fs, str):\n with open(fs, 'w') as f:\n f.write(log.getvalue())\n\n elif fs is None:\n print(log.getvalue())\n else:\n try:\n fs.write(log.getvalue())\n except AttributeError as e:\n print(e)", "def plot_cdf_compare(self, output_fn_base=\"CDF_compare.png\"):\n self.logger.debug(\"Plot CDF to %s_[train|test].png\", output_fn_base)\n\n timeout = self.scenario.cutoff\n\n data = self.data\n\n def prepare_data(x_data):\n \"\"\" Helper function to keep things easy, generates y_data and\n manages x_data-timeouts \"\"\"\n x_data = sorted(x_data)\n y_data = np.array(range(len(x_data)))/(len(x_data)-1)\n for idx in range(len(x_data)):\n if (timeout != None) and (x_data[idx] >= timeout):\n x_data[idx] = timeout\n y_data[idx] = y_data[idx-1]\n return (x_data, y_data)\n\n # Generate y_data\n data = {config_name : {label : prepare_data(x_data) for label, x_data in\n data[config_name].items()}\n for config_name in data}\n\n output_fn = [output_fn_base + \"_\" + inst_set + '.png' for inst_set in\n ['train', 'test']]\n\n for inst_set, out in zip(['train', 'test'], output_fn):\n f = plt.figure(1, dpi=100, figsize=(10,10))\n ax1 = f.add_subplot(1,1,1)\n ax1.step(data['default'][inst_set][0],\n data['default'][inst_set][1], color='red',\n linestyle='-', label='default train')\n ax1.step(data['incumbent'][inst_set][0],\n data['incumbent'][inst_set][1], color='blue',\n linestyle='-', label='incumbent train')\n ax1.legend()\n ax1.grid(True)\n ax1.set_xscale('log')\n ax1.set_ylabel('probability of being solved')\n ax1.set_xlabel('time')\n # Plot 'timeout'\n if timeout:\n ax1.text(timeout,\n ax1.get_ylim()[0] - 0.1 * np.abs(ax1.get_ylim()[0]),\n \"timeout \", horizontalalignment='center',\n verticalalignment=\"top\", rotation=30)\n ax1.axvline(x=timeout, linestyle='--')\n\n f.tight_layout()\n f.savefig(out)\n plt.close(f)\n return output_fn", "def plot_fishing_mortality(df):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_position(default_timeseries_position) \n\n Fn = df['Fn'].groupby([df.Year, df.Reg, df.Sreg]).mean()\n\n 
all_fishing_mortality = Fn.loc[:, 'All', 'All']\n ma_fishing_mortality = Fn.loc[:, '1', 'All']\n gb_fishing_mortality = Fn.loc[:, '2', 'All']\n\n # Don't plot the first year. Also, the data is shifted by one year.\n # For some reason, restricting the year range above results in a series\n # that still have a multi-index. This seems like the cleanest way to do\n # that.\n all_fishing_mortality = all_fishing_mortality[2:]\n ma_fishing_mortality = ma_fishing_mortality[2:]\n gb_fishing_mortality = gb_fishing_mortality[2:]\n\n all_fishing_mortality.index = all_fishing_mortality.index - 1\n ma_fishing_mortality.index = ma_fishing_mortality.index - 1\n gb_fishing_mortality.index = gb_fishing_mortality.index - 1\n\n all_fishing_mortality.plot(ax=ax, label='All') \n ma_fishing_mortality.plot(ax=ax, label='Mid Atlantic')\n gb_fishing_mortality.plot(ax=ax, label='Georges Bank')\n\n ax.legend(loc='best')\n\n content = io.BytesIO()\n plt.savefig(content, format='png')\n content.seek(0)\n image_cache['fishing_mortality']['fishing_mortality'] = content\n\n plt.close()", "def plot_results(epochs: int = 20, segments: int = 5, plot: bool = True):\n \"\"\"\n plt.figure(0)\n plot_approximation(\"product\", modelSetProd, 1, epochs, gpus=0)\n \"\"\"\n\n data = [\n {\n \"title\": \"Piecewise Discontinuous Function Approximation\",\n \"layer\": \"discontinuous\",\n \"model_set\": modelSetD,\n },\n {\n \"title\": \"Piecewise Continuous Function Approximation\",\n \"layer\": \"continuous\",\n \"model_set\": modelSetC,\n },\n {\n \"title\": \"Polynomial function approximation\",\n \"layer\": \"polynomial\",\n \"model_set\": modelSetP,\n },\n {\n \"title\": \"Fourier function approximation\",\n \"layer\": \"fourier\",\n \"model_set\": modelSetF,\n },\n ]\n\n for index, element in enumerate(data):\n if plot is True:\n plt.figure(index)\n plot_approximation(\n element[\"layer\"],\n element[\"model_set\"],\n 5,\n epochs,\n accelerator=\"cpu\",\n periodicity=2,\n )\n\n if plot is True:\n plt.title(\"Piecewise Discontinuous Function Approximation\")\n\n if plot is True:\n plt.show()", "def plot_balancer_results_per_classifier(data_balancer_results_per_classifier, parameter=(2, \"Balanced Accuracy\")):\n classifier_arr = []\n color = iter(cm.Set1(np.linspace(0, 1, len(data_balancer_results_per_classifier) + 1)))\n mean_classifier_arr = [0] * len(data_balancer_results_per_classifier[0][1])\n for (classifier_name, data_balancer_results) in data_balancer_results_per_classifier:\n individual_data_balance_plot = []\n x = 0\n for (data_balancer_name, result_arr) in data_balancer_results:\n individual_data_balance_plot.append(result_arr[parameter[0]]) # Average True rate\n mean_classifier_arr[x] += result_arr[parameter[0]]\n x += 1\n classifier_arr.append(individual_data_balance_plot)\n\n classifier_arr.append([value / float(len(data_balancer_results_per_classifier)) for value in mean_classifier_arr])\n\n fig = plt.figure(figsize=(12, 10))\n\n classifiers = np.arange(len(classifier_arr))\n data_balancers = np.arange(len(classifier_arr[0])) * 3\n bar_width = 0.2\n opacity = 0.9\n\n for i in range(len(classifier_arr)):\n if i + 1 != len(classifier_arr):\n label = data_balancer_results_per_classifier[i][0]\n else:\n label = \"Mean classification\"\n\n plt.bar(data_balancers + (i * bar_width), classifier_arr[i], bar_width,\n alpha=opacity,\n color=color.next(),\n label=label)\n\n plt.locator_params(axis='y', nbins=10)\n plt.xlabel(\"Data balance algorithm\")\n plt.ylabel(parameter[1])\n plt.legend(loc=\"lower right\", fancybox=True, 
frameon=True)\n plt.title(\"{0} per data balance algorithm\".format(parameter[1]))\n plt.ylim([0.0, 1.00])\n data_balance_labels = [filter(str.isupper, data_balance_name) if data_balance_name != \"None\" and len(filter(str.isupper, data_balance_name)) < 6 else data_balance_name for\n (data_balance_name, _) in data_balancer_results_per_classifier[0][1]]\n plt.xticks(data_balancers + (bar_width / 2) * len(classifiers), data_balance_labels)\n\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/data_balancer_results_per_classifier_plot_{0}_{1}.png\".format(parameter[1], current_time))\n plt.close(fig)", "def _show_learning_rate():\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6.4 * 2, 4.8))\n\n # Visualize c_prime\n c_prime_list = np.linspace(1, 100, num=11)\n x_label = f\"c'\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[0]\n x_list = c_prime_list\n\n # MNIST\n y_list = [161, 16, 14, 15, 20, 21, 24, 27, 30, 30, 35]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [63, 12, 12, 15, 18, 19, 22, 25, 26, 28, 30]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [1297, 724, 221, 80, 52, 51, 54, 54, 52, 60, 60]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n # Visualize t0\n t0_list = np.linspace(1, 100, num=11)\n x_label = f\"t0\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[1]\n x_list = t0_list\n\n # MNIST\n y_list = [16, 16, 16, 16, 16, 17, 16, 16, 16, 16, 16]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [765, 765, 767, 772, 772, 773, 789, 789, 793, 796, 799]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n plt.show()", "def ecdf(data, group_by=None, targets=None, ax=None, **kwargs):\n text_color = plt.rcParams.get('ytick.color')\n linewidth = 2\n # Handle keyword arguments\n for k, v in kwargs.items():\n if k not in ['linewidth']:\n raise TypeError('ecdf got an unexpeted keyword argument: {}'.format(k))\n else:\n if k == 'linewidth':\n linewidth = v\n # Deal with input data\n if group_by is not None:\n if type(data) == pd.core.frame.DataFrame:\n print(\"Grouping DataFrame by {}\".format(group_by))\n print(\"Target Features:\", targets)\n if type(targets) == str:\n targets = [targets]\n else:\n try:\n it = iter(targets)\n except:\n targets = [targets]\n cols = targets + [group_by]\n data = data[cols]\n variables = data.columns[:-1]\n data = data.groupby(group_by)\n else:\n return(\"Error: only DataFrame input works with group_by functionality\")\n else: \n if type(data) == pd.core.series.Series:\n variables = [data.name]\n elif type(data) == pd.core.frame.DataFrame:\n if targets is None:\n variables = list(data.columns)\n else:\n if type(targets) == str:\n targets = [targets]\n else: \n try:\n it = iter(targets)\n except:\n targets = [targets]\n print(\"Target Features:\", targets)\n variables = targets\n elif type(data) == pd.core.groupby.generic.DataFrameGroupBy:\n variables = list(data.obj.columns)\n else:\n data = pd.Series(data, name='data')\n variables = [data.name]\n \n \n if type(data) == 
pd.core.groupby.generic.DataFrameGroupBy:\n for variable in variables:\n if not ax:\n fig, ax = plt.subplots(figsize=(12,8))\n max_x = 0\n for name, group in data:\n x = np.sort(group[variable])\n n = len(group)\n y = np.arange(1, n+1) / n\n ax.plot(x, y, marker='.', label=name, alpha=0.7, linewidth=linewidth)\n if max(x) > max_x:\n max_x = max(x)\n #max_x = 0\n ax.axhline(y=0.5, ls=':', color='gray')\n ax.axhline(y=0.05, ls=':', color='gray')\n ax.axhline(y=0.95, ls=':', color='gray')\n ax.annotate('0.5', xy=(max_x, 0.47))\n ax.annotate('0.95', xy=(max_x, 0.92))\n ax.annotate('0.05', xy=(max_x, 0.02))\n ax.legend()\n plt.title(\"ECDF for feature: {}\".format(variable), color=text_color)\n plt.show()\n \n else:\n n = len(data)\n y = np.arange(1, n+1) / n\n if not ax:\n fig, ax = plt.subplots(figsize=(12,8))\n max_x = 0\n for variable in variables:\n if type(data) == pd.core.series.Series:\n x = np.sort(data)\n string = variable\n else:\n x = np.sort(data[variable])\n string = 'Data'\n ax.plot(x, y, marker='.', label=variable)\n if max(x) > max_x:\n max_x = max(x)\n ax.axhline(y=0.5, ls=':', color='gray')\n ax.axhline(y=0.05, ls=':', color='gray')\n ax.axhline(y=0.95, ls=':', color='gray')\n ax.annotate('0.5', xy=(max_x, 0.47))\n ax.annotate('0.95', xy=(max_x, 0.92))\n ax.annotate('0.05', xy=(max_x, 0.02))\n plt.title(\"ECDF for {}\".format(string), color=text_color)\n plt.legend()\n plt.show()", "def plot(self, x_feature=\"ratio\", y_feature=\"fold_change\", ax=None):\n\n if ax is None:\n ax = plt.gca()\n\n # - Data\n x, y = (\n self.bed_seg.query(f\"sgRNA_ID >= {self.n_sgrna}\")[x_feature],\n self.bed_seg.query(f\"sgRNA_ID >= {self.n_sgrna}\")[y_feature],\n )\n x_, y_ = (\n self.bed_seg.query(f\"sgRNA_ID < {self.n_sgrna}\")[x_feature],\n self.bed_seg.query(f\"sgRNA_ID < {self.n_sgrna}\")[y_feature],\n )\n\n x_pred = np.arange(0, x.max(), 0.1)\n y_pred, y_pred_std = self.predict(x_pred.reshape(-1, 1), return_std=True)\n\n # - Plot\n # Segments used for fitting\n ax.scatter(\n x,\n y,\n c=cy.QCplot.PAL_DBGD[0],\n alpha=0.7,\n edgecolors=\"white\",\n lw=0.3,\n label=f\"#(sgRNA_ID) >= {self.n_sgrna}\",\n )\n\n # Segments not used for fitting\n plt.scatter(\n x_,\n y_,\n c=cy.QCplot.PAL_DBGD[0],\n marker=\"X\",\n alpha=0.3,\n edgecolors=\"white\",\n lw=0.3,\n label=f\"#(sgRNA_ID) < {self.n_sgrna}\",\n )\n\n # Plot GP fit\n # GP fit\n plt.plot(\n x_pred, y_pred, ls=\"-\", lw=1.0, c=cy.QCplot.PAL_DBGD[1], label=\"GPR mean\"\n )\n plt.fill_between(\n x_pred,\n y_pred - y_pred_std,\n y_pred + y_pred_std,\n alpha=0.2,\n color=cy.QCplot.PAL_DBGD[1],\n lw=0,\n )\n\n # Misc\n plt.axhline(0, ls=\":\", color=cy.QCplot.PAL_DBGD[2], lw=0.3, zorder=0)\n\n plt.xlabel(f\"Segment\\n{x_feature}\")\n plt.ylabel(f\"Segment\\nmean {y_feature}\")\n\n plt.title(f\"{self.kernel_}\", fontsize=6)\n\n plt.legend(frameon=False)\n\n return ax", "def plot_running_time(num_clusters):\n slow_running = []\n fast_running = []\n for dummy_i in range(2, num_clusters):\n cluster_list = gen_random_clusters(dummy_i)\n start = timer()\n fast_closest_pair(cluster_list)\n end = timer()\n fast_running.append((end - start))\n \n start = timer()\n slow_closest_pair(cluster_list)\n end = timer()\n slow_running.append((end - start))\n #\n plt.plot(range(2, num_clusters), fast_running)\n plt.plot(range(2, num_clusters), slow_running)\n plt.xlabel(\"num clusters\")\n plt.ylabel(\"running time in seconds\")\n plt.title(\"Running time slow closest pair vs fast closest pair.\")\n plt.legend([\"fast closest pair\", \"slow closest pair\"])\n 
plt.show()", "def convergence_plot(self, varying, savename = False):\n assert(varying == \"Mx\" or varying == \"My\" or varying == \"Both\") \n self._colors = [\"red\", \"green\", \"black\", \"orange\"]\n self._powers = [2] # Power used in the convergence plot. \n\n # Assert that the savename variable is of the correct format.\n if (varying == \"Mx\" or varying == \"My\") and savename:\n assert(type(savename) is list and len(savename) == 4)\n elif savename:\n assert(isinstance(savename, str))\n\n if varying == \"Mx\":\n self._constant_list = [10, 100, pow(10, 3), pow(10, 4)] # Constant values in plots. \n maximum = 2**7 # Maximum limit of Mx.\n elif varying == \"My\":\n self._constant_list = [10, 100, pow(10, 3), pow(10, 4)] # Constant values in plots. \n maximum = 2**7 # Maximum limit of My.\n elif varying == \"Both\":\n maximum = 2**10 # Maximum limit of My and Mx. \n self._powers = [1] # Power used in the convergence plot. \n\n varying_list = 2 ** np.arange(1, np.log(maximum)/np.log(2)+1, dtype = int)\n if varying == \"Both\":\n self._discrete_error = np.zeros(len(varying_list))\n for i, m in enumerate(varying_list):\n Usol, xv, yv = self.num_solution_Mx_My(Mx = m, My = m)\n analsol = self.analytic_solution(xv, yv)\n self._discrete_error[i] = e_l(Usol, analsol)\n if savename:\n self.plot_plots(varying_list, varying_list, savename=savename)\n else: \n self.plot_plots(varying_list, varying_list)\n elif varying:\n for j, constant in enumerate(self._constant_list):\n self._discrete_error = np.zeros(len(varying_list))\n for i, m in enumerate(varying_list):\n if varying == \"Mx\":\n Usol, xv, yv = self.num_solution_Mx_My(Mx = m, My = constant)\n elif varying == \"My\":\n Usol, xv, yv = self.num_solution_Mx_My(Mx = constant, My = m)\n\n analsol = self.analytic_solution(xv, yv)\n self._discrete_error[i] = e_l(Usol, analsol)\n if savename:\n self.plot_plots(varying_list, constant, savename=savename[j])\n else: \n self.plot_plots(varying_list, constant)", "def plot_grating_coupler_sweep_efficiency(matlab_file_path, function=log):\n d = loadmat(matlab_file_path)\n print(d.keys())\n\n N = len(d[\"M_sweep\"][0])\n parameter = np.zeros(N)\n efficiency = np.zeros(N)\n\n for i in range(N):\n parameter[i] = 1e9 * d[\"M_sweep\"][0][i]\n # x = d[\"WL\"][0] * 1e9\n y = function(d[\"M_T\"][i])\n efficiency[i] = np.max(y)\n\n plt.figure()\n plt.xlabel(\"waveguide height (nm)\")\n plt.ylabel(\"Eficiency (dB)\")\n plt.plot(parameter, efficiency, \".\")\n plt.legend()", "def plot_pointwise_convergence(x, a, n, coeff_func, name, f, b, ylim_min,\n save=False, dirname=DEFAULT_DIR):\n series = legendre_series(x, coeff_func(a))\n degrees = np.arange(n)\n values = np.array([next(series) for _ in degrees])\n errors = np.abs(f(x, a) - values)\n\n a_min = -convergence_rate(x, a, b)\n alpha, beta = convergence_line_log(degrees, errors, a_min)\n\n fig, ax = plt.subplots()\n ax.set(\n ylim=(ylim_min, 1e1),\n title=f\"x={x}, a={a}\",\n xlabel=r\"$k$\",\n ylabel=r\"$|\\varepsilon_k(x)|$\"\n )\n ax.loglog(degrees[1:], errors[1:])\n # ax.loglog(degrees[indices], errors[indices])\n ax.loglog(degrees[1:], beta * degrees[1:] ** alpha,\n label=rf\"$\\alpha={-alpha:.3f}$\"+'\\n'+rf\"$\\beta={beta:.3f}$\")\n ax.legend()\n if save:\n fpath = os.path.join(dirname, \"pointwise_convergence\", name, str(a))\n os.makedirs(fpath, exist_ok=True)\n plt.savefig(os.path.join(fpath, f\"{x:.7f}.png\"), dpi=300)\n else:\n plt.show()\n plt.close(fig)", "def deciles_chart_ebm(\n df,\n period_column=None,\n column=None,\n title=\"\",\n 
ylabel=\"\",\n show_outer_percentiles=True,\n show_legend=True,\n ax=None,\n):\n sns.set_style(\"whitegrid\", {\"grid.color\": \".9\"})\n if not ax:\n fig, ax = plt.subplots(1, 1)\n df = compute_deciles(df, period_column, column, show_outer_percentiles)\n linestyles = {\n \"decile\": {\n \"line\": \"b--\",\n \"linewidth\": 1,\n \"label\": \"decile\",\n },\n \"median\": {\n \"line\": \"b-\",\n \"linewidth\": 1.5,\n \"label\": \"median\",\n },\n \"percentile\": {\n \"line\": \"b:\",\n \"linewidth\": 0.8,\n \"label\": \"1st-9th, 91st-99th percentile\",\n },\n }\n label_seen = []\n for percentile in range(1, 100): # plot each decile line\n data = df[df[\"percentile\"] == percentile]\n add_label = False\n\n if percentile == 50:\n style = linestyles[\"median\"]\n add_label = True\n elif show_outer_percentiles and (percentile < 10 or percentile > 90):\n style = linestyles[\"percentile\"]\n if \"percentile\" not in label_seen:\n label_seen.append(\"percentile\")\n add_label = True\n else:\n style = linestyles[\"decile\"]\n if \"decile\" not in label_seen:\n label_seen.append(\"decile\")\n add_label = True\n if add_label:\n label = style[\"label\"]\n else:\n label = \"_nolegend_\"\n\n ax.plot(\n data[period_column],\n data[column],\n style[\"line\"],\n linewidth=style[\"linewidth\"],\n label=label,\n )\n ax.set_ylabel(ylabel, size=15, alpha=0.6)\n if title:\n ax.set_title(title, size=18)\n # set ymax across all subplots as largest value across dataset\n ax.set_ylim([0, df[column].max() * 1.05])\n ax.tick_params(labelsize=12)\n ax.set_xlim(\n [df[period_column].min(), df[period_column].max()]\n ) # set x axis range as full date range\n\n ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(\"%B %Y\"))\n ax.xaxis.set_major_locator(matplotlib.dates.MonthLocator(interval=1))\n if show_legend:\n ax.legend(\n bbox_to_anchor=(1.05, 0.6),\n ncol=1,\n fontsize=12,\n borderaxespad=0.0,\n frameon=True,\n )\n\n # rotates and right aligns the x labels, and moves the bottom of the\n # axes up to make room for them\n plt.gcf().autofmt_xdate(rotation=90, ha=\"center\", which=\"both\")\n\n plt.show()\n return plt", "def plot_iteration(ax, x_n, y_n, f, \n max_labels=6, resfct = 100, include_chords=True,\n left_extra=0.01, right_extra=0.01):\n if include_chords:\n # create a list including chord points:\n x_c = sum([[x, x] for x in x_n[1:]], [x_n[0]])\n y_c = sum([[0, y] for y in y_n[1:]], [y_n[0]])\n else:\n x_c = x_n\n y_c = y_n\n # the iteration results\n ax.scatter(x_c, y_c, marker='x', color='red', s=30)\n\n # the convergence pattern\n ax.plot(x_c, y_c, color='green', ls='--')\n\n # add some labels\n # figure out a reasonable offset for labels\n dxt = (np.max(x_n)-np.min(x_n))/50.\n dyt = (np.max(y_n)-np.min(y_n))/50.\n # only plot a maximum of max_labels labels, so plot doesn't get too messy\n for i,(x,y) in enumerate(zip(x_n, y_n)):\n ax.text(x_n[i]+dxt, y_n[i]+dyt, '$x_{}$'.format(i), fontsize=16)\n if i == max_labels:\n break\n\n # the function\n x = np.linspace(np.min(x_n) - left_extra, np.max(x_n) + right_extra, resfct)\n ax.plot(x, f(x), 'b', label='$F(x)$')\n\n # zero line\n xlim = ax.get_xlim()\n ax.plot([xlim[0], xlim[1]], [0., 0.], 'k--')\n # add ticks for the x_n\n for x in x_n:\n ax.plot([x, x], [-dyt, dyt], 'k')\n ax.set_xlim(xlim)\n ax.set_xlabel('$x$', fontsize=16)\n ax.set_ylabel('$y=F(x)$', fontsize=16)", "def _plot_ecdf(self, numerator_name, denominator_name):\n x = self.ecdf[numerator_name][denominator_name]['x']\n y = self.ecdf[numerator_name][denominator_name]['y']\n\n lower_bound = 
x[y.index(min(y,\n key=lambda x:\n abs(x-self.confidence_level)))]\n median = x[y.index(min(y, key=lambda x:abs(x-0.5)))]\n upper_bound = x[y.index(min(y,\n key=lambda x:\n abs(x-(1-self.confidence_level))))]\n\n sns.lineplot(x=x, y=y)\n ci = 1 - self.confidence_level\n title = ('Median Lift was {0:.2%}, with a '\n '{1:.0%} CI of [{2:.2%}, {3:.2%}]'.format(median,\n ci,\n lower_bound,\n upper_bound))\n title = self._format_title(title)\n plt.title(title)\n plt.xlabel('Lift')\n plt.ylabel('Cumulative Probability')\n plt.axvline(x=lower_bound, linestyle='dotted', color='black')\n plt.axvline(x=median, linestyle='dotted', color='black')\n plt.axvline(x=upper_bound, linestyle='dotted', color='black')\n sns.despine(left=True)\n locs, labels = plt.xticks()\n labels = self._format_axis_as_percent(locs, labels)\n plt.xticks(locs, labels=labels)", "def plot_test_objective_multi(df, exp_config, output_dir, show):\n output_file_name = f\"{inspect.stack()[0][3]}.{FILE_EXTENSION}\"\n output_path = os.path.join(output_dir, output_file_name)\n\n plt.figure()\n\n for exp_name, exp_df in df.items():\n\n if \"rep\" in exp_config[\"data\"][exp_name]:\n\n exp_dfs = exp_df\n\n T = np.linspace(0, exp_config[\"t_max\"], 50000)\n\n y_list = []\n for i, df_i in enumerate(exp_dfs):\n\n df_i = process_for_test_objective(\n df_i.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = df_i.loc[df_i[\"max_idx\"]][\"timestamp_end\"].values\n y = df_i.loc[df_i[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n f = interp1d(x, y, kind=\"previous\", fill_value=\"extrapolate\")\n y = exp_config.get(\"best_objective\", 1) - f(T)\n y_list.append(y)\n\n y_list = np.asarray(y_list)\n y_mean = y_list.mean(axis=0)\n y_std = y_list.std(axis=0)\n y_se = y_std / np.sqrt(y_list.shape[0])\n\n plt.plot(\n T,\n y_mean,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n plt.fill_between(\n T,\n y_mean - 1.96 * y_se,\n y_mean + 1.96 * y_se,\n facecolor=exp_config[\"data\"][exp_name][\"color\"],\n alpha=0.3,\n )\n\n else:\n\n exp_df = process_for_test_objective(\n exp_df.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = exp_df.loc[exp_df[\"max_idx\"]][\"timestamp_end\"].values\n y = exp_df.loc[exp_df[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n idx = np.unique(x, return_index=True, axis=0)[1]\n\n x = x[idx]\n y = y[idx]\n\n x = np.clip(np.concatenate([x, [exp_config[\"t_max\"]]]), 0, exp_config[\"t_max\"])\n y = np.clip(exp_config.get(\"best_objective\", 1) - np.concatenate([y, [y[-1]]]), 0, 1)\n \n area = aulc(x, y)\n exp_config[\"data\"][exp_name][\"AULC\"] = area\n \n plt.step(\n x[:],\n y[:],\n where=\"post\",\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n marker=exp_config[\"data\"][exp_name].get(\"marker\", None),\n markevery=len(x) // 5,\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n\n ax = plt.gca()\n ticker_freq = exp_config[\"t_max\"] / 5\n ax.xaxis.set_major_locator(ticker.MultipleLocator(ticker_freq))\n ax.xaxis.set_major_formatter(minute_major_formatter)\n\n if exp_config.get(\"title\") and PRINT_TITLE:\n plt.title(exp_config.get(\"title\"))\n\n # if MODE == \"min\":\n # plt.legend(loc=\"upper right\")\n # else:\n # plt.legend(loc=\"lower right\")\n plt.legend(loc=exp_config.get(\"legend\", \"best\"))\n\n 
plt.ylabel(\"Test Regret\")\n plt.xlabel(\"Search time (min.)\")\n\n if exp_config.get(\"ylim\"):\n plt.ylim(*exp_config.get(\"ylim\"))\n\n if exp_config.get(\"xlim\"):\n plt.xlim(*exp_config.get(\"xlim\"))\n else:\n plt.xlim(0, exp_config[\"t_max\"])\n\n if exp_config.get(\"yscale\"):\n plt.yscale(exp_config.get(\"yscale\"))\n\n plt.grid(which=\"minor\", color=\"gray\", linestyle=\":\")\n plt.grid(which=\"major\", linestyle=\"-\")\n plt.tight_layout()\n plt.savefig(output_path, dpi=360)\n if show:\n plt.show()\n plt.close()", "def plot_metric_results():\n from run_metric_comparison_experiments import (\n PIVECTOR_TEMPLATE,\n PIVECTOR_DISTANCE_MATRIX_TEMPLATE,\n DISCRIMINATOR_DISTANCE_MATRIX_TEMPLATE,\n GAUSSIAN_DISTANCE_MATRIX_TEMPLATE,\n ENCODER_DISTANCE_MATRIX_TEMPLATE,\n DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE,\n NUM_TRAJECTORIES,\n NUM_COMPONENTS,\n NUM_REPETITIONS,\n REWARD_SCALES,\n ENVS\n )\n\n # Path-templates to each distance matrix to compare\n # BC = Behavioural Characteristication\n BC_DISTANCE_MATRIX_TEMPLATES = [\n PIVECTOR_DISTANCE_MATRIX_TEMPLATE,\n GAUSSIAN_DISTANCE_MATRIX_TEMPLATE,\n DISCRIMINATOR_DISTANCE_MATRIX_TEMPLATE,\n ENCODER_DISTANCE_MATRIX_TEMPLATE,\n DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE\n ]\n\n BC_LEGEND_NAMES = [\n \"Supervector\",\n \"Gaussian\",\n \"Discriminator\",\n \"Encoder\",\n \"Discretization\"\n ]\n\n BC_PLOT_COLORS = [\n \"C0\",\n \"C1\",\n \"C2\",\n \"C3\",\n \"C4\"\n ]\n\n fig, axs = pyplot.subplots(\n figsize=[4.8 * 3 * 0.75, 4.8 * 0.75],\n nrows=1,\n ncols=3,\n )\n\n def get_policy_names(env):\n policy_names = glob(PIVECTOR_TEMPLATE.format(env=env, num_traj=\"*\", num_components=\"*\", policy_name=\"*\", repetition_num=\"*\"))\n policy_names = [\"_\".join(os.path.basename(x).split(\"_\")[-4:-2]) for x in policy_names]\n policy_names = sorted(list(set(policy_names)))\n return policy_names\n\n # For each different distance measurement\n for distance_matrix_template, plot_legend_name, plot_color in zip(BC_DISTANCE_MATRIX_TEMPLATES, BC_LEGEND_NAMES, BC_PLOT_COLORS):\n # These will be NUM_TRAJECTORY length lists\n average_scores = np.ones((len(NUM_TRAJECTORIES),))\n std_scores = np.ones((len(NUM_TRAJECTORIES),))\n for num_traj_idx, num_traj in enumerate(NUM_TRAJECTORIES):\n # Average over environments, policies and repetitions\n scores = []\n for env_i, env in enumerate(ENVS):\n if \"Bipedal\" in env and distance_matrix_template == DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE:\n print(\"[Note] Skipping env {} for discretization distances (OOM)\".format(env))\n continue\n min_reward, max_reward = REWARD_SCALES[env]\n policy_names = get_policy_names(env)\n\n for policy_name in policy_names:\n for repetition in range(1, NUM_REPETITIONS + 1):\n # Ugh bit of messing around because I did not think this through...\n if distance_matrix_template == PIVECTOR_DISTANCE_MATRIX_TEMPLATE:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, num_components=NUM_COMPONENTS, policy_name=policy_name, repetition_num=repetition)\n else:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, policy_name=policy_name, repetition_num=repetition)\n\n data = np.load(file_path)\n distance_matrix = data[\"distance_matrix\"]\n rewards = data[\"average_episodic_rewards\"]\n\n raveled_reward_distances = np.abs(rewards - rewards[:, None])\n # Take upper diagonal, skip diagonal\n raveled_reward_distances = raveled_reward_distances[np.triu_indices(raveled_reward_distances.shape[0], 1)]\n raveled_distances = 
distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)]\n\n # Score is correlation between the two\n correlation = np.corrcoef(raveled_distances, raveled_reward_distances)[0, 1]\n scores.append(correlation)\n\n scores = np.array(scores)\n average_score = np.mean(scores)\n std_score = np.std(scores)\n average_scores[num_traj_idx] = average_score\n std_scores[num_traj_idx] = std_score\n ax = axs[0]\n ax.plot(NUM_TRAJECTORIES, average_scores, c=plot_color, label=plot_legend_name)\n ax.scatter(NUM_TRAJECTORIES, average_scores, c=plot_color)\n #ax.fill_between(\n # NUM_TRAJECTORIES,\n # average_scores - std_scores,\n # average_scores + std_scores,\n # alpha=0.2,\n # color=plot_color,\n # edgecolor=\"none\",\n # linewidth=0.0\n #)\n ax.set_xticks(NUM_TRAJECTORIES)\n ax.tick_params(axis='both', which='both', labelsize=\"x-large\")\n ax.set_ylabel(\"Correlation with return-distances\", fontsize=\"x-large\")\n ax.set_xlabel(\"Number of trajectories\", fontsize=\"x-large\")\n ax.grid(alpha=0.2)\n\n # Amount of error to \"ground truth\" result,\n # where \"ground truth\" is one of the results with 100 trajectories of data.\n # Because of wonkyness of this, store list [#num-traj] of lists,\n # each storing results for that num-traj run\n per_trajectory_relative_errors = [[] for i in NUM_TRAJECTORIES]\n for env in ENVS:\n if \"Bipedal\" in env and distance_matrix_template == DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE:\n print(\"[Note] Skipping env {} for discretization distances (OOM)\".format(env))\n continue\n policy_names = get_policy_names(env)\n for policy_name in policy_names:\n # The \"ground truth\" distances, will be filled with first\n # result with 100 trajectories.\n anchor_distance = None\n for traj_i, num_traj in enumerate(NUM_TRAJECTORIES):\n for repetition in range(1, NUM_REPETITIONS + 1):\n if distance_matrix_template == PIVECTOR_DISTANCE_MATRIX_TEMPLATE:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, num_components=NUM_COMPONENTS, policy_name=policy_name, repetition_num=repetition)\n else:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, policy_name=policy_name, repetition_num=repetition)\n distance_matrix = np.load(file_path)[\"distance_matrix\"]\n # Normalize to [0, 1]\n distance_matrix = (distance_matrix - distance_matrix.min()) / (distance_matrix.max() - distance_matrix.min())\n # Get only upper triangle as distance matrix is symmetric. 
Exlude diagonal\n raveled_distances = distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)]\n # Check if we use this as the zero-point or compute relative error to\n if anchor_distance is None:\n assert num_traj == 100\n anchor_distance = raveled_distances\n else:\n per_trajectory_relative_errors[traj_i].append(\n np.mean(np.abs(raveled_distances - anchor_distance) / np.abs(anchor_distance))\n )\n # Lists are not of equal length, so can not just change into an array\n mean_average_errors = np.array([np.mean(np.array(results) * 100) for results in per_trajectory_relative_errors])\n std_average_errors = np.array([np.std(np.array(results) * 100) for results in per_trajectory_relative_errors])\n ax = axs[1]\n ax.plot(NUM_TRAJECTORIES, mean_average_errors, c=plot_color, label=plot_legend_name)\n ax.scatter(NUM_TRAJECTORIES, mean_average_errors, c=plot_color)\n #ax.fill_between(\n # NUM_TRAJECTORIES,\n # mean_average_errors - std_average_errors,\n # mean_average_errors + std_average_errors,\n # alpha=0.2,\n # color=plot_color,\n # edgecolor=\"none\",\n # linewidth=0.0\n #)\n ax.set_xticks(NUM_TRAJECTORIES)\n ax.tick_params(axis='both', which='both', labelsize=\"x-large\")\n ax.set_ylabel(\"Relative error to ground truth (%)\", fontsize=\"x-large\")\n ax.set_xlabel(\"Number of trajectories\", fontsize=\"x-large\")\n ax.grid(alpha=0.2)\n\n # Variation between results\n cv_means = np.ones((len(NUM_TRAJECTORIES,)))\n cv_stds = np.ones((len(NUM_TRAJECTORIES,)))\n for traj_i, num_traj in enumerate(NUM_TRAJECTORIES):\n traj_averaged_cvs = []\n for env in ENVS:\n if \"Bipedal\" in env and distance_matrix_template == DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE:\n print(\"[Note] Skipping env {} for discretization distances (OOM)\".format(env))\n continue\n policy_names = get_policy_names(env)\n for policy_name in policy_names:\n # Compute std over repetitions\n distances = []\n for repetition in range(1, NUM_REPETITIONS + 1):\n if distance_matrix_template == PIVECTOR_DISTANCE_MATRIX_TEMPLATE:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, num_components=NUM_COMPONENTS, policy_name=policy_name, repetition_num=repetition)\n else:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, policy_name=policy_name, repetition_num=repetition)\n\n distance_matrix = np.load(file_path)[\"distance_matrix\"]\n # Normalize to [0, 1]\n distance_matrix = (distance_matrix - distance_matrix.min()) / (distance_matrix.max() - distance_matrix.min())\n # Get only upper triangle as distance matrix is symmetric. 
Exlude diagonal\n raveled_distances = distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)]\n distances.append(raveled_distances)\n distances = np.stack(distances)\n # Coefficient of variance (std / mean)\n average_cv = np.mean(np.std(distances, axis=0) / np.mean(distances, axis=0))\n traj_averaged_cvs.append(average_cv)\n traj_averaged_cvs = np.array(traj_averaged_cvs)\n cv_means[traj_i] = np.mean(traj_averaged_cvs)\n cv_stds[traj_i] = np.std(traj_averaged_cvs)\n\n ax = axs[2]\n ax.plot(NUM_TRAJECTORIES, cv_means, c=plot_color, label=plot_legend_name)\n ax.scatter(NUM_TRAJECTORIES, cv_means, c=plot_color)\n #ax.fill_between(\n # NUM_TRAJECTORIES,\n # cv_means - cv_stds,\n # cv_means + cv_stds,\n # alpha=0.2,\n # color=plot_color,\n # edgecolor=\"none\",\n # linewidth=0.0\n #)\n ax.set_xticks(NUM_TRAJECTORIES)\n ax.tick_params(axis='both', which='both', labelsize=\"x-large\")\n ax.set_ylabel(\"Coefficient of variance $\\\\sigma/\\\\mu$\", fontsize=\"x-large\")\n ax.set_xlabel(\"Number of trajectories\", fontsize=\"x-large\")\n ax.grid(alpha=0.2)\n\n axs[1].legend(prop={\"size\": \"large\"})\n pyplot.tight_layout()\n pyplot.savefig(\"figures/metric_comparison.pdf\", bbox_inches=\"tight\", pad_inches=0.0)", "def ridge_cross_validation_visualization(lambdas, accuracies):\n colors = ['r', 'b', 'y', 'g']\n labels = ['group_0', 'group_1', 'group_2', 'group_3']\n for i in range(len(accuracies)):\n plt.semilogx(lambdas, accuracies[i], marker=\".\", color=colors[i], label=labels[i])\n plt.xlabel(\"lambda\")\n plt.ylabel(\"accuracy\")\n plt.xlim(1e-4, 1)\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"./img/ridge_cross_validation\")", "def plot_k_clust(model, cparam, num_clusters_set, clust_thresh_size, col_palette, max_x=8, x_step=2.0, y_label=[0,24,48], y_minmax=(-5, 53), xlabel='Time from Onset (Years)', ylabel='ALSFRS-R Total'):\n fig = plt.figure(figsize=(20,7), constrained_layout=True)\n wrs = [2, 0.2]+[1]*(math.ceil(num_clusters_set/2))\n gs = fig.add_gridspec(2, len(wrs), width_ratios=wrs)\n f_ax1 = fig.add_subplot(gs[:, 0])\n\n axs = []\n for i in range(0,2):\n for j in range(2,math.ceil(num_clusters_set/2)+2):\n axs.append(fig.add_subplot(gs[i,j]))\n\n # Plot kcluster\n kmeans = KMeans(n_clusters=num_clusters_set, random_state=0).fit(cparam[['neg_linmap.A', 'rbf.lengthscale']])\n cparam['k_label']=kmeans.labels_\n cparam_freq = cparam.groupby('k_label')['clust_size'].sum()/cparam['clust_size'].sum()\n collist = [col_palette[x] for x in kmeans.labels_]\n f_ax1.scatter(cparam['neg_linmap.A'],cparam['rbf.lengthscale'], s=cparam['clust_size']*2, color=collist, alpha=0.9)\n _ = f_ax1.set_xlabel('Negative Mean Slope')\n _ = f_ax1.set_ylabel('Lengthscale')\n\n\n # plot clusters on subplots below\n nkclust = len(np.unique(kmeans.labels_))\n# klist = np.unique(kmeans.labels_)\n # sort by progression rate\n klist = cparam.groupby('k_label')['neg_linmap.A'].mean().sort_values(ascending=False).index\n kalph = [ascii_lowercase[a] for a, j in enumerate(klist)]\n cparam['kalph']=cparam['k_label'].map(dict(zip(klist,kalph)))\n # klist = [2,0,3,1]\n\n for j, kclust in enumerate(klist):\n allclust = cparam.index[kmeans.labels_==kclust]\n cax = axs[j]\n for i, k in enumerate(allclust):\n if model.allocmodel.Nk[int(k)]>=clust_thresh_size:\n _, num_pat_k = plot_mogp_by_clust(cax, model, None, int(k), data_flag=False, data_col=col_palette[kclust],\n model_flag=True, model_col=col_palette[kclust])\n _ = format_mogp_axs(cax, max_x=max_x, x_step=x_step, 
y_label=y_label, y_minmax=y_minmax)\n _ = cax.text(0.9, 0.9, '{}) {:.2f}%'.format(ascii_lowercase[j], cparam_freq.loc[kclust]*100), va='top', ha='right', transform = cax.transAxes)\n _ = cax.get_legend().remove()\n _ = cax.set_xlabel(xlabel)\n _ = cax.set_ylabel(ylabel)\n return cparam, cparam_freq, fig, f_ax1, axs", "def plot_aggregate(values, label='', smth_wnd=50, plot_mean=False, plot_stdev=False, plot_med=True, plot_iqr=True,\n\t\t\t\t plot_ext=False):\n\tif label != '':\n\t\tlabel += ' '\n\n\tsmoothen = True if 0 < 3 * smth_wnd < values.shape[1] else False\n\n\tx_values = np.arange(1, values.shape[1] + 1)\n\n\tmeans = np.mean(values, axis=0)\n\tif smoothen:\n\t\tmeans = pd.Series(means).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\tif plot_stdev:\n\t\tstd_dev = np.std(values, axis=0)\n\n\t\tif smoothen:\n\t\t\tstd_dev = pd.Series(std_dev).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\tplt.fill_between(x_values, means - std_dev, means + std_dev, alpha=0.25, label=label + '1×σ')\n\n\tif plot_mean:\n\t\tplt.plot(x_values, means, '--', label=label + 'Mean')\n\n\tif plot_iqr:\n\t\tiqr_25 = np.percentile(values, 25, axis=0)\n\t\tiqr_75 = np.percentile(values, 75, axis=0)\n\n\t\tif smoothen:\n\t\t\tiqr_25 = pd.Series(iqr_25).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\t\t\tiqr_75 = pd.Series(iqr_75).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\tplt.fill_between(x_values, iqr_25, iqr_75, alpha=0.45, label=label + 'IQR')\n\n\tif plot_med:\n\t\tmedians = np.percentile(values, 50, axis=0)\n\n\t\tif smoothen:\n\t\t\tmedians = pd.Series(medians).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\tplt.plot(x_values, medians, '--', label=label + 'Median', linewidth=1.5)\n\n\tif plot_ext:\n\t\text_min = np.min(values, axis=0)\n\t\text_max = np.max(values, axis=0)\n\n\t\tif smoothen:\n\t\t\text_min = pd.Series(ext_min).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\t\t\text_max = pd.Series(ext_max).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\tplt.fill_between(x_values, ext_min, ext_max, alpha=0.125, label=label + 'Extremes')", "def plot_comparisons(self, exact, blocked, blockederr, axdelta=None):\n if axdelta is None:\n axdelta = plt.gca()\n delta = self.means - exact\n axdelta.errorbar(list(range(1, self.max_dets)), delta[0], yerr=self.stderr[0], label='independent')\n axdelta.errorbar(list(range(1, self.max_dets)), delta[1], yerr=self.stderr[1], label='correlated')\n axdelta.axhline(delta[0, 0], linestyle=':', color='grey', label='reference')\n axdelta.axhline(0, linestyle='-', linewidth=1, color='black')\n if blocked:\n axdelta.axhline(blocked-exact, linestyle='--', color='darkgreen', label='reblocked')\n if blockederr:\n axdelta.fill_between([0, self.max_dets], [blocked-exact-blockederr,blocked-exact-blockederr],\n [blocked-exact+blockederr,blocked-exact+blockederr], color='green', alpha=0.2)\n axdelta.set_xlabel('Number of determinants in estimator')\n axdelta.set_ylabel(r'$E-E_\\mathrm{CCSD}$ / ha')\n axdelta.legend()\n return axdelta", "def display_group_density_plot(df, groupby, on, palette, figsize):\n\n if not isinstance(df, pd.core.frame.DataFrame):\n raise ValueError('df must be a pandas DataFrame')\n\n if not groupby:\n raise ValueError('groupby parameter must be provided')\n\n elif not groupby in df.keys():\n raise ValueError(groupby + ' column does not exist in the given DataFrame')\n\n if not on:\n raise ValueError('on parameter must be provided')\n\n elif not on in df.keys():\n raise ValueError(on + ' column does not exist in the given 
DataFrame')\n\n if len(set(df[groupby])) > 10:\n groups = df[groupby].value_counts().index[:10]\n\n else:\n groups = set(df[groupby])\n\n # Get relevant palette\n if palette:\n palette = palette[:len(groups)]\n else:\n palette = sns.color_palette()[:len(groups)]\n\n # Plot\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')\n\n for value, color in zip(groups, palette):\n sns.kdeplot(df.loc[df[groupby] == value][on], shade=True, color=color, label=value)\n\n ax.set_title(str(\"Distribution of \" + on + \" per \" + groupby + \" group\"), fontsize=30)\n \n ax.set_xlabel(on, fontsize=20)\n return ax", "def main():\n\n convergence_rates_e1 = pd.DataFrame(index=['Mean', 'St.D'])\n convergence_rates_e4 = pd.DataFrame(index=['Mean', 'St.D'])\n success_rates = pd.DataFrame(index=['1e-1', '1e-2', '1e-3', '1e-4', '1e-5'])\n peak_ratios = pd.DataFrame(index=['1e-1', '1e-2', '1e-3', '1e-4', '1e-5'])\n\n for function in range(1, 21):\n results = pickle.load(open('results/benchmarking_result_{}.pkl'.format(function), 'rb'))\n\n col_name = 'F{}'.format(function)\n index = function-1\n\n convergence_rates_e1.insert(index, col_name, results.ConvergenceRates[0])\n convergence_rates_e4.insert(index, col_name, results.ConvergenceRates[3])\n\n success_rates.insert(index, col_name, results.SuccessRate)\n peak_ratios.insert(index, col_name, results.PeakRatio)\n\n for i in results.SimulationSwarms:\n\n x_axis = [k for k, _ in i.items()]\n y_axis = [v for _, v in i.items()]\n\n plt.subplot(4, 5, function) # subplot indexes from 1\n plt.plot(x_axis, y_axis, 'k-')\n\n plt.savefig('results/nmmso_benchmark.png')\n plt.show()\n\n pd.set_option('display.max_columns', None) # make sure all columns are printed below\n print(convergence_rates_e1)\n print(convergence_rates_e4)\n print(success_rates)\n print(peak_ratios)\n\n # Table V from Fieldsend et al.\n table4 = pd.Series([\n peak_ratios.stack().median(),\n peak_ratios.stack().mean(),\n peak_ratios.stack().std()\n ])\n print(table4)\n\n # do we want to reproduce Fig. 
3", "def plot_objective_multi(df, exp_config, output_dir, show):\n output_file_name = f\"{inspect.stack()[0][3]}.{FILE_EXTENSION}\"\n output_path = os.path.join(output_dir, output_file_name)\n\n plt.figure()\n\n for exp_name, exp_df in df.items():\n\n if \"rep\" in exp_config[\"data\"][exp_name]:\n\n exp_dfs = exp_df\n\n T = np.linspace(0, exp_config[\"t_max\"], 50000)\n\n y_list = []\n for i, df_i in enumerate(exp_dfs):\n df_i = df_i.sort_values(\"timestamp_end\")\n x, y = df_i.timestamp_end.to_numpy(), df_i.objective.cummin().to_numpy()\n f = interp1d(x, y, kind=\"previous\", fill_value=\"extrapolate\")\n y = f(T)\n y_list.append(y)\n\n y_list = np.asarray(y_list)\n y_mean = y_list.mean(axis=0)\n y_std = y_list.std(axis=0)\n y_se = y_std / np.sqrt(y_list.shape[0])\n\n plt.plot(\n T,\n y_mean,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n plt.fill_between(\n T,\n y_mean - 1.96 * y_se,\n y_mean + 1.96 * y_se,\n facecolor=exp_config[\"data\"][exp_name][\"color\"],\n alpha=0.3,\n )\n # plt.fill_between(T,\n # y_mean-1.96*y_std,\n # y_mean+1.96*y_std,\n # facecolor=exp_config[\"data\"][exp_name][\"color\"],\n # alpha=0.3)\n else:\n exp_df = exp_df.sort_values(\"timestamp_end\")\n x, y = exp_df.timestamp_end.to_numpy(), exp_df.objective.cummax().to_numpy()\n if \"hartmann6D\" in exp_name:\n y = y + 3.32237 # hartmann6D\n\n plt.plot(\n x,\n y,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n marker=exp_config[\"data\"][exp_name].get(\"marker\", None),\n markevery=len(x) // 5,\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n\n ax = plt.gca()\n ticker_freq = exp_config[\"t_max\"] / 5\n ax.xaxis.set_major_locator(ticker.MultipleLocator(ticker_freq))\n ax.xaxis.set_major_formatter(minute_major_formatter)\n\n if exp_config.get(\"title\") and PRINT_TITLE:\n plt.title(exp_config.get(\"title\"))\n\n if MODE == \"min\":\n plt.legend(loc=\"upper right\")\n else:\n plt.legend(loc=\"lower right\")\n\n plt.ylabel(exp_config.get(\"ylabel\", \"Objective\"))\n plt.xlabel(\"Search time (min.)\")\n\n if exp_config.get(\"ylim\"):\n plt.ylim(*exp_config.get(\"ylim\"))\n\n if exp_config.get(\"xlim\"):\n plt.xlim(*exp_config.get(\"xlim\"))\n else:\n plt.xlim(0, exp_config[\"t_max\"])\n\n if exp_config.get(\"yscale\"):\n plt.yscale(exp_config.get(\"yscale\"))\n\n plt.grid()\n plt.tight_layout()\n plt.savefig(output_path, dpi=360)\n if show:\n plt.show()\n plt.close()", "def showVs(df, feat1, feat2):\n colors = ['blue', 'red', 'green', 'coral']\n for u in range(len(cBouts)):\n plt.plot(f[f['clust_ind'] == u][feat1],\n f[f['clust_ind'] == u][feat2], 'o', color=colors[u],\n alpha=0.6, markeredgecolor='none')\n plt.xlabel(feat1)\n plt.ylabel(feat2)\n plt.show()\n return", "def display_group_density_plot(df, groupby, on, palette = None, figsize = None, title=\"\", ax=None):\n if palette is None:\n palette = sns.color_palette('Set2')\n if figsize is None:\n figsize = (10, 5)\n if not isinstance(df, pd.core.frame.DataFrame):\n raise ValueError('df must be a pandas DataFrame')\n\n if not groupby:\n raise ValueError('groupby parameter must be provided')\n\n elif not groupby in df.keys():\n raise ValueError(groupby + ' column does not exist in the given DataFrame')\n\n if not on:\n raise ValueError('on parameter must be provided')\n\n elif not on in df.keys():\n raise ValueError(on + ' column does not exist in the 
given DataFrame')\n\n if len(set(df[groupby])) > 10:\n groups = df[groupby].value_counts().index[:10]\n\n else:\n groups = set(df[groupby])\n\n # Get relevant palette\n if palette:\n palette = palette[:len(groups)]\n else:\n palette = sns.color_palette()[:len(groups)]\n\n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n \n ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')\n for value, color in zip(groups, palette):\n sns.kdeplot(df.loc[df[groupby] == value][on], shade=True, color=color, label=value, ax=ax)\n if not title:\n title = str(\"Distribution of \" + on + \" per \" + groupby + \" group\")\n \n ax.set_title(title,fontsize=10)\n ax.set_xlabel(on, fontsize=10)\n return ax", "def get_report(dataset):\n\n dataset = dataset.round(2)\n print('Overall results (mean): ')\n display(dataset[['classifier', 'preprocessor', 'f1', 'precision', 'recall']].groupby(['preprocessor', 'classifier'])\n .mean().round(2))\n print('Overall results (max): ')\n display(dataset[['classifier', 'preprocessor', 'f1', 'precision', 'recall']].groupby(['preprocessor', 'classifier'])\n .max().round(2))\n print('Grouped by Preprocessor (mean):')\n display(dataset[['preprocessor', 'f1', 'precision', 'recall']].groupby('preprocessor').mean().round(2))\n print('Grouped by Classifier (mean):')\n display(dataset[['classifier', 'f1', 'precision', 'recall']].groupby('classifier').mean().round(2))\n\n preprocessors = dataset['preprocessor'].unique()\n metrics = ['f1', 'precision', 'recall']\n\n # For each metric, display top 10 rounds.\n for m in metrics:\n print(f'Top 10 by {m}:')\n display(dataset.sort_values(m, ascending=False).head(10).round(2))\n\n for p in preprocessors:\n for m in metrics:\n d = dataset[dataset['preprocessor'] == p]\n for c in dataset['classifier'].unique():\n plt.plot(d[d['classifier'] == c]['prior'].unique(), d[d['classifier'] == c].groupby('prior').mean()[m],\n label=str(c))\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)\n plt.title(m + ' - ' + str(p))\n plt.show()", "def plot_mean_roc_curve_of_classifiers(classifier_roc_list, data_set_description):\n if const.RECORD_RESULTS is True:\n fig = plt.figure(figsize=(8, 6.66))\n monochrome = (cycler(\"color\", [\"k\"]) * cycler(\"marker\", [\"\"]) *\n cycler(\"linestyle\", [\"-\", \"--\", \"-.\"]))\n color_arr = [\"#64B3DE\", \"#1f78b4\", \"#6ABF20\", \"#FBAC44\", \"#bc1659\", \"#B9B914\", \"#33a02c\", \"#ff7f00\", \"#6a3d9a\", \"black\", \"#b15928\", \"#e31a1c\"]\n plt.rc(\"axes\", prop_cycle=monochrome)\n line_style_index = 0\n color_index = 0\n\n for (test_run_roc_list, classifier_description) in classifier_roc_list:\n if not (None, None) in test_run_roc_list[0]:\n mean_tpr = 0.0\n mean_fpr = np.linspace(0, 1, 100)\n count = 0\n for roc_list in test_run_roc_list:\n for (tpr, fpr) in roc_list:\n mean_tpr += interp(mean_fpr, fpr, tpr)\n mean_tpr[0] = 0.0\n count += 1\n\n mean_tpr /= float(count)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n line_width = 0.5\n if line_style_index == 1:\n line_width = 0.8\n elif line_style_index == 2:\n line_width = 1.5\n\n plt.plot(mean_fpr, mean_tpr, c=color_arr[color_index], lw=line_width, alpha=1, label=\"{0} ({1:.3f})\".format(classifier_description, mean_auc))\n line_style_index = (line_style_index + 1) % 3\n color_index += 1\n\n plt.locator_params(axis='x', nbins=10)\n plt.locator_params(axis='y', nbins=10)\n plt.plot([0, 1], [0, 1], \"k--\", label=\"Random classification\", lw=0.8)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n 
plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"ROC curve for each classifier\")\n plt.legend(loc=\"lower right\", fancybox=True, frameon=True)\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/{0}_roc_classifier_plot_{1}.png\".format(data_set_description, current_time), bbox_inches=\"tight\")\n plt.close(fig)", "def PlotContributions( ax=None, dev=False, measure='DM', redshift=0.1, cumulative=False, N_inter=False, **scenario ):\n if ax is None:\n fig, ax = plt.subplots()\n for region in regions:\n models = scenario.get( region )\n if models:\n for model in models:\n P = GetLikelihood( region=region, model=model, measure=measure, redshift=redshift, N_inter=N_inter, dev=dev )\n PlotLikelihood( *P, measure=measure, label=region+': '+Label(model) , linestyle=linestyle_region[region], ax=ax, cumulative=cumulative )\n ax.legend()\n ax.set_title( \"redshift = %.1f\" % redshift )", "def plot_like(self, field, flist=None, fmax=False, tol=0.0,\n mean_lonlat=[True, False], title=None, **kwargs):\n # div=False, robust=False, vmax=None, vmin=None):\n from matplotlib.ticker import ScalarFormatter\n import matplotlib.pyplot as plt\n import aux_functions_strat as aux\n import pandas as pd\n # TODO: add area_mean support\n if not hasattr(self, 'results_'):\n raise AttributeError('No results yet... run model.fit(X,y) first!')\n rds = self.results_\n if field not in rds.data_vars:\n raise KeyError('No {} in results_!'.format(field))\n # if 'div' in keys:\n # cmap = 'bwr'\n # else:\n # cmap = 'viridis'\n plt_kwargs = {'yscale': 'log', 'yincrease': False, 'cmap': 'bwr'}\n if field in rds.attrs['sample_types']:\n orig = aux.xr_weighted_mean(rds['original'])\n try:\n times = aux.xr_weighted_mean(rds[field])\n except KeyError:\n print('Field not found..')\n return\n except AttributeError:\n times = rds[field]\n fig, axes = plt.subplots(nrows=2, sharex=True, figsize=(15, 7),\n num='Time_Level_Comperison')\n cmap_max = abs(max(abs(orig.min().values), abs(orig.max().values)))\n orig = orig.reindex({'time': pd.date_range(orig.time[0].values,\n orig.time[-1].values,\n freq='MS')})\n plt_sample = {**plt_kwargs}\n plt_sample.update({'center': 0.0, 'levels': 41, 'vmax': cmap_max})\n plt_sample.update(kwargs)\n con = orig.T.plot.contourf(ax=axes[0], **plt_sample)\n cb = con.colorbar\n cb.set_label(orig.attrs['units'], fontsize=10)\n ax = axes[0]\n ax.set_title(orig.attrs['long_name'] + ' original', loc='center')\n ax.yaxis.set_major_formatter(ScalarFormatter())\n # plot the PREDICTED :\n times = times.reindex({'time': pd.date_range(times.time[0].values,\n times.time[-1].values,\n freq='MS')})\n plt_sample.update({'extend': 'both'})\n con = times.T.plot.contourf(ax=axes[1], **plt_sample)\n # robust=robust)\n cb = con.colorbar\n try:\n cb.set_label(times.attrs['units'], fontsize=10)\n except KeyError:\n print('no units found...''')\n cb.set_label(' ', fontsize=10)\n ax = axes[1]\n ax.yaxis.set_major_formatter(ScalarFormatter())\n ax.set_title(times.attrs['long_name'] + ' predicted', loc='center')\n plt.subplots_adjust(left=0.05, right=0.995)\n [ax.invert_yaxis() for ax in con.ax.figure.axes]\n plt.show()\n return con\n elif field in rds.attrs['error_types']:\n # TODO: add contour lines\n if title is not None:\n suptitle = title\n else:\n suptitle = rds[field].name\n plt_error = {**plt_kwargs}\n plt_error.update({'cmap': 'viridis', 'add_colorbar': True,\n 'figsize': (6, 8)})\n 
plt_error.update(kwargs)\n if 'lon' in rds[field].dims:\n error_field = aux.xr_weighted_mean(rds[field],\n mean_on_lon=mean_lonlat[0],\n mean_on_lat=mean_lonlat[1])\n else:\n error_field = rds[field]\n try:\n con = error_field.plot.contourf(**plt_error)\n ax = plt.gca()\n ax.yaxis.set_major_formatter(ScalarFormatter())\n plt.suptitle(suptitle, fontsize=12, fontweight=750)\n except KeyError:\n print('Field not found or units not found...')\n return\n except ValueError:\n con = error_field.plot(xscale='log', xincrease=False,\n figsize=(6, 8))\n ax = plt.gca()\n ax.xaxis.set_major_formatter(ScalarFormatter())\n plt.suptitle(suptitle, fontsize=12, fontweight=750)\n plt.show()\n plt.gca().invert_yaxis()\n return con\n elif field in rds.attrs['feature_types']:\n # TODO: add contour lines\n con_levels = [0.001, 0.005, 0.01, 0.05] # for pvals\n con_colors = ['blue', 'cyan', 'yellow', 'red'] # for pvals\n import xarray as xr\n fdim = rds.attrs['feature_dim']\n if flist is None:\n flist = [x for x in rds[fdim].values if\n xr.ufuncs.fabs(rds[field].sel({fdim: x})).mean() > tol]\n if rds[fdim].sel({fdim: flist}).size > 6:\n colwrap = 6\n else:\n colwrap = None\n if 'lon' in rds[field].dims:\n feature_field = aux.xr_weighted_mean(rds[field],\n mean_on_lon=mean_lonlat[0],\n mean_on_lat=mean_lonlat[1])\n else:\n feature_field = rds[field]\n vmax = feature_field.max()\n if fmax:\n vmax = feature_field.sel({fdim: flist}).max()\n if title is not None:\n suptitle = title\n else:\n suptitle = feature_field.name\n plt_feature = {**plt_kwargs}\n plt_feature.update({'add_colorbar': False, 'levels': 41,\n 'figsize': (15, 4),\n 'extend': 'min', 'col_wrap': colwrap})\n plt_feature.update(**kwargs)\n try:\n if feature_field.name == 'pvalues':\n plt_feature.update({'colors': con_colors,\n 'levels': con_levels, 'extend': 'min'})\n plt_feature.update(**kwargs)\n plt_feature.pop('cmap', None)\n else:\n plt_feature.update({'cmap': 'bwr',\n 'vmax': vmax})\n plt_feature.update(**kwargs)\n fg = feature_field.sel({fdim: flist}).plot.contourf(col=fdim,\n **plt_feature)\n ax = plt.gca()\n ax.yaxis.set_major_formatter(ScalarFormatter())\n fg.fig.subplots_adjust(bottom=0.3, top=0.85, left=0.05)\n cbar_ax = fg.fig.add_axes([0.1, 0.1, .8, .025])\n fg.add_colorbar(\n cax=cbar_ax,\n orientation=\"horizontal\",\n format='%0.3f')\n fg.fig.suptitle(suptitle, fontsize=12, fontweight=750)\n except KeyError:\n print('Field not found or units not found...')\n return\n except ValueError as valerror:\n print(valerror)\n fg = feature_field.plot(col=fdim, xscale='log', xincrease=False,\n figsize=(15, 4))\n fg.fig.subplots_adjust(bottom=0.3, top=0.85, left=0.05)\n ax = plt.gca()\n ax.xaxis.set_major_formatter(ScalarFormatter())\n plt.suptitle(suptitle, fontsize=12, fontweight=750)\n plt.show()\n [ax.invert_yaxis() for ax in fg.fig.axes]\n return fg", "def ratio(gb_data, data_depcode, data_ratio_hospitalises,current_date, data_hospitalises, current_date_file, min_value_80p , nbhospitalises_80p) :\n start = time.time()\n fig, ax = plt.subplots(figsize=(12, 8))\n\n plt.title(f\"Ratio of in-hospital deaths to hospitalizations : {current_date}\", fontsize=20)\n plt.ylabel(\"Total number of deceases / Total number of hospitalized\")\n plt.xlabel(\"Total number of hospitalized\")\n\n for i, txt in enumerate(data_depcode):\n if (data_hospitalises[i] > data_hospitalises.max() * 0.20):\n ax.annotate(txt, (data_hospitalises[i], data_ratio_hospitalises[i]), xytext=(data_hospitalises[i] + 20, data_ratio_hospitalises[i])) \n\n 
plt.axhline(data_ratio_hospitalises.mean(), color='green', linestyle='--', label=f'average death ratio ({data_ratio_hospitalises.mean():.2f}%)')\n\n plt.axvline(min_value_80p, color='pink', linestyle='-', label=f\"80% of the number of hospitalized people in France are on the right side of the line ({nbhospitalises_80p:.0f} hospitalized)\")\n\n ax.scatter(data_hospitalises, data_ratio_hospitalises)\n\n ax.annotate('updated chart',xy=(1, 0), xytext=(-15, 10), fontsize=15,\n xycoords='axes fraction', textcoords = 'offset points',\n bbox=dict(facecolor = 'white', alpha = 0.9),\n horizontalalignment = 'right', verticalalignment = 'bottom')\n\n ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))\n plt.legend()\n\n current_date_file = gb_data['date'].max().strftime('%Y%m%d')\n end = time.time()\n print(\"Time spent on ratio plot: {0:.5f} s.\".format(end - start)) \n plt.show()", "def plot_runtimes(num_clusters_range):\n scp_times = []\n fcp_times = []\n\n for num in range(num_clusters_range[0], num_clusters_range[1]+1):\n result = compute_runtimes(num)\n scp_times.append(result[0])\n fcp_times.append(result[1])\n\n print \"\\n\\nscp_times:\", scp_times\n print \"fcp_times:\", fcp_times\n\n # plot n(x) vs time (y) on a standard plot\n xvals = range(num_clusters_range[0], num_clusters_range[1]+1)\n yvals1 = scp_times\n yvals2 = fcp_times\n\n plt.plot(xvals, yvals1, '-b', label='slow_closest_pair')\n plt.plot(xvals, yvals2, '-r', label='fast_closest_pair')\n plt.xlabel('number of initial clusters')\n plt.ylabel('run time (seconds)')\n plt.legend(loc='upper right')\n plt.show()", "def plot_trace(param, param_name='parameter'):\n \n # Summary statistics\n mean = np.mean(param)\n median = np.median(param)\n cred_min, cred_max = np.percentile(param, 2.5), np.percentile(param, 97.5)\n \n # Plotting\n plt.subplot(1,2,1)\n plt.plot(param,color=\"b\")\n plt.xlabel('samples')\n plt.ylabel(param_name)\n plt.axhline(mean, color='r', lw=2, linestyle='--')\n plt.axhline(median, color='c', lw=2, linestyle='--')\n plt.axhline(cred_min, linestyle=':', color='k', alpha=0.2)\n plt.axhline(cred_max, linestyle=':', color='k', alpha=0.2)\n plt.title('Trace and Posterior Distribution for {}'.format(param_name))\n\n plt.subplot(1,2,2)\n plt.hist(param, 30, density=True, color=\"blue\",); sns.kdeplot(param, shade=True,color=\"g\")\n plt.xlabel(param_name)\n plt.ylabel('density')\n plt.axvline(mean, color='r', lw=2, linestyle='--',label='mean')\n plt.axvline(median, color='c', lw=2, linestyle='--',label='median')\n plt.axvline(cred_min, linestyle=':', color='k', alpha=0.2, label='95% CI')\n plt.axvline(cred_max, linestyle=':', color='k', alpha=0.2)\n \n plt.gcf().tight_layout()\n plt.legend()", "def display_group_density_plot(df, groupby, on, palette, figsize):\n\n if not isinstance(df, pd.core.frame.DataFrame):\n raise ValueError('df must be a pandas DataFrame')\n\n if not groupby:\n raise ValueError('groupby parameter must be provided')\n\n elif not groupby in df.keys():\n raise ValueError(groupby + ' column does not exist in the given DataFrame')\n\n if not on:\n raise ValueError('on parameter must be provided')\n\n elif not on in df.keys():\n raise ValueError(on + ' column does not exist in the given DataFrame')\n\n if len(set(df[groupby])) > 10:\n groups = df[groupby].value_counts().index[:10]\n\n else:\n groups = set(df[groupby])\n\n # Get relevant palette\n if palette:\n palette = palette[:len(groups)]\n else:\n palette = sns.color_palette()[:len(groups)]\n\n # Plot\n fig = plt.figure(figsize=figsize)\n 
ax = fig.add_subplot(111)\n ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')\n\n for value, color in zip(groups, palette):\n sns.kdeplot(df.loc[df[groupby] == value][on], shade=True, color=color, label=value)\n\n ax.set_title(str(\"Distribution of \" + on + \" per \" + groupby + \" group\"), fontsize=10)\n return ax", "def make_accuracy_plot(ax,\n groundtruth_boxes,\n hpu_boxes,\n cpu_boxes,\n hpu_strategy,\n label,\n N=10,\n num_graph_points=20,\n match_mode=\"ellipse\",\n):\n print \"Making plot for\", repr(label)\n print \"TODO: this should graph seconds per image\"\n mix_fractions = np.linspace(0, 1.0, num_graph_points)\n # Plot confidence intervals\n min_ci = []\n max_ci = []\n mean_accs = []\n stderr_accs = []\n for mix_fraction in mix_fractions:\n accuracies = [\n maximum_F_score(\n groundtruth_boxes,\n hpu_strategy(hpu_boxes, cpu_boxes, mix_fraction),\n match_mode=match_mode,\n )\n for _ in xrange(N)\n ]\n mean_accs.append(np.mean(accuracies))\n stderr_accs.append(np.std(accuracies, ddof=1) / np.sqrt(N))\n #print mix_fraction, np.mean(accuracies)\n ax.errorbar(mix_fractions, mean_accs, stderr_accs, label=label)\n ax.set_xlabel(\"Fraction of HPU-labeled images\")\n ax.set_ylabel(\"Maximum F-score\")", "def plot_landings_quantiles(df):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_position(default_timeseries_position) \n\n Fn = df['CatchMT'].groupby([df.Year, df.Reg, df.Sreg]).mean()\n grp = df['CatchMT'].groupby([df.Year, df.Reg, df.Sreg])\n\n qmean = grp.mean().loc[:, 'All', 'All'] \n q90 = grp.quantile(0.90).loc[:, 'All', 'All'] \n q75 = grp.quantile(0.75).loc[:, 'All', 'All'] \n q50 = grp.quantile(0.50).loc[:, 'All', 'All'] \n q25 = grp.quantile(0.25).loc[:, 'All', 'All'] \n q10 = grp.quantile(0.10).loc[:, 'All', 'All'] \n\n # Don't plot the first year. Also, the data is shifted by one year.\n # For some reason, restricting the year range above results in a series\n # that still have a multi-index. 
This seems like the cleanest way to do\n # that.\n qmean = qmean.iloc[2:]\n q90 = q90.iloc[2:]\n q75 = q75.iloc[2:]\n q50 = q50.iloc[2:]\n q25 = q25.iloc[2:]\n q10 = q10.iloc[2:]\n qmean.index = qmean.index - 1\n q90.index = q90.index - 1\n q75.index = q75.index - 1\n q50.index = q50.index - 1\n q25.index = q25.index - 1\n q10.index = q10.index - 1\n \n colors = seaborn.color_palette(n_colors=3);\n\n q90.plot(ax=ax, color=colors[0], linestyle='--', label='90%') \n q75.plot(ax=ax, color=colors[1], linestyle='--', label='75%') \n qmean.plot(ax=ax, color='black', label='Mean') \n q50.plot(ax=ax, color=colors[2], linestyle='--', label='50%') \n q25.plot(ax=ax, color=colors[1], linestyle='--', label='25%') \n q10.plot(ax=ax, color=colors[0], linestyle='--', label='10%') \n\n ax.legend(loc='best')\n\n content = io.BytesIO()\n plt.savefig(content, format='png')\n content.seek(0)\n image_cache['landings']['quantiles'] = content\n\n plt.close()", "def plot_psychometric(df, color='black', ax=None, **kwargs):\n\n if len(df['signedContrast'].unique()) > 4:\n df2 = df.groupby(['signedContrast']).agg(\n {'choice': 'count', 'choice2': 'mean'}).reset_index()\n df2.rename(columns={\"choice2\": \"fraction\", \"choice\": \"ntrials\"}, inplace=True)\n\n pars, L = psy.mle_fit_psycho(df2.transpose().values, # extract the data from the df\n P_model='erf_psycho_2gammas',\n parstart=np.array([df2['signedContrast'].mean(), 20., 0.05,\n 0.05]),\n parmin=np.array([df2['signedContrast'].min(), 0., 0., 0.]),\n parmax=np.array([df2['signedContrast'].max(), 100., 1, 1]))\n sns.lineplot(np.arange(-100, 100), psy.erf_psycho_2gammas(pars, np.arange(-100, 100)),\n color=color, ax=ax)\n\n if 100 in df.signedContrast.values and not 50 in df.signedContrast.values:\n df['signedContrast'] = df.signedContrast.replace(-100, -35)\n df['signedContrast'] = df.signedContrast.replace(100, 35)\n\n brokenXaxis = True\n else:\n brokenXaxis = False\n\n # plot datapoints on top\n sns.lineplot(x='signedContrast', y='choice2', err_style=\"bars\", linewidth=0, linestyle='None',\n mew=0.5,\n marker='.', ci=68, data=df, color=color, ax=ax)\n\n if not brokenXaxis:\n # Reduce the clutter\n ax.set_xticks([-100, -50, 0, 50, 100])\n ax.set_xticklabels(['-100', '-50', '0', '50', '100'])\n ax.set_xlim([-110, 110])\n else:\n ax.set_xticks([-35, -25, -12.5, -6, 0, 6, 12.5, 25, 35])\n ax.set_xticklabels(['-100', '-25', '-12.5', '-6.25', '0', '6.25', '12.5', '25', '100'],\n size='x-small', rotation=-90)\n ax.set_xlim([-40, 40])\n\n ax.set_yticks([0, .5, 1])\n ax.set_ylim([-0.03, 1.03])\n ax.set_xlabel('Contrast (%)')\n\n return ax", "def plot_sampling(fname, df, of=\"r_neighbor\", show=True):\n xlabel = r\"Neighborhood $r_{c}$\"\n logx = False\n\n if of == \"n_iter\":\n xlabel = \"#Cycles\"\n logx = True\n\n fig, ax = plt.subplots(figsize=(15, 5))\n\n gb = df.groupby([of])\n aggregation = {\"stress\": [np.mean, np.std], \"correlation\": [np.mean, np.std]}\n gb = gb.agg(aggregation)\n\n gb.stress[\"mean\"].plot(yerr=gb.stress[\"std\"], color=\"crimson\", logx=logx)\n\n ax2 = ax.twinx()\n\n gb.correlation[\"mean\"].plot(yerr=gb.correlation[\"std\"],\n color=\"dodgerblue\", logx=logx)\n\n ax.set_xlabel(xlabel, fontsize=20)\n ax.set_ylabel(\"Stress\", fontsize=20)\n ax.set_ylim(0, 0.2)\n\n ax2.set_ylabel(r\"Correlation $\\gamma$\", fontsize=20)\n ax2.set_ylim(0, 1)\n\n plt.savefig(fname, dpi=300, format=\"png\", bbox_inches=\"tight\")\n\n if show:\n plt.show()", "def plot(self, ax=None, ylabel=\"CDF(x)\", xlabel=\"y\", upper_quantile=.25, lower_quantile=.75, 
force_recomputation=False, show=False, outputname=None, color=\"C2\", plot_cCDF=False):\n \n \"\"\"If data set is empty, return without plotting\"\"\"\n if self.samples_x == []:\n return\n \n \"\"\"Create figure if none was provided\"\"\"\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n \"\"\"Compute plots if not already done or if recomputation was requested\"\"\"\n if (self.series_y is None) or force_recomputation:\n self.make_figure(upper_quantile, lower_quantile)\n \n \"\"\"Switch to cCDF if requested\"\"\"\n if plot_cCDF:\n self.reverse_CDF()\n \n \"\"\"Plot\"\"\"\n ax.fill_between(self.quantile_series_x, self.quantile_series_y_lower, self.quantile_series_y_upper, facecolor=color, alpha=0.25)\n ax.plot(self.median_x, self.series_y, color=color)\n ax.plot(self.mean_x, self.series_y, dashes=[3, 3], color=color)\n \n \"\"\"Set plot attributes\"\"\"\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n \n \"\"\"Save if filename provided\"\"\"\n if outputname is not None:\n plt.savefig(outputname + \".pdf\")\n plt.savefig(outputname + \".png\", density=300)\n \n \"\"\"Show if requested\"\"\"\n if show:\n plt.show()", "def get_stability_plot(self):\n fig, ax = plt.subplots()\n first_episode = self.get_convergence_episode()\n\n values = self.stats['return_stats']['episode_totals']\n _, _, (y_lower, _) = self._moving_average(\n values, window=_ROLLING_WINDOW, p=_CONFIDENCE_LEVEL)\n episodes = np.arange(len(values))\n unstable_episodes = np.where(\n np.logical_and(values < y_lower[-1], episodes > first_episode))[0]\n\n ax.plot(episodes, values, color='steelblue', lw=2, alpha=.9,\n label='Return')\n for i, episode in enumerate(unstable_episodes):\n ax.axvline(episode, color='salmon', lw=2,\n label='Unstable' if i == 0 else None)\n ax.axvline(first_episode, color='seagreen', lw=2, label='Converged')\n\n ax.set_title('Normalized instability = {:.3f}%'.format(\n self.get_normalized_instability() * 100.))\n ax.legend()\n ax.set_ylabel('Return')\n ax.set_xlabel('Episode')\n return fig", "def plotConfidence(tar, measure):\n\tglobal normalized\n\tintegrated = \"\"\n\tif tar == \"integrated\":\n\t\tintegrated = True\n\tif tar == \"target\":\n\t\tintegrated = False\n\tintWarmthMap, intCompMap = parser.getMappings(normalized)\n\tif measure == \"warmth\":\n\t\tmapping = intWarmthMap\n\telse:\n\t\tmapping = intCompMap\n\tlabels = []\n\tvalues = []\n\tfor key in mapping.keys():\n\t\tcount = 0\n\t\tif (\"_\" in key and integrated) or (\"_\" not in key and not integrated):\n\t\t\tavgCertainty = 0\n\t\t\tfor i in range(0, len(mapping[key])):\n\t\t\t\tif mapping[key][i][1] != \"\":\n\t\t\t\t\tavgCertainty += int(mapping[key][i][1])\n\t\t\t\t\tcount += 1\n\t\t\tavgCertainty = avgCertainty / float(count)\n\t\t\tlabels.append(key)\n\t\t\tvalues.append(int(avgCertainty))\n\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\n\tN = len(values)\n\t\n\tind = np.arange(N) # the x locations for the groups\n\twidth = 0.35 # the width of the bars\n\n\trects1 = ax.bar(ind, values, width,\n\t color='blue',\n\t error_kw=dict(elinewidth=2,ecolor='red'))\n\n\t# axes and labels\n\tax.set_xlim(-width,len(ind)+width)\n\tax.set_ylim(0,100)\n\tax.set_ylabel('Certainty')\n\tif tar == \"integrated\":\n\t\tax.set_title('Integrated Certainty Measures for ' + measure)\n\telse:\n\t\tax.set_title('Target Certainty Measures for ' + measure)\n\txTickMarks = labels\n\tif tar == \"target\":\n\t\tk = .3 \n\telse:\n\t\tk = 1 \n\tax.set_xticks(ind+width - k)\n\txtickNames = 
ax.set_xticklabels(xTickMarks)\n\tplt.setp(xtickNames, rotation=45, fontsize=10)\n\n\tprint(\"values\", values, len(values))\n\tprint(\"labels\", labels, len(labels))\n\tplt.show()", "def plot_kde():\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:,0], data.data.numpy()[:,1], color=\"r\", shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:,0], dec_mean.data.numpy()[:,1], color=\"b\", shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()", "def test():\n X,Xval,Yval = _load_sample_data()\n mu,var = estimate_gaussian_params(X)\n pval = get_probability(Xval,mu,var)\n\n figure()\n plot(X[:,0],X[:,1],'b+',label='data'); xlabel(\"Latency (ms)\"); ylabel(\"Throughput (Mb/s)\")\n epsilon, F1 = determine_threshold(Yval,pval)\n print(\"Optimal epsilon and F1 score for sample dataset {}, {}\".format(epsilon, F1))\n plot_gaussian(mu,var,epsilon=epsilon)\n\n ## Plot Outliers\n predictions = get_probability(X,mu, var)\n outliers = X[predictions < epsilon]\n plot(outliers[:,0],outliers[:,1],'ro',mfc=None,label='outliers');\n legend()\n grid()", "def plot_convergence_and_temp(filename, algorithm):\n\n plt.figure(figsize=(8, 8))\n plt.title(\"Convergence of score\\n coolscheme:\" + algorithm.coolscheme + \", \" + str(len(algorithm.particles)) + \" particles\", size=25)\n\n # x axis\n plt.xticks(size=13)\n plt.xlabel(\"Steps\", size=18)\n\n # left y axis\n ax1 = plt.gca()\n ax1.set_ylabel(\"Score (total reciprocal of distance)\", size=18)\n label1 = ax1.plot(range(len(algorithm.scores_list)), algorithm.scores_list, color=\"turquoise\", label=\"Score\")\n plt.yticks(size=13)\n\n # right y axis\n ax2 = ax1.twinx()\n ax2.set_ylabel(\"Temperature\", size=18)\n label2 = ax2.plot(range(len(algorithm.temperatures)), algorithm.temperatures, color=\"orchid\", label=\"Temperature\")\n plt.yticks(size=13)\n\n # labels en legend\n plots = label1 + label2\n labs = [l.get_label() for l in plots]\n legend = ax1.legend(plots, labs, loc='upper right')\n legend.FontSize = 20\n plt.savefig(filename)", "def plot_evaluation(values, info, measures = ['Dice','Jaccard', 'TPR', 'TNR', '1-GCE', 'VS', 'RI', 'ARI', 'MI', '1-VOI', 'ICC','1/(1+PBD)', 'KAP', 'AUC', '1/(1+HD)', '1/(1+AVD)', 'MHD' ], colourmap=None, outfile='polar_results.png'):\n _min = info['minimum']\n _max = info['maximum']\n if colourmap is None:\n colourmap = [[86./255.,180./255.,233./255.] 
for ii in range(values.shape[0])]\n else:\n # normalize colourmap values between 0 and 1\n colourmap = (colourmap-_min)/(_max-_min)\n # apply cividis, returns the RBG1 values for cividis, for dots\n colourmap = [[cm.cividis(ii)] for ii in colourmap] \n\n # elements of the circle\n N = len(measures)\n # evenly space measures around circle\n x_as = [n / float(N) * 2 * pi for n in range(N)] \n\n # Set color of axes\n plt.rc('axes', linewidth=0.5, edgecolor=\"#888888\")\n\n # Create polar plot\n fig = plt.figure(figsize = (11,9.5))\n gs = gridspec.GridSpec(1, 3, width_ratios=[17,2,1])\n ax = plt.subplot(gs[0], polar=True)\n \n # Set position of y-labels\n ax.set_rlabel_position(0)\n\n # Set color and linestyle of grid\n ax.xaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n ax.yaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n\n # Set yticks\n plt.yticks([0.2, 0.4, 0.6, 0.8, 1.0], [\"0.2\", \"0.4\", \"0.6\", \"0.8\", \"1.0\"], fontsize=15)\n pos=ax.get_rlabel_position()\n ax.set_rlabel_position(pos+0.4*360./float(len(measures)))\n\n # Plot data\n for ii in np.arange(values.shape[0]):\n xx = np.asarray(x_as) + np.random.randn(len(x_as))*np.diff(x_as)[0]/15.\n data_norm = None\n if info['logplot']:\n data_norm = matplotlib.colors.LogNorm(vmin=_min, vmax=_max)\n sc = ax.scatter(xx, values[ii,:], 23, color=colourmap[ii]*len(xx), norm=data_norm, zorder=3) \n\n # Fill area\n # close the circle\n median = list(np.median(values, axis=0))\n median += median[:1]\n upper = list(np.percentile(values, 75, axis=0))\n upper += upper[:1]\n lower = list(np.percentile(values, 25, axis=0))\n lower += lower[:1]\n x_as += x_as[:1]\n ax.plot(x_as, median, color=[86./255.,180./255.,233./255.], zorder=5)\n ax.fill_between(x_as, upper, lower, zorder=4, color=[86./255.,180./255.,233./255.], alpha=0.3)\n\n # Set number of radial axes and remove labels\n plt.xticks(x_as[:-1], [])\n\n # Set axes limits\n plt.ylim(0, 1)\n\n # Draw ytick labels to make sure they fit properly\n for i in range(N):\n angle_rad = i / float(N) * 2 * pi-0.05\n text_size = 21\n if i in {3,8}:\n ax.text(angle_rad, 1.15, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {0}:\n ax.text(angle_rad, 1.25, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {1,5,7}:\n ax.text(angle_rad, 1.29, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {4}:\n ax.text(angle_rad, 1.32, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"top\")\n elif i in {10}:\n ax.text(angle_rad, 1.26, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {6}:\n ax.text(angle_rad, 1.25, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {9}:\n ax.text(angle_rad, 1.18, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n else:\n ax.text(angle_rad, 1.22, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n\n # colorbar location on figure\n cbaxes = plt.subplot(gs[2])\n\n # log scaling option\n norm = None\n if info['logplot']:\n norm = 
matplotlib.colors.LogNorm(vmin=_min,vmax=_max)\n\n img = plt.imshow(np.array([[_min,_max]]), aspect='auto', cmap=\"cividis\", norm=norm)\n img.set_visible(False)\n\n # initialize colorbar\n cbar = plt.colorbar(cax = cbaxes)\n\n # ticks and label\n c_values = cbar.get_ticks().tolist()\n \n ticklabels = [\"\" for ii in c_values]\n if _min < np.min(c_values):\n c_values = [_min] + c_values\n ticklabels = [\"%0.1f %s\" %(np.min(c_values), info['unit'])] + ticklabels\n else:\n ticklabels[0] = \"%0.1f %s\" %(np.min(c_values), info['unit'])\n\n if _max > np.max(c_values):\n c_values = c_values + [_max]\n ticklabels = ticklabels + [\"%0.1f %s\" %(np.max(c_values), info['unit'])]\n else:\n ticklabels[-1] = \"%0.1f %s\" %(np.max(c_values), info['unit'])\n \n cbar.set_ticks(c_values)\n cbar.set_ticklabels(ticklabels)\n cbaxes.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n cbar.ax.set_ylabel(info[\"label\"], labelpad=-20)\n \n # font sizes for colorbar\n cbar.ax.yaxis.label.set_size(19)\n cbar.ax.tick_params(labelsize=14)\n\n # Save and show polar plot \n plt.savefig(outfile)\n if info['display']:\n plt.show()\n plt.clf()\n plt.close('all')", "def plot_deviation(\n true_values: np.ndarray, pred_values: np.ndarray, save=None, show=True, return_axs=False, title: str = \"\"\n) -> Optional[Axes]:\n true_values, pred_values = _input_checks(true_values, pred_values)\n\n plt.ioff()\n\n n_par = true_values.shape[0]\n summary_fit = pd.concat(\n [\n pd.DataFrame(\n {\n \"deviation\": pred_values[i, :] - true_values[i, :],\n \"coefficient\": pd.Series([\"coef_\" + str(i) for x in range(pred_values.shape[1])], dtype=\"category\"),\n }\n )\n for i in range(n_par)\n ]\n )\n summary_fit[\"coefficient\"] = summary_fit[\"coefficient\"].astype(\"category\")\n\n fig, ax = plt.subplots()\n sns.violinplot(x=summary_fit[\"coefficient\"], y=summary_fit[\"deviation\"], ax=ax)\n\n if title is not None:\n ax.set_title(title)\n\n # Save, show and return figure.\n if save is not None:\n plt.savefig(save + \"_deviation_violin.png\")\n\n if show:\n plt.show()\n\n plt.close(fig)\n plt.ion()\n\n if return_axs:\n return ax\n return None", "def plot(self, n=1000, hist=True, kde=False):\n sns.set(rc={\"xtick.bottom\": True, \"ytick.left\": True})\n sims = [self.estimate() for i in range(n)]\n fig, ax = plt.subplots(figsize=(10, 8))\n\n if hist:\n kwargs = {'cumulative': False, 'edgecolor': \"k\", 'linewidth': 1}\n plot = sns.distplot(sims, bins=math.floor(max(sims)), hist=True,\n kde=kde,norm_hist=False, hist_kws=kwargs,\n ax=ax)\n plt.title('Histogram - days to project completion '\n '- n = {}'.format(n))\n plt.axvline(x=np.median(sims), color='red', label='50%')\n plt.text(np.median(sims)-0.5, -2, '50%', color='red')\n plt.show()\n\n else:\n kwargs = {'cumulative': True, 'edgecolor': \"k\", 'linewidth': 1}\n plot = sns.distplot(sims, bins=math.floor(max(sims)),\n hist=True, kde=False, norm_hist=True,\n hist_kws=kwargs)\n plt.title('Cumulative histogram - days project to completion '\n '- n = {}'.format(n))\n plt.show()\n\n return plot", "def evidence_tuning_plots(df, x_input = \"Mean Predicted Avg\",\n y_input = \"Empirical Probability\",\n x_name=\"Mean Predicted\",\n y_name=\"Empirical Probability\"):\n\n def lineplot(x, y, trials, methods, **kwargs):\n \"\"\"method_lineplot.\n\n Args:\n y:\n methods:\n kwargs:\n \"\"\"\n uniq_methods = set(methods.values)\n method_order = sorted(uniq_methods)\n\n method_new_names = [f\"$\\lambda={i:0.4f}$\" for i in method_order]\n method_df = []\n for method_idx, (method, 
method_new_name) in enumerate(zip(method_order,\n method_new_names)):\n lines_y = y[methods == method]\n lines_x = x[methods == method]\n for index, (xx, yy,trial) in enumerate(zip(lines_x, lines_y, trials)):\n\n to_append = [{x_name : x,\n y_name: y,\n \"Method\": method_new_name,\n \"Trial\" : trial}\n for i, (x,y) in enumerate(zip(xx,yy))]\n method_df.extend(to_append)\n method_df = pd.DataFrame(method_df)\n x = np.linspace(0,1,100)\n plt.plot(x, x, linestyle='--', color=\"black\")\n sns.lineplot(x=x_name, y=y_name, hue=\"Method\",\n alpha=0.8,\n hue_order=method_new_names, data=method_df,)\n # estimator=None, units = \"Trial\")\n\n df = df.copy()\n # Query methods that have evidence_new_reg_2.0\n df = df[[\"evidence\" in i for i in\n df['method_name']]].reset_index()\n\n # Get the regularizer and reset coeff\n coeff = [float(i.split(\"evidence_new_reg_\")[1]) for i in df['method_name']]\n df[\"method_name\"] = coeff\n df[\"Data\"] = convert_dataset_names(df[\"dataset\"])\n df[\"Method\"] = df[\"method_name\"]\n\n g = sns.FacetGrid(df, col=\"Data\", height=6, sharex = False, sharey = False)\n g.map(lineplot, x_input, y_input, \"trial_number\",\n methods=df[\"Method\"]).add_legend()", "def plotTimeDelta(data, type_plot, device):\n mean = data.mean()\n std = data.std()\n max_data = data.max()\n min_data = data.min()\n max_indx = np.argmax(data) # max value index\n min_indx = np.argmin(data) # min value index\n x = np.arange(min_data, max_data, 0.1)\n y = normfun(x, mean, std)\n res_quantile = quantileValues(data, device)\n if type_plot == 0:\n plt.plot(x, y, color='blue')\n annot_max_min(x, y)\n # plt.hist(data.dropna(), bins=500, rwidth=0.9, normed=True)\n plt.title('Time Delta distribution')\n plt.xlabel('Time Delta')\n plt.ylabel('Probability')\n sns.distplot(tmp.deltaSeconds.dropna(),\n kde=True, rug=True, rug_kws={\"color\": \"k\"},\n kde_kws={\"color\": \"red\", \"lw\": 3, \"label\": \"KDE\"},\n hist_kws={\"histtype\": \"step\", \"lw\": 3, \"alpha\": 1,\n \"color\": \"g\"},\n bins=500)\n # ax.set(xlabel='Vibration Intensity', ylabel='Probability')\n elif type_plot == 1: # plot the max and min point\n plt.plot(data)\n plt.plot(max_indx, data[max_indx], 'ks')\n show_max = '['+str(max_indx)+' '+str(data[max_indx])+']'\n plt.annotate(show_max,\n xytext=(max_indx, data[max_indx]),\n xy=(max_indx, data[max_indx]))\n plt.plot(min_indx, data[min_indx], 'gs')\n show_min = '['+str(min_indx)+' '+str(data[min_indx])+']'\n plt.annotate(show_min,\n xytext=(min_indx, data[min_indx]),\n xy=(min_indx, data[min_indx]))\n plt.title('Time Delta')\n plt.xlabel('Index')\n plt.ylabel('Vibration Intensity Value')\n elif type_plot == 2: # boxplot\n boxplot(data.dropna())\n return res_quantile", "def det_plot(data, group_by, plot_title, save_figure_path=None):\n subgroups = data.groupby(group_by)\n li_subgroups = subgroups.groups\n\n fontsize = 12\n fig, ax = plt.subplots(figsize=(8, 8), constrained_layout=True)\n for subgroup in li_subgroups:\n # for each subgroup\n df_subgroup = subgroups.get_group(subgroup)\n labels, scores = (\n df_subgroup[\"label\"].values.astype(int),\n df_subgroup[\"score\"].values,\n )\n fpr, fnr, thresholds = calculate_det_curves(labels, scores)\n ax = draw_det_curve(\n fpr, fnr, ax=ax, label=subgroup, fontsize=fontsize, title=plot_title\n )\n\n ax.xaxis.set_major_formatter(mtick.FormatStrFormatter(\"%.e\"))\n plt.minorticks_off()\n ax.set_ylabel(\"FNR (%)\", fontsize=fontsize)\n ax.set_xlabel(\"FPR\", fontsize=fontsize)\n plt.legend(fontsize=fontsize)\n ax.set_xlim([1e-4, 1])\n 
ax.set_ylim([0, 30])\n\n ax.tick_params(axis=\"both\", labelsize=fontsize)\n\n # save figure\n if save_figure_path is not None:\n plt.savefig(save_figure_path)", "def _plot_profile(self, x, y, label=None, return_fig=False):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots()\n idx = np.argsort(x)\n ax.plot(x[idx], y[idx], lw=1.0, color='k')\n if label is not None:\n ax.set_ylabel(f'{label}')\n ax.set_xlabel('Radius (au)')\n ax.set_xlim(0, x.max())\n if return_fig:\n return fig", "def graph_results(self):\n # fig = plt.figure()\n # fig.subplots_adjust(hspace=.35)\n\n # median_x_values = [num for num in\n # range(len(results.get_meaningful_medians()))]\n # median_y_values = results.get_meaningful_medians()\n\n # print(\"Meaningful medians:\", results.get_meaningful_medians())\n # print(\"Enumerated meaningful medians:\",\n # list(enumerate(results.get_meaningful_medians())))\n # print(\"Zipped enumerated meaningful medians:\",\n # list(zip(*enumerate(results.get_meaningful_medians()))))\n\n print(\"[Progress] Graphing results...\")\n\n median_x_values, median_y_values = \\\n zip(*enumerate(self.sim_results.get_median_balances()))\n median_graph = self.graph_fig.add_subplot(2, 1, 1)\n\n median_graph.plot(median_x_values, median_y_values)\n\n median_graph.set_title(\"Simulation Result Medians\")\n median_graph.set_xlabel(\"Roll #\")\n median_graph.set_ylabel(\"Median Balance\")\n\n mean_graph = self.graph_fig.add_subplot(2, 1, 2)\n\n mean_values_to_graph = \\\n self.sim_results.get_average_balances()\n mean_x_values, mean_y_values = \\\n zip(*enumerate(mean_values_to_graph))\n mean_graph.plot(mean_x_values, mean_y_values)\n\n mean_graph.set_title(\"Simulation Result Means\")\n mean_graph.set_xlabel(\"Roll #\")\n mean_graph.set_ylabel(\"Mean Balance\")\n\n plt.show()", "def plot_time_to_default_results(time_to_default_results_per_classifier, parameter=\"Balanced accuracy\"):\n color = iter(cm.Set1(np.linspace(0, 1, len(time_to_default_results_per_classifier[0][1]) + 1)))\n classifier_arr = []\n for i in range(len(time_to_default_results_per_classifier[0][1]) + 1):\n classifier_arr.append(list())\n\n for i in range(0, len(time_to_default_results_per_classifier)):\n data_balancer_results = time_to_default_results_per_classifier[i][1]\n x = 0\n mean_classification = 0\n for (result_arr, data_balancer_name, _) in data_balancer_results:\n result = result_arr[2]\n classifier_arr[x].append(result)\n mean_classification += result\n x += 1\n mean_classification /= float(len(data_balancer_results))\n classifier_arr[x].append(mean_classification)\n\n fig = plt.figure(figsize=(12, 10))\n\n classifiers = np.arange(len(classifier_arr))\n data_balancers = np.arange(len(classifier_arr[0])) * 3\n bar_width = 0.2\n opacity = 0.9\n\n for i in range(len(classifier_arr)):\n if i == len(classifier_arr) - 1:\n label = \"Mean classification\"\n else:\n label = time_to_default_results_per_classifier[0][1][i][1]\n plt.bar(data_balancers + (i * bar_width), classifier_arr[i], bar_width,\n alpha=opacity,\n color=color.next(),\n label=label)\n\n plt.locator_params(axis='y', nbins=10)\n plt.xlabel(\"Default range (days)\")\n plt.ylabel(parameter)\n plt.ylim([0.0, 1.00])\n plt.legend(loc=\"lower right\", fancybox=True, frameon=True)\n plt.title(\"{0} when trained on different default ranges\".format(parameter))\n feature_selection_labels = [time_to_default_results_per_classifier[i][0] for i in range(0, len(time_to_default_results_per_classifier))]\n plt.xticks(data_balancers + (bar_width / 2) * len(classifiers), 
feature_selection_labels)\n\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/time_to_default_results_per_classifier_plot_{0}_{1}.png\".format(parameter, current_time))\n plt.close(fig)", "def plot_slope_by_clust(ax, model, k, lower_bound=0, upper_bound=1, estimate_x_val=3, slope_col='r'):\n\n # Calculate slope\n x_slope = np.array([lower_bound, upper_bound])\n y_slope_pred_mean = model.obsmodel[k].model.predict(x_slope.reshape(-1, 1))[0]\n slope = ((y_slope_pred_mean[1] - y_slope_pred_mean[0]) / (x_slope[1] - x_slope[0]))[0]\n intercept = y_slope_pred_mean[0][0]\n\n x_slp_vals = np.array(ax.get_xlim())\n y_slp_vals = intercept + slope * x_slp_vals\n\n ax.plot(x_slp_vals, y_slp_vals, '--', color=slope_col, linewidth=3)\n\n\n # Estimate difference between slope prediction and MoGP at estimate_x_val years\n mogp_estim = model.obsmodel[k].model.predict(np.array([estimate_x_val]).reshape(-1, 1))[0][0][0]\n slope_estim = (intercept + slope * estimate_x_val)\n estim_diff = (mogp_estim - slope_estim)\n\n return estim_diff", "def compare_agg_funcs(df, y):\n agg_funcs = [\"mean\", \"max\", \"min\", \"sum\", \"median\"]\n compare_agg_func = pd.DataFrame(columns = agg_funcs, index = df.columns)\n for column in df:\n for agg in agg_funcs:\n prep = preprocess_X_values(df[[column]], agg_func = agg)\n prep = prep.join(y).dropna(subset = y.columns)\n compare_agg_func.loc[column, agg] = prep.corr()[y.columns[0]][0]\n fig = sns.heatmap(np.abs(compare_agg_func.fillna(0)), annot = True, cmap = \"Greys\")\n fig.figure.savefig(f\"output/compare_aggregations.png\", dpi = 300, transparent = False)\n return compare_agg_func", "def plot_curve_fit(\n func: Callable,\n result: FitData,\n ax=None,\n num_fit_points: int = 100,\n labelsize: int = 14,\n grid: bool = True,\n fit_uncertainty: List[Tuple[float, float]] = None,\n **kwargs,\n):\n if ax is None:\n ax = get_non_gui_ax()\n\n if fit_uncertainty is None:\n fit_uncertainty = []\n elif isinstance(fit_uncertainty, tuple):\n fit_uncertainty = [fit_uncertainty]\n\n # Default plot options\n plot_opts = kwargs.copy()\n if \"color\" not in plot_opts:\n plot_opts[\"color\"] = \"blue\"\n if \"linestyle\" not in plot_opts:\n plot_opts[\"linestyle\"] = \"-\"\n if \"linewidth\" not in plot_opts:\n plot_opts[\"linewidth\"] = 2\n\n xmin, xmax = result.x_range\n\n # Plot fit data\n xs = np.linspace(xmin, xmax, num_fit_points)\n ys_fit_with_error = func(xs, **dict(zip(result.popt_keys, result.popt)))\n\n # Line\n ax.plot(xs, unp.nominal_values(ys_fit_with_error), **plot_opts)\n\n # Confidence interval of N sigma values\n stdev_arr = unp.std_devs(ys_fit_with_error)\n if np.isfinite(stdev_arr).all():\n for sigma, alpha in fit_uncertainty:\n ax.fill_between(\n xs,\n y1=unp.nominal_values(ys_fit_with_error) - sigma * stdev_arr,\n y2=unp.nominal_values(ys_fit_with_error) + sigma * stdev_arr,\n alpha=alpha,\n color=plot_opts[\"color\"],\n )\n\n # Formatting\n ax.tick_params(labelsize=labelsize)\n ax.grid(grid)\n return ax", "def plot_groups(sb, **kw):\n\n #check kws\n B_flag = True\n if('B' in kw):\n B_flag = bool(kw['B'])\n E_flag = True\n if('E' in kw):\n E_flag = bool(kw['E'])\n ugroups = sb.unique_group_names\n if('groups' in kw):\n ugroups = set(kw['groups'])\n if('return_figs' in kw):\n if(kw['return_figs']):\n return_figs = True\n figs = {'E': {}, 'B': {}}\n else:\n return_figs = False\n else:\n if((not B_flag) or (not E_flag)):\n group_lim = 8\n else:\n group_lim = 4\n if(len(ugroups) <= 
group_lim):\n return_figs = True\n figs = {'E': {}, 'B': {}}\n else:\n return_figs = False\n\n flags = [B_flag, E_flag]\n fields = ['Bmax', 'Emax']\n ylabels = ['Maximum Magnetic Field (mG)', 'Maximum Electric Field (kV/m)']\n title_pre = ['Maximum Magnetic Field - ',\n 'Maximum Electric Field - ']\n keys = ['B', 'E']\n it = zip(flags, fields, ylabels, title_pre, keys)\n\n #iterate over groups with more than 1 CrossSection\n for xss in sb.groups:\n if(xss[0].group in ugroups):\n for (fl, fi, yl, ti, k) in it:\n if(fl):\n #get plotting objects\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n #init handles and labels lists for legend\n kw['H'], kw['L'] = [], []\n #plot the Bmax results for each xs in the group\n _plot_group_fields(ax, xss, fi, **kw)\n #plot wires\n max_field = max([xs.fields[fi].max() for xs in xss])\n _plot_group_wires(ax, xss, max_field, **kw)\n #draw ground surface if necessary\n if(len(xss) <= 2):\n _check_und_conds(xss, [ax], **kw)\n #plot ROW lines\n _plot_group_ROW_edges(ax, xss, **kw)\n #set axis text and legend\n ax.set_xlabel('Distance (ft)')\n ax.set_ylabel(yl)\n ax.set_title(textwrap.fill(ti + str(xss[0].group)))\n ax.legend(kw['H'], kw['L'], **_leg_kw)\n _format_line_axes_legends(ax)\n #save the figure if keyword 'save' == True, and append fig\n _save_fig('group_%s-%s' % (str(xss[0].group), fi), fig, **kw)\n #store the fig or close it\n if(return_figs):\n figs[k][xss[0].group] = fig\n else:\n plt.close(fig)\n\n if(return_figs):\n return(figs)", "def topfeats_boxplots_by_group(df, test_results_df, grouping_variable, \n plot_save_dir=None, p_value_threshold=0.05, \n n_topfeats=5):\n \n if plot_save_dir:\n # Ensure directory exists to save plots\n plot_save_dir.mkdir(exist_ok=True, parents=True)\n \n pvals_corrected = test_results_df.loc['pval_corrected']\n n_sigfeats = sum(pvals_corrected < p_value_threshold)\n \n if pvals_corrected.isna().all():\n print(\"No signficant features found in control with respect to '%s'\" % grouping_variable)\n elif n_sigfeats > 0:\n # Rank p-values in ascending order\n ranked_pvals = pvals_corrected.sort_values(ascending=True)\n \n # Drop non-sig feats\n ranked_pvals = ranked_pvals[ranked_pvals < p_value_threshold]\n \n # Select the first n pvalues for plotting\n topfeats = ranked_pvals[:n_topfeats]\n \n if n_sigfeats < n_topfeats:\n print(\"WARNING: Only %d features found to vary significantly with respect to '%s'\"\\\n % (n_sigfeats, grouping_variable))\n \n print(\"\\nTop %d features found to differ significantly with respect to '%s':\\n\"\\\n % (len(topfeats), grouping_variable))\n print(*[feat + '\\n' for feat in list(topfeats.index)])\n \n # for f, feature in enumerate(features_to_analyse[0:25]):\n for feature in topfeats.index:\n print(\"P-value for '%s': %s\" % (feature, str(topfeats[feature])))\n feat_df = df[[grouping_variable, feature]]\n \n # Plot boxplots of control across days for most significant features\n plt.close('all')\n fig = plt.figure(figsize=[10,6])\n ax = fig.add_subplot(1,1,1)\n sns.boxplot(x=grouping_variable, y=feature, data=feat_df)\n ax.set_xlabel(grouping_variable, fontsize=15, labelpad=12)\n ax.set_title(feature, fontsize=20, pad=20)\n \n # TODO: Add pvalues to plot?\n \n if plot_save_dir:\n # Save plot\n plots_outpath = plot_save_dir / (feature + '_wrt_' + grouping_variable + '.eps')\n savefig(plots_outpath, tellme=True, saveFormat='eps') \n plt.close()\n else:\n plt.show(); plt.pause(5)", "def plot_convergence_distance(xs, a, n, coeff_func, func_name, f, b, save=False,\n dirname=DEFAULT_DIR):\n 
betas = []\n for x in xs:\n print(x)\n series = legendre_series(x, coeff_func(a))\n degrees = np.arange(n)\n values = np.array([next(series) for _ in degrees])\n errors = np.abs(f(x, a) - values)\n\n a_min = -convergence_rate(x, a, b)\n alpha, beta = convergence_line_log(degrees, errors, a_min)\n betas.append(beta)\n\n fig = plt.figure(figsize=(16, 8))\n plt.xlabel(r\"$x$\")\n plt.ylabel(r\"$\\beta(x)$\")\n plt.semilogy(xs, betas, '.', basey=10)\n\n if save:\n fpath = os.path.join(dirname, \"convergence_distances\", func_name)\n os.makedirs(fpath, exist_ok=True)\n plt.savefig(os.path.join(fpath, f\"{a}.png\"))\n else:\n plt.show()\n plt.close(fig)", "def plot(self, ax=None, savefile=None, shells=None, color='b', title=None,\n xlabel=None, ylabel=None, withavg=False):\n import matplotlib.pyplot as plt\n if ax is None:\n plt.figure()\n axset=plt\n else:\n axset=ax\n\n cmax = float(max(self.counts))\n total = sum(self.counts)\n nalpha = 0.85 if cmax/total > 0.33 else 0.65\n maxy = 1.\n for di, df in enumerate(self.dfs):\n alpha=nalpha*self.counts[di]/cmax\n axset.plot(df.x, df.df, color=color, alpha=alpha)\n maxy_ = np.max(df.df)\n if maxy_ > maxy:\n maxy = maxy_\n\n if withavg and len(self) > 0:\n x = self.dfs[0].x\n axset.plot(x, self.average, 'r-')\n maxy_ = np.max(self.average)\n if maxy_ > maxy:# pragma: no cover\n maxy = maxy_\n\n if len(self) > 0:\n dtype = self.dfs[0].dtype\n unit = \"Ang.\" if dtype == \"R\" else \"Rad.\"\n tstr = \"Radial\" if dtype == \"R\" else \"Angular\"\n else:# pragma: no cover\n unit = \"unknown units\"\n tstr = \"\"\n \n if ax is None:\n if title is None:\n plt.title(\"{} Distribution Function of Collection\".format(tstr))\n else:\n plt.title(title)\n if xlabel is None:\n plt.xlabel(\"Distance ({})\".format(unit))\n else:\n plt.xlabel(xlabel)\n if ylabel is None:\n plt.ylabel(\"Accumulated Density\")\n else:\n plt.ylabel(ylabel)\n\n _plot_shells(axset, shells, maxy)\n \n if savefile is not None:\n plt.savefig(savefile)\n\n from gblearn.base import testmode\n if not testmode:# pragma: no cover\n plt.show()\n return axset", "def plot_svc_decision_function(clf, ax=None):\n plot_decision_function(clf.decision_function, [-1, 0, 1], ax)", "def plot_results(self):\n experiment_utils.plot_exp_metric_comparison(self.experiments(reverse_sort=False))", "def plot_yearly_count(result_dict, plot_ratio=False, \\\n plot_average_comparison=True, scale_to_max=False, \\\n ax=None, figsize=(8.0,6.0), dpi=100.0):\n \n # Create a new figure.\n if ax is None:\n fig, ax = pyplot.subplots(figsize=figsize, dpi=dpi)\n fig.subplots_adjust(left=0.11, right=0.99, bottom=0.13, top=0.99)\n else:\n fig = ax.get_figure()\n # We'll be keeping track of the maximum result (to scale the y axis), so\n # we start at 0. 
(It will be updated as we go along.)\n max_result = 0\n \n # If there are more than one comparison terms, compute their average.\n if len(result_dict[\"_comparison\"]) > 1:\n # Collect all numbers in a single matrix, as float rather than int\n # to make sure we can average OK.\n shape = ( \\\n len(result_dict[\"_comparison\"]), \\\n len(result_dict[\"_year_range\"]))\n a = numpy.zeros(shape, dtype=numpy.float64)\n for i, term in enumerate(result_dict[\"_comparison\"]):\n a[i,:] = numpy.array(result_dict[term], dtype=numpy.float64)\n if scale_to_max:\n a[i,:] /= numpy.max(a[i,:])\n # Compute average and 95% confidence intervals.\n m = numpy.mean(a, axis=0)\n # Only compute the following if more than two comparison terms are\n # available.\n if len(result_dict[\"_comparison\"]) > 2:\n sd = numpy.std(a, axis=0)\n sem = sd / numpy.sqrt(a.shape[0]-1)\n ci = 1.96 * sem\n else:\n ci = None\n # If there is only one comparison term, use it.\n else:\n m = numpy.array(result_dict[result_dict[\"_comparison\"][0]], \\\n dtype=numpy.float64)\n if scale_to_max and numpy.max(m) > 0:\n m /= numpy.max(m)\n ci = None\n\n # Plot the results together if the user opted for this.\n if plot_average_comparison and not plot_ratio:\n # Choose the label for the line.\n if len(result_dict[\"_comparison\"]) > 1:\n lbl = \"comparison\"\n else:\n lbl = result_dict[\"_comparison\"][0]\n # Plot the average and confidence intervals.\n ax.plot(result_dict[\"_year_range\"], m, \"-\", lw=2, \\\n color=_colour_for_comparison, label=lbl)\n highest = numpy.max(m)\n if ci is not None:\n ax.fill_between(result_dict[\"_year_range\"], m, m-ci, m+ci, \\\n color=_colour_for_comparison, alpha=0.3)\n highest = numpy.max(m+ci)\n # Check if this term's maximum is higher than the mean plus the\n # confidence interval.\n if highest > max_result:\n max_result = highest\n # Plot the comparison results individually if the user opted for this.\n elif not plot_average_comparison and not plot_ratio:\n # Plot the results.\n for term in result_dict[\"_comparison\"]:\n y = numpy.array(result_dict[term], dtype=numpy.float64)\n if scale_to_max and numpy.max(y) > 0:\n y /= numpy.max(y)\n ax.plot(result_dict[\"_year_range\"], y, \"-\", lw=2, \\\n color=_colour_for_comparison, label=term)\n # Check if this term's maximum is higher than the current.\n if numpy.max(y) > max_result:\n max_result = numpy.max(y)\n \n # Plot the results for all target terms.\n for i, term in enumerate(result_dict[\"_target\"]):\n # Pick the next colour in the list.\n col = _colours[i % len(_colours)]\n # Overwrite the plot colour if there is no comparison, and the term\n # happens to be banana. This, obviously, turns the colour to yellow.\n if plot_ratio and term in [\"banana\",\"\\\"banana\\\"\",\"\\'banana\\'\"]:\n col = \"#c4a000\"\n # Compute the result.\n y = numpy.array(result_dict[term], dtype=numpy.float64)\n if scale_to_max and numpy.max(y) > 0:\n y /= numpy.max(y)\n if plot_ratio:\n y[m>0] /= m[m>0]\n # If the reference keyword is 0, comparisons make no sense. 
The\n # expected behaviour here could perhaps be to set the keyword\n # ratio to infinite, but given the futility of such a comparison,\n # perhaps NaN or 0 are better options.\n y[m==0] = numpy.NaN\n # Plot the line for the result.\n ax.plot(result_dict[\"_year_range\"], y, \"-\", lw=2, color=col, \\\n label=term)\n # Check if this term's maximum is higher than the current.\n if numpy.nanmax(y) > max_result:\n max_result = numpy.nanmax(y)\n \n # If we have more than 10 years, only write ticks on the even years.\n if 30 >= len(result_dict[\"_year_range\"]) > 10:\n # The starting index (si) should be 0 if the first year is even, and\n # 1 if the first year is odd.\n si = result_dict[\"_year_range\"][0] % 2\n # Create a list of indices to slice only the even years.\n xi = range(si, len(result_dict[\"_year_range\"]), 2)\n # Create empty tick labels for all recorded years. (Note: This will\n # only work for years -999 to 9999; just up the number in \"|U4\" if\n # you're somehow still using this in the future, or want to include\n # references earlier than 999 BC.\n xticklabels = numpy.zeros(len(result_dict[\"_year_range\"]), dtype=\"|U4\")\n xticklabels[xticklabels==\"0\"] = \"\"\n # Set only the recorded year tick labels.\n xticklabels[xi] = numpy.array(result_dict[\"_year_range\"])[xi]\n # If we have more than 30 years, only write ticks every 5 years.\n elif len(result_dict[\"_year_range\"]) > 30:\n # Find the lowest year that is divisible by 5.\n si = None\n for i in range(len(result_dict[\"_year_range\"])):\n if result_dict[\"_year_range\"][i] % 5 == 0:\n si = i\n break\n # Create a list of indices to slice only the %5 years.\n xi = range(si, len(result_dict[\"_year_range\"]), 5)\n # Create empty tick labels for all recorded years. (Note: This will\n # only work for years -999 to 9999; just up the number in \"|U4\" if\n # you're somehow still using this in the future, or want to include\n # references earlier than 999 BC.\n xticklabels = numpy.zeros(len(result_dict[\"_year_range\"]), dtype=\"|U4\")\n xticklabels[xticklabels==\"0\"] = \"\"\n # Set only the recorded year tick labels.\n xticklabels[xi] = numpy.array(result_dict[\"_year_range\"])[xi]\n # If we have 10 years or fewer, simply use all as tick labels.\n else:\n xticklabels = map(str, result_dict[\"_year_range\"])\n # Set the x ticks (for all recorded years) and x tick labels (created\n # above; either for all years or only for even years.)\n ax.set_xticks(result_dict[\"_year_range\"])\n ax.set_xticklabels(map(str, xticklabels), fontsize=16, rotation=85)\n # Set the axis limits. For the x-axis, this is the first year minus 1,\n # and the last year plus one. 
For the y-axis, this is 0 to the maximum\n # number of search results plus a small margin.\n ax.set_xlim([result_dict[\"_year_range\"][0]-1, \\\n result_dict[\"_year_range\"][-1]+1])\n ax.set_ylim([0, max_result*1.05])\n # Set the y label.\n if plot_ratio:\n if (len(result_dict[\"_comparison\"]) == 1) and \\\n (result_dict[\"_comparison\"][0] in [\"banana\",\"\\\"banana\\\"\",\"\\'banana\\'\"]):\n ylbl = \"Banana ratio\"\n else:\n ylbl = \"Relative publication ratio\"\n else:\n ylbl = \"Number of publications\"\n if scale_to_max:\n ylbl += \" (max-scaled)\"\n ax.set_ylabel(ylbl, fontsize=20)\n # Draw the legend.\n ax.legend(loc=\"upper left\", fontsize=16)\n \n return fig, ax", "def plot_confidence_interval_for_variable (model, X, y, variable):\n\n preds = np.stack([t.predict(X) for t in model.estimators_], axis=1)\n X_ds_new = X.copy()\n X_ds_new['actual'] = y\n X_ds_new['pred'] = np.mean(preds, axis=1)\n X_ds_new['pred_std'] = np.std(preds, axis=1)\n\n X_ds_grp = X_ds_new.groupby(variable)['actual', 'pred', 'pred_std'].agg('mean')\n X_ds_grp['count'] = X_ds_new[variable].value_counts()\n\n print (f'Average Predicted value and Std Dev by : {variable}')\n display(X_ds_grp)\n print ('')\n print (f'Distribution of Predicted value by : {variable}')\n sns.catplot(x=variable, y='pred', data=X_ds_new, kind='box')\n plt.show()", "def plot_mcmc_behaviour(ax, samples_mcmc, param_mcmc, dist_mcmc, num_average=100):\n num_samples = len(samples_mcmc[:, 0])\n num_average = int(num_average)\n n_points = int((num_samples - num_samples % num_average) / num_average)\n for i, param_name in enumerate(param_mcmc):\n samples = samples_mcmc[:, i]\n samples_averaged = np.average(samples[:int(n_points * num_average)].reshape(n_points, num_average), axis=1)\n end_point = np.mean(samples_averaged)\n samples_renormed = (samples_averaged - end_point) / np.std(samples_averaged)\n ax.plot(samples_renormed, label=param_name)\n\n dist_averaged = -np.max(dist_mcmc[:int(n_points * num_average)].reshape(n_points, num_average), axis=1)\n dist_normed = (dist_averaged - np.max(dist_averaged)) / (np.max(dist_averaged) - np.min(dist_averaged))\n ax.plot(dist_normed, label=\"logL\", color='k', linewidth=2)\n ax.legend()\n return ax", "def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n 
ax_list[1].plot(x, y_age_0, label='Under 30', color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')", "def make_comparison_plot(args, res, keys, min_length):\n directory = args.directory\n\n # Build the plot.\n fig, ax = plt.subplots(figsize=(args.figSizeX, args.figSizeY))\n\n # Stack the results groups, thus, each must be the same shape.\n sns.tsplot(data = np.stack(res, axis=2), condition=keys, ax=ax, ci=[68, 95])\n \n # Save the plot.\n ax.set_title('Average Return by Group, N=' + str(min_length), fontsize=18)\n ax.set_xlabel('Bin', fontsize=18)\n ax.set_ylabel('Average Return', fontsize=18)\n ax.legend(fontsize=18)\n plt.tick_params(axis='both', which='major', labelsize=18)\n ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n plt.savefig(os.path.join(directory, 'group_comparison.png'), \n bbox_inches='tight')", "def plot_results(sgd_train_acc, sgd_train_std, sgd_heldout_acc, sgd_heldout_std, sgd_test_acc,\n dt_train_acc, dt_train_std, dt_heldout_acc, dt_heldout_std, dt_test_acc,\n dt4_train_acc, dt4_train_std, dt4_heldout_acc, dt4_heldout_std, dt4_test_acc,\n stumps_train_acc, stumps_train_std, stumps_heldout_acc, stumps_heldout_std, stumps_test_acc):\n train_x_pos = [0, 4, 8, 12]\n cv_x_pos = [1, 5, 9, 13]\n test_x_pos = [2, 6, 10, 14]\n ticks = cv_x_pos\n\n labels = ['sgd', 'dt', 'dt4', 'stumps (4 x 50)']\n\n train_accs = [sgd_train_acc, dt_train_acc, dt4_train_acc, stumps_train_acc]\n train_errors = [sgd_train_std, dt_train_std, dt4_train_std, stumps_train_std]\n\n cv_accs = [sgd_heldout_acc, dt_heldout_acc, dt4_heldout_acc, stumps_heldout_acc]\n cv_errors = [sgd_heldout_std, dt_heldout_std, dt4_heldout_std, stumps_heldout_std]\n\n test_accs = [sgd_test_acc, dt_test_acc, dt4_test_acc, stumps_test_acc]\n\n fig, ax = plt.subplots()\n ax.bar(train_x_pos, train_accs, yerr=train_errors, align='center', alpha=0.5, ecolor='black', capsize=10, label='train')\n ax.bar(cv_x_pos, cv_accs, yerr=cv_errors, align='center', alpha=0.5, ecolor='black', capsize=10, label='held-out')\n ax.bar(test_x_pos, test_accs, align='center', alpha=0.5, capsize=10, label='test')\n ax.set_ylabel('Accuracy')\n ax.set_xticks(ticks)\n ax.set_xticklabels(labels)\n ax.set_title('Models')\n ax.yaxis.grid(True)\n ax.legend()\n plt.tight_layout()", "def performancePlot(plot=\"ROC\"):\n fig, ax = plt.subplots()\n colors = {\"ML\": \"red\", \"Null YFP\": \"gold\", \"Null DAPI\": \"blue\"}\n mapp = pickle.load(open(\"pickles/mapp_fold_-1.pk\", \"rb\"))\n null_mapp = pickle.load(open(\"pickles/null_YFP_mapp_fold_-1.pk\", \"rb\"))\n null_DAPI_mapp = pickle.load(open(\"pickles/null_DAPI_mapp_fold_-1.pk\", \"rb\"))\n \n i = 0\n for m in [mapp, null_mapp, null_DAPI_mapp]:\n coordinates = [] #list of tuples (thresh, x point, y point) to plot\n for key in m: ##for each threshold\n TPs = sum([x[0] for x in m[key]])\n FPs = sum([x[1] for x in m[key]])\n TNs = sum([x[2] for x in m[key]])\n FNs = sum([x[3] for x in m[key]])\n if plot == \"PRC\":\n x = TPs / float(TPs + FNs) #recall (TPR)\n y = TPs / float(TPs + FPs) #precision\n if plot == \"ROC\":\n x = FPs / float(FPs + TNs) #FPR \n y = TPs / float(TPs + FNs) #recall (TPR)\n if not (np.isnan(x) or np.isnan(y)): \n coordinates.append((key, x, y))\n coordinates = sorted(coordinates, key=lambda x: x[0]) ##sort by threshold\n x = [t[1] for t in coordinates][::-1]\n y = [t[2] for t in coordinates][::-1]\n 
# print(i, coordinates)\n # thresholds = [entry[0] for entry in coordinates]\n # for j, txt in enumerate(thresholds):\n # ax.annotate(txt, (x[j], y[j]), fontsize=5)\n auc = np.trapz(y,x)\n if i == 0:\n label = \"ML\"\n if i == 1:\n label = \"Null YFP\"\n if i == 2:\n label = \"Null DAPI\" \n ax.plot(x, y, linewidth=2.0, color=colors[label], label=\"{} Model, AUC = {}\".format(label, str(round(auc, 2)))) \n i += 1\n plt.title(\"{} Curves\".format(plot), fontname=\"Times New Roman\", fontsize=12)\n ax.set_xlim((0,1))\n ax.set_ylim((0,1))\n plt.rc('font',family='Times New Roman')\n plt.xticks(fontname=\"Times New Roman\", fontsize=12)\n plt.yticks(fontname=\"Times New Roman\", fontsize=12)\n if plot == \"ROC\":\n ax.set_xlabel(\"False Positive Rate\", fontname=\"Times New Roman\", fontsize=12)\n ax.set_ylabel(\"True Positive Rate\",fontname=\"Times New Roman\", fontsize=12)\n ax.plot([0, .5, 1], [0,.5, 1], linestyle=\"--\", linewidth=1.0, color=\"black\")\n ax.legend(loc='lower right',prop={\"family\":\"Times New Roman\", \"size\":10})\n plt.savefig(\"matplotlib_figures/ROC.png\", dpi=300)\n if plot == \"PRC\":\n positives = TPs + FNs ##doesn't matter which map we use, positive prevalence independent of map \n total = TPs + FPs + TNs + FNs \n positive_prevalence = positives / float(total)\n ax.hlines(y=positive_prevalence, xmin=0, xmax=1, linestyle=\"--\", linewidth=1.0, color=\"black\")\n ax.set_xlabel(\"Recall\", fontname=\"Times New Roman\", fontsize=12)\n ax.set_ylabel(\"Precision\",fontname=\"Times New Roman\", fontsize=12)\n ax.set_xlim((0,1))\n ax.set_ylim((0,1))\n ax.legend(loc='upper right',prop={\"family\":\"Times New Roman\", \"size\":10})\n plt.savefig(\"matplotlib_figures/PRC.png\", dpi=300)", "def visualize(dcf_prices, current_share_prices, regress = True):\n # TODO: implement\n return NotImplementedError", "def PlotLikelihoodEvolution( measure='DM', dev=False, scenario={}, ax=None, measureable=False, redshift_bins=redshift_bins, colorbar=True, force=False, alpha=0.5, **kwargs ):\n if ax is None:\n fig, ax = plt.subplots()\n for z, color in zip( redshift_bins, Rainbow(redshift_bins) ):\n P = GetLikelihood_Full( dev=dev, redshift=z, measure=measure, force=force, **scenario )\n if measureable:\n P = LikelihoodMeasureable( *P, min=measure_range[measure][0], max=measure_range[measure][1] )\n PlotLikelihood( *P, ax=ax, measure=measure, color=color, alpha=alpha, **kwargs )\n if colorbar:\n Colorbar( redshift_bins, label='redshift', ax=ax)", "def make_plot_for_different_thresholds(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n num_of_trials,\n seed_num=None,\n measurement_type=None,\n runtime=1440,\n max_threshold=None,\n):\n all_ambulance_patients_mean_times = []\n all_other_patients_mean_times = []\n all_total_mean_times = []\n if max_threshold == None:\n max_threshold = num_of_servers\n for threshold in range(1, max_threshold + 1):\n current_ambulance_patients_mean_times = []\n current_other_patients_mean_times = []\n current_total_mean_times = []\n for _ in range(num_of_trials):\n times = get_times_for_patients(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n threshold,\n seed_num,\n measurement_type,\n runtime,\n )\n current_ambulance_patients_mean_times.append(np.nanmean(times[0]))\n current_other_patients_mean_times.append(np.nanmean(times[1]))\n current_total_mean_times.append(np.nanmean(times[0] + times[1]))\n all_ambulance_patients_mean_times.append(\n np.nanmean(current_ambulance_patients_mean_times)\n )\n all_other_patients_mean_times.append(\n 
np.nanmean(current_other_patients_mean_times)\n )\n all_total_mean_times.append(np.nanmean(current_total_mean_times))\n\n x_axis = [thres for thres in range(1, max_threshold + 1)]\n x_axis_label, y_axis_label, title = get_plot_for_different_thresholds_labels(\n measurement_type\n )\n plt.figure(figsize=(23, 10))\n diff_threshold_plot = plt.plot(\n x_axis,\n all_ambulance_patients_mean_times,\n \"solid\",\n x_axis,\n all_other_patients_mean_times,\n \"solid\",\n x_axis,\n all_total_mean_times,\n \"solid\",\n )\n plt.title(title, fontsize=13, fontweight=\"bold\")\n plt.xlabel(x_axis_label, fontsize=13, fontweight=\"bold\")\n plt.ylabel(y_axis_label, fontsize=13, fontweight=\"bold\")\n plt.legend(\n [\"Ambulance Patients\", \"Other Patients\", \"All times\"], fontsize=\"x-large\"\n )\n\n return diff_threshold_plot" ]
[ "0.5761602", "0.5579714", "0.55573255", "0.55306447", "0.548709", "0.54616326", "0.5449815", "0.54433835", "0.54180986", "0.5371205", "0.53488153", "0.53472847", "0.53443724", "0.53393245", "0.5338107", "0.533209", "0.5318276", "0.53181833", "0.53000176", "0.52845144", "0.5239834", "0.52334744", "0.5231088", "0.52194834", "0.51904804", "0.5186688", "0.5183069", "0.51649475", "0.51603705", "0.5153082", "0.51354146", "0.51203346", "0.5116682", "0.51007086", "0.50939345", "0.5091585", "0.50744385", "0.50618", "0.50560796", "0.50530887", "0.50487", "0.50353104", "0.5033208", "0.5033037", "0.5030142", "0.5017824", "0.500991", "0.5008533", "0.500181", "0.49975917", "0.4997421", "0.49935728", "0.49876162", "0.49852866", "0.49781734", "0.49733132", "0.49592853", "0.49509105", "0.49503362", "0.49502963", "0.49500632", "0.4949417", "0.4948518", "0.4947614", "0.49470738", "0.49425203", "0.49414083", "0.49408588", "0.4930663", "0.49305016", "0.4927349", "0.49254835", "0.49213392", "0.4920298", "0.49200577", "0.49183428", "0.49054697", "0.4901247", "0.49002126", "0.489642", "0.48958123", "0.48856232", "0.4882884", "0.48800164", "0.48783153", "0.48771366", "0.4876406", "0.48759696", "0.48758584", "0.48749888", "0.4871242", "0.487092", "0.4867867", "0.48657164", "0.4864511", "0.4862662", "0.48527977", "0.48511627", "0.48505697", "0.48468667" ]
0.53283376
16
Plot each algorithm/method's rank evolving as the budget increases. groupby is the method of aggregating results of multiple instances: a callable, stringable object, GroupByMedian by default. Note that funcId may be an array of id numbers; in that case, an average rank over the listed functions is taken.
def rank_by_budget(ax, pds, dim=None, funcId=None, groupby=None):
    if groupby is None: groupby = GroupByMedian()
    pfsize = len(pds.algds.keys())

    try: # funcId is array?
        # _pds_plot_iterator[] uses funcId only for things we don't care for
        fakeFuncId = funcId[0]
        manyranking = np.array([pds.ranking((dim, i), groupby) for i in funcId])
        rankcount = np.shape(manyranking[0])[1] - 1
        amanyranking = ra.alignArrayData(ra.VArrayMultiReader(manyranking))
        budget = amanyranking[:,0]
        rankings = np.hsplit(amanyranking[:,1:], len(funcId))
        avgranking = np.average(rankings, axis=0)
        ranking = np.vstack([budget, avgranking.T]).T
    except TypeError: # funcId is scalar
        fakeFuncId = funcId
        ranking = pds.ranking((dim, funcId), groupby)

    i = 0
    for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, fakeFuncId):
        if kind != 'algorithm' and kind != 'strategy':
            continue
        #print name, ds
        budgets = ranking[:,0]
        ranks = ranking[:,1+i]
        style['markevery'] = 64
        ax.plot(budgets, ranks, label=name, **style)
        i += 1

    ax.set_xlabel('Budget')
    ax.set_ylabel('Rank by '+str(groupby).title()+' Function Value')
    ax.set_xscale('log', basex=pfsize)
    ax.grid()
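For context, a minimal usage sketch of the function above (hypothetical, not part of the record: it assumes a COCO-style performance-data object `pds` exposing `algds`/`ranking` as used in the code, the `GroupByMedian` aggregator, and illustrative dimension/function ids):

import matplotlib.pyplot as plt

# hypothetical call: average rank over functions 1-5 in dimension 10
fig, ax = plt.subplots()
rank_by_budget(ax, pds, dim=10, funcId=[1, 2, 3, 4, 5], groupby=GroupByMedian())
ax.legend(loc='best')
plt.show()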
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ranking(self, dimfun, groupby, ftarget=10**-8):\n nameds = list(itertools.chain(self.algds_dimfunc(dimfun), self.stratds_dimfunc(dimfun)))\n count = len(nameds)\n\n # Produce \"fv\" items, one per dataset, containing single function value\n # for each budget\n fvset = []\n for (name, ds) in nameds:\n budgets = ds.funvals[:,0]\n f1vals = np.maximum(groupby(ds.funvals[:, 1:], axis=1), ftarget)\n fv = np.transpose(np.vstack([budgets, f1vals]))\n fvset.append(fv)\n\n # Align the \"fv\" items by budget and merge them\n fva = ra.alignArrayData(ra.VArrayMultiReader(fvset))\n budgets = fva[:,0]\n\n # Assign function values and rank them\n # However, we want to resolve eventual ties by ranking first\n # converging function first. So we do a trick and rewrite ftarget\n # values in increasing convergence sort order.\n values = fva[:,1:].copy()\n firstconv = np.ones(count) * (np.size(budgets)+1) # runlength+1 is default\n for i in range(count): # XXX: drop the loop\n try:\n firstconv[i] = np.nonzero(values[:,i] == ftarget)[0][0]\n except IndexError:\n continue # no rewriting needed\n firstconvranks = ss.mstats.rankdata(firstconv)\n for i in range(count):\n r = firstconvranks[i]\n values[firstconv[i]:, i] = ftarget - (1-r/count)*ftarget\n\n ranks = ss.mstats.rankdata(values, axis=1)\n\n return np.transpose(np.vstack([budgets, ranks.T]))", "def fval_by_budget(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n if baseline_ds:\n baseline_budgets = baseline_ds.funvals[:, 0]\n baseline_funvals = groupby(baseline_ds.funvals[:, 1:], axis=1)\n baseline_safefunvals = np.maximum(baseline_funvals, 10**-8) # eschew zeros\n # fvb is matrix with each row being [budget,funval]\n baseline_fvb = np.transpose(np.vstack([baseline_budgets, baseline_safefunvals]))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n budgets = ds.funvals[:, 0]\n funvals = groupby(ds.funvals[:, 1:], axis=1)\n\n # Throw away funvals after ftarget reached\n try:\n limit = np.nonzero(funvals < 10**-8)[0][0] + 1\n except IndexError:\n limit = np.size(budgets)+1\n budgets = budgets[:limit]\n funvals = funvals[:limit]\n\n fvb = np.transpose(np.vstack([budgets[:limit], funvals[:limit]]))\n\n if baseline_ds:\n # Relativize by baseline\n fvba = ra.alignArrayData(ra.VArrayMultiReader([fvb, baseline_fvb]))\n budgets = fvba[:, 0]\n funvals = fvba[:, 1] / fvba[:, 2]\n\n style['markevery'] = 16\n ax.loglog(budgets, funvals, label=name, basex=pfsize, **style)\n if baseline_ds:\n ax.set_yticks([1], minor=True)\n ax.set_xlabel('Budget')\n ax.set_ylabel(_fval_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')", "def evals_by_target(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline_ds:\n baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs = groupby(ds.detEvals(targets), axis=1)\n if baseline_ds:\n fevs /= baseline_fevs\n 
style['markevery'] = 64\n ax.loglog(targets, fevs, label=name, basey=pfsize, **style)\n ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))\n if baseline_ds:\n ax.set_yticks([2, 3.5], minor=True)\n ax.set_xlabel('Function Value Targets')\n ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')", "def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label=\"\", baseline2_ds=None, baseline2_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline1_ds:\n baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))\n if baseline2_ds:\n baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs1 = groupby(ds.detEvals(targets), axis=1)\n if baseline1_ds:\n fevs1 /= baseline1_fevs\n fevs2 = groupby(ds.detEvals(targets), axis=1)\n if baseline2_ds:\n fevs2 /= baseline2_fevs\n\n infsx = np.nonzero(fevs1 == inf)\n infs = infsx[0]\n if np.size(infs) > 0:\n #print infs\n fevs1 = fevs1[:infs[0]-1]\n fevs2 = fevs2[:infs[0]-1]\n\n #print name, fevs1, fevs2\n style['markevery'] = 64\n ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)\n ax.grid()\n ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. log(runlengths) + 1\n ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))\n ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby)))", "def _pds_plot_iterator(pds, dim, funcId):\n i = 0\n for (algname, ds) in pds.algds_dimfunc((dim, funcId)):\n yield ('algorithm', algname, ds, _style_algorithm(algname, i))\n i += 1\n yield ('oracle', 'oracle', pds.oracle((dim, funcId)), _style_oracle())\n yield ('unifpf', 'eUNIF', pds.unifpf().dictByDimFunc()[dim][funcId][0], _style_unifpf())\n i = 0\n for (stratname, ds) in pds.stratds_dimfunc((dim, funcId)):\n yield ('strategy', stratname, ds, _style_strategy(stratname, i))\n i += 1", "def summarize(group, fs=None, include_source=True):\n _line_break = '{0:-<120}\\n'.format('')\n tests = sorted(ComparisonBenchmark.groups[group], key=lambda t: getattr(t, 'time_average_seconds'))\n log = StringIO.StringIO()\n log.write('Call statement:\\n\\n')\n log.write('\\t' + tests[0].stmt)\n log.write('\\n\\n\\n')\n fmt = \"{0: <8} {1: <35} {2: <12} {3: <15} {4: <15} {5: <14}\\n\"\n log.write(fmt.format('Rank', 'Function Name', 'Time', '% of Slowest', 'timeit_repeat', 'timeit_number'))\n log.write(_line_break)\n log.write('\\n')\n\n for i, t in enumerate(tests):\n func_name = \"{}.{}\".format(t.classname, t.callable.__name__) if t.classname else t.callable.__name__\n if i == len(tests)-1:\n time_percent = 'Slowest'\n else:\n time_percent = \"{:.1f}\".format(t.time_average_seconds / tests[-1].time_average_seconds * 100)\n log.write(fmt.format(i+1,\n func_name,\n convert_time_units(t.time_average_seconds),\n time_percent,\n t.timeit_repeat,\n t.timeit_number))\n log.write(_line_break)\n\n if include_source:\n log.write('\\n\\n\\nSource Code:\\n')\n log.write(_line_break)\n for test in tests:\n log.write(test.log.getvalue())\n 
log.write(_line_break)\n\n if isinstance(fs, str):\n with open(fs, 'w') as f:\n f.write(log.getvalue())\n\n elif fs is None:\n print(log.getvalue())\n else:\n try:\n fs.write(log.getvalue())\n except AttributeError as e:\n print(e)", "def group_apply_edges(self, group_by, func, edges=ALL, inplace=True):\n super(BaseGraphStore, self).group_apply_edges(group_by, func, edges, inplace=True)", "def plot(self, ax=None, savefile=None, shells=None, color='b', title=None,\n xlabel=None, ylabel=None, withavg=False):\n import matplotlib.pyplot as plt\n if ax is None:\n plt.figure()\n axset=plt\n else:\n axset=ax\n\n cmax = float(max(self.counts))\n total = sum(self.counts)\n nalpha = 0.85 if cmax/total > 0.33 else 0.65\n maxy = 1.\n for di, df in enumerate(self.dfs):\n alpha=nalpha*self.counts[di]/cmax\n axset.plot(df.x, df.df, color=color, alpha=alpha)\n maxy_ = np.max(df.df)\n if maxy_ > maxy:\n maxy = maxy_\n\n if withavg and len(self) > 0:\n x = self.dfs[0].x\n axset.plot(x, self.average, 'r-')\n maxy_ = np.max(self.average)\n if maxy_ > maxy:# pragma: no cover\n maxy = maxy_\n\n if len(self) > 0:\n dtype = self.dfs[0].dtype\n unit = \"Ang.\" if dtype == \"R\" else \"Rad.\"\n tstr = \"Radial\" if dtype == \"R\" else \"Angular\"\n else:# pragma: no cover\n unit = \"unknown units\"\n tstr = \"\"\n \n if ax is None:\n if title is None:\n plt.title(\"{} Distribution Function of Collection\".format(tstr))\n else:\n plt.title(title)\n if xlabel is None:\n plt.xlabel(\"Distance ({})\".format(unit))\n else:\n plt.xlabel(xlabel)\n if ylabel is None:\n plt.ylabel(\"Accumulated Density\")\n else:\n plt.ylabel(ylabel)\n\n _plot_shells(axset, shells, maxy)\n \n if savefile is not None:\n plt.savefig(savefile)\n\n from gblearn.base import testmode\n if not testmode:# pragma: no cover\n plt.show()\n return axset", "def plot_scoring(\n graphs: list,\n ref_partitions: object,\n graph_names: list,\n methods: list,\n scoring: Callable[\n [object, object], object\n ] = cdlib.evaluation.adjusted_mutual_information,\n nbRuns: int = 5,\n) -> object:\n forDF = []\n for i, g in enumerate(graphs):\n for m in methods:\n for r in range(nbRuns):\n partition = m(g)\n\n score = scoring(partition, ref_partitions[i]).score\n forDF.append([graph_names[i], score, partition.get_description()])\n df = pd.DataFrame(columns=[\"graph\", \"score\", \"method\"], data=forDF)\n ax = sns.lineplot(x=\"graph\", y=\"score\", hue=\"method\", data=df, legend=\"brief\")\n ax.legend(loc=\"best\")\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n plt.tight_layout()\n\n return ax", "def parallel_group(\n G, group_by, ax=None, y_offset=-0.3, rotation=45, ha=\"right\", va=\"top\"\n):\n if ax is None:\n ax = plt.gca()\n nt = utils.node_table(G)\n # groups = nt.groupby(group_by).apply(lambda df: len(df)).sort_index()\n groups = sorted(nt[group_by].unique())\n\n for i, label in enumerate(groups):\n x = i * 4\n y = y_offset\n ax.annotate(label, xy=(x, y), ha=ha, va=va, rotation=rotation)\n ax.relim()", "def editing_type_count_by_group_plot(lib_name,group_dict:Dict,dirstruct:DirectoryStructure,dict_colors):\n\n group_counts=dict()\n\n # get aggregate counts per group\n for group_name,group_nodes in group_dict.items():\n # get editing percent pileup and summary file names\n editing_percent_pileups=[dirstruct.pathName(lib_name,node,Stages.editing_type_count,EditTypeStage.edit_percent_pileup)\n for node in group_nodes ]\n summary_files=[dirstruct.pathName(lib_name,node,Stages.editing_type_count,EditTypeStage.file_summary)\n for node in group_nodes 
]\n\n # calculatte aggregate distribution\n aggregate_counts,count_summary,pileup_length=editing_site_count_per_type(editing_percent_pileups,summary_files)\n # save it for plot\n group_counts[group_name]=aggregate_counts\n\n #output aggregate counts to file\n aggregate_summary_file=dirstruct.pathName(lib_name,group_name,Stages.editing_type_count,EditTypeStage.group_distribution_summary)\n count_summary.to_csv(aggregate_summary_file)\n #output counts per file to file\n group_summary_file=dirstruct.pathName(lib_name,group_name,Stages.editing_type_count,EditTypeStage.group_count_summary)\n count_summary.to_csv(group_summary_file)\n\n # generating the plot\n try:\n plt.figure()\n group_names=[name for name in group_dict.keys()]\n data=pd.concat(aggregate_counts for aggregate_counts in group_counts.values())\n\n data.index=group_names\n data=data.transpose()\n\n plt_res, axes = stacked_bar(data, show_values=True, value_format=\"{:.3f}\",\n y_label=\"Percent of sites\",size_plot=[18,20],use_dataframe=True,throw_zeros=True,dict_colors=dict_colors)\n\n #puts the ledgends outside of the plot\n plt_res.subplots_adjust(right=0.62)\n plt_res.legend(loc='center left',bbox_to_anchor=(1, 0.5),handles=axes[::-1])\n\n output_path = dirstruct.pathName(lib_name,None,Stages.editing_type_count,EditTypeStage.plot)\n plt_res.savefig(output_path)\n plt_res.show()\n except:\n logging.exception(\"edit plot failed\")", "def plot_table(self):\r\n q = dict(sorted(decorator.arr.items(), key=lambda item: item[1]))\r\n print(\"PROGRAM | RANK | TIME ELAPSED\")\r\n count = 1\r\n for i in q:\r\n print(i[0], \"\\t\", count, \"\\t\", float(q[i]) * 1000, \"ms\")\r\n count += 1", "def plot_group(self, group_name, domains, get_time_data, fs, get_freq_data=None, get_const_data=None):\n plots = []\n \n def many(f, n=4):\n return np.concatenate([f() for _ in range(n)])\n \n for domain in domains:\n \n if domain=='frequency':\n \n # HW accelerated FFT\n if get_freq_data != None:\n f_plot = sdr_plots.HWFreqPlot(\n [get_freq_data() for _ in range(4)],\n fs, animation_period=100, w=700)\n f_dt = dma_timer.DmaTimer(f_plot.add_frame, get_freq_data, 0.3)\n # SW FFT\n else:\n f_plot = sdr_plots.IQFreqPlot(\n [many(get_time_data) for _ in range(4)],\n fs, x_range=(-2000,2000), animation_period=100, w=700)\n f_dt = dma_timer.DmaTimer(f_plot.add_frame, lambda:many(get_time_data), 0.3)\n plots.append(dict(title='Frequency domain', plot=f_plot, control=f_dt))\n \n elif domain=='time' or domain=='time-binary':\n if domain=='time-binary':\n iq_plot = sdr_plots.IQTimePlot(many(get_time_data), fs, w=700, scaling=1, ylabel='Symbol value')\n iq_plot.set_line_mode(lines=True, markers=True, shape='hvh')\n iq_plot.get_widget().layout.yaxis.dtick=1\n else:\n iq_plot = sdr_plots.IQTimePlot(many(get_time_data), fs, w=700)\n iq_plot.set_line_mode(markers=False)\n iq_dt = dma_timer.DmaTimer(iq_plot.add_data, get_time_data, 0.05)\n plots.append(dict(title='Time domain', plot=iq_plot, control=iq_dt))\n \n elif domain=='constellation':\n c_plot = sdr_plots.IQConstellationPlot(many(get_const_data or get_time_data, n=10), h=550, fade=True)\n c_dt = dma_timer.DmaTimer(c_plot.add_data, get_const_data or get_time_data, 0.05)\n plots.append(dict(title='Constellation', plot=c_plot, control=c_dt,\n layout=ipw.Layout(width='550px', margin='auto')))\n \n self.timers.register_timers(group_name, list(map(lambda tab: tab['control'], plots)))\n return QpskOverlay.tab_plots(plots)", "def evalRun(rerankRun, qrelsDict, metricFunc, debug=False):\n resArr = []\n\n for qid, 
scoreDict in rerankRun.items():\n relsSortedByScores = []\n\n val = 0\n\n if qid in qrelsDict:\n queryQrelDict = qrelsDict[qid]\n\n for did, score in getSorteScoresFromScoreDict(scoreDict):\n rel_score = 0\n if did in queryQrelDict:\n rel_score = queryQrelDict[did]\n\n relsSortedByScores.append(rel_score)\n\n val = metricFunc(relsSortedByScores, queryQrelDict) if queryQrelDict else 0\n\n if debug:\n print('%s %g' % (qid, val))\n\n resArr.append(val)\n\n res = np.mean(resArr)\n if debug:\n print('mean %g' % res)\n\n return res", "def plot_results(outputs, x, e, t, a, folds, groups,\n quantiles, strat='quantile', adj='KM', plot=True):\n if plot:\n mpl.rcParams['hatch.linewidth'] = 2.0\n\n fig, big_axes = plt.subplots(\n figsize=(8 * (len(groups) + 2), 6 * len(quantiles)),\n nrows=len(quantiles),\n ncols=1)\n\n plt.subplots_adjust(hspace=0.4)\n\n i = 0\n for _, big_ax in enumerate(big_axes, start=1):\n big_ax.set_title(\n 'Receiver Operator Characteristic and Calibration at t=' +\n str(quantiles[i]) + '\\n',\n fontsize=16)\n big_ax.tick_params(\n labelcolor=(1., 1., 1., 0.0),\n top='off',\n bottom='off',\n left='off',\n right='off')\n i += 1\n \n eces = {}\n metrics = {}\n\n for quant in quantiles:\n eces[quant] = {}\n \n for i in range(len(quantiles)):\n\n scores = outputs[quantiles[i]]\n for j in range(len(groups) + 2):\n\n pt = (i * (len(groups) + 2) + j + 1)\n if plot:\n ax = fig.add_subplot(len(quantiles), len(groups) + 2, pt)\n else:\n ax = None\n \n if (j==1):\n eces[quantiles[i]]['all'] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n None,\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot) \n \n if (j>1):\n eces[quantiles[i]][groups[j - 2]] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups[j - 2],\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot)\n \n if (j==0):\n metrics[quantiles[i]] = plot_roc_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups,\n quantiles[i],\n plot=plot)\n\n for quant in quantiles:\n metrics[quant] = metrics[quant] + (eces[quant], )\n \n if plot: \n plt.show()\n return metrics", "def makeaplot(events,\n sensitivities,\n hrf_estimates,\n roi_pair,\n fn=True):\n import matplotlib.pyplot as plt\n\n # take the mean and transpose the sensitivities\n sensitivities_stacked = mv.vstack(sensitivities)\n\n if bilateral:\n sensitivities_stacked.sa['bilat_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.bilat_ROIs)\n mean_sens = mv.mean_group_sample(['bilat_ROIs_str'])(sensitivities_stacked)\n else:\n sensitivities_stacked.sa['all_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.all_ROIs)\n mean_sens = mv.mean_group_sample(['all_ROIs_str'])(sensitivities_stacked)\n\n mean_sens_transposed = mean_sens.get_mapped(mv.TransposeMapper())\n\n # some parameters\n # get the conditions\n block_design = sorted(np.unique(events['trial_type']))\n reorder = [0, 6, 1, 7, 2, 8, 3, 9, 4, 10, 5, 11]\n block_design = [block_design[i] for i in reorder]\n # end indices to chunk timeseries into runs\n run_startidx = np.array([0, 157, 313, 469])\n run_endidx = np.array([156, 312, 468, 624])\n\n runs = np.unique(mean_sens_transposed.sa.chunks)\n\n for j in range(len(hrf_estimates.fa.bilat_ROIs_str)):\n comparison = hrf_estimates.fa.bilat_ROIs[j][0]\n if (roi_pair[0] in comparison) and (roi_pair[1] in comparison):\n roi_pair_idx = j\n roi_betas_ds = hrf_estimates[:, roi_pair_idx]\n roi_sens_ds = mean_sens_transposed[:, roi_pair_idx]\n\n for run in runs:\n fig, ax = plt.subplots(1, 1, figsize=[18, 10])\n colors = 
['#7b241c', '#e74c3c', '#154360', '#3498db', '#145a32', '#27ae60',\n '#9a7d0a', '#f4d03f', '#5b2c6f', '#a569bd', '#616a6b', '#ccd1d1']\n plt.suptitle('Timecourse of sensitivities, {} versus {}, run {}'.format(roi_pair[0],\n roi_pair[1],\n run + 1),\n fontsize='large')\n plt.xlim([0, max(mean_sens_transposed.sa.time_coords)])\n plt.ylim([-5, 7])\n plt.xlabel('Time in sec')\n plt.legend(loc=1)\n plt.grid(True)\n # for each stimulus, plot a color band on top of the plot\n for stimulus in block_design:\n onsets = events[events['trial_type'] == stimulus]['onset'].values\n durations = events[events['trial_type'] == stimulus]['duration'].values\n stimulation_end = np.sum([onsets, durations], axis=0)\n r_height = 1\n color = colors[0]\n y = 6\n\n # get the beta corresponding to the stimulus to later use in label\n beta = roi_betas_ds.samples[hrf_estimates.sa.condition == stimulus.replace(\" \", \"\"), 0]\n\n for i in range(len(onsets)):\n r_width = durations[i]\n x = stimulation_end[i]\n rectangle = plt.Rectangle((x, y),\n r_width,\n r_height,\n fc=color,\n alpha=0.5,\n label='_'*i + stimulus.replace(\" \", \"\") + '(' + str('%.2f' % beta) + ')')\n plt.gca().add_patch(rectangle)\n plt.legend(loc=1)\n del colors[0]\n\n times = roi_sens_ds.sa.time_coords[run_startidx[run]:run_endidx[run]]\n\n ax.plot(times, roi_sens_ds.samples[run_startidx[run]:run_endidx[run]], '-', color='black', lw=1.0)\n glm_model = hrf_estimates.a.model.results_[0.0].predicted[run_startidx[run]:run_endidx[run], roi_pair_idx]\n ax.plot(times, glm_model, '-', color='#7b241c', lw=1.0)\n model_fit = hrf_estimates.a.model.results_[0.0].R2[roi_pair_idx]\n plt.title('R squared: %.2f' % model_fit)\n if fn:\n plt.savefig(results_dir + 'timecourse_localizer_glm_sens_{}_vs_{}_run-{}.svg'.format(roi_pair[0], roi_pair[1], run + 1))", "def make_group_plot(args):\n directory = args.directory\n prefix = args.prefix\n buckets = args.buckets \n\n # Collect all the results and create placeholder for results.\n all_files = glob.glob(directory + \"/\" + prefix + \"*.csv\")\n df = pd.concat((pd.read_csv(f) for f in all_files), axis=1)\n df.columns = all_files\n results_raw = df.as_matrix()\n num_bins = int(np.ceil(results_raw.shape[0]/buckets))\n results_binned = np.zeros((results_raw.shape[1], num_bins))\n\n # Bin the results.\n for run in range(results_raw.shape[1]):\n for bin_idx in range(num_bins):\n results_binned[run, bin_idx] = (np.mean(results_raw[\n int(bin_idx*buckets):int(bin_idx*buckets+buckets), run]))\n\n # Build the plot.\n fig, ax = plt.subplots(figsize=(args.figSizeX, args.figSizeY))\n sns.tsplot(data = results_binned, ax=ax, ci=[68, 95], color=\"m\")\n\n # Save the plot.\n ax.set_title(prefix + ' -- Average Binned Return', fontsize=18)\n ax.set_xlabel('Bin', fontsize=18)\n ax.set_ylabel('Average Return', fontsize=18)\n plt.tick_params(axis='both', which='major', labelsize=18)\n ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n plt.savefig(os.path.join(directory, prefix+'_groupfig.png'), \n bbox_inches='tight')\n \n # Return binned results for group figure.\n return results_binned", "def series_measure(function,group_filters,**options):\n\tresults=pd.Series()\n\tfor group_key, group_filter in group_filters.items():\n\t\tjoined_options={**options,**group_filter}\n\t\tif not callable(function):\n\t\t\tif \"func\" in joined_options.keys():\n\t\t\t\tfunc=joined_options.pop('func')\n\t\t\t\tresults[group_key]=func(**joined_options)\n\t\t\telse:\n\t\t\t\traise TypeError('function passed is not callable and no 
functions\\\n\t\t\t\t referenced in filters!')\n\t\telse:\n\t\t\tresults[group_key]=function(**joined_options)\n\treturn results", "def plot_metric(df_metrics, name, batch_size=10, epochs=10):\n\n # One groupplot\n fig, axarr = plt.subplots(3, 4, sharey=True, sharex=True)\n plotname = 'apfd'\n subplot_labels = ['(a)', '(b)', '(c)']\n\n for column, nr in enumerate(sorted(df_metrics['negative_ratio'].unique())):\n for row, emb_size in enumerate(df_metrics['emb_size'].unique()):\n for agidx, (labeltext, task, linestyle) in enumerate(\n [('Classification', 'True', '-'), ('Regression', 'False', '-.')]):\n rel_df = df_metrics[\n (df_metrics['emb_size'] == str(emb_size)) & (df_metrics['negative_ratio'] == str(nr)) &\n (df_metrics['batch_size'] == str(batch_size)) & (df_metrics['epochs'] == str(epochs))]\n\n # rel_df[rel_df['agent'] == agent].plot(x='step', y='napfd', label=labeltext, ylim=[0, 1], linewidth=0.8,\n # style=linestyle, color=sns.color_palette()[agidx], ax=axarr[row,column])\n\n apfd = rel_df.loc[rel_df['classification'] == task, 'apfd']\n miu = np.round(np.mean(apfd), 2)\n sigma = np.round(np.std(apfd), 2)\n label = labeltext + '\\n $\\mu$ - ' + str(miu) + ' $\\sigma$ - ' + str(sigma)\n\n # sns.displot(data=rel_df, x=\"apfd\", hue='classification', kde=True, ax=axarr[row, column])\n\n sns.distplot(apfd, kde=True,\n bins=int(180 / 5), color=sns.color_palette()[agidx],\n hist_kws={'edgecolor': 'black'},\n kde_kws={'linewidth': 4, 'clip': (0.0, 1.0)}, label=label, ax=axarr[row, column])\n\n axarr[row, column].xaxis.grid(True, which='major')\n\n axarr[row, column].set_title('Emb_size - %s - Neg_Ratio - %s' % (emb_size, nr), fontsize=10)\n\n if row == 2:\n axarr[row, column].set_xlabel('APFD')\n if column == 0:\n axarr[row, column].set_ylabel('Density')\n\n axarr[row, column].legend(frameon=True, prop={'size': 6})\n\n # Tweak spacing to prevent clipping of ylabel\n fig.suptitle('APFD Parameter Tuning - %d Epochs and batch-size - %d' % (epochs, batch_size))\n fig.tight_layout()\n plt.savefig(name, bbox_inches='tight')\n plt.show()", "def avgPlotter(graph, contribution_curves, mean_contribs, ax_degree, ax_avg, box_plot=False, median=True, log_scale=True, size_marker=5, network=\"\"):\n\n # Plot scatter\n contributions = [y[len(y) - 1] for _, y in contribution_curves]\n degree = [graph.degree(i) for i in range(graph.order())]\n existing_degrees = [d for d in sorted(set(degree))]\n min_degree = min(degree)\n max_degree = max(degree)\n ordered_contribs = [[] for i in range(len(existing_degrees))]\n for idx in range(len(degree)):\n ordered_contribs[existing_degrees.index(degree[idx])].append(contributions[idx])\n if box_plot:\n ax_degree.boxplot(ordered_contribs, positions=existing_degrees)\n elif median:\n median_contribs_degree = [np.median(ordered_contribs[i]) for i in range(len(existing_degrees))]\n error_bars = np.zeros((2, len(existing_degrees)))\n error_bars[0, :] = [median_contribs_degree[i] - np.percentile(ordered_contribs[i], 25) for i in range(len(existing_degrees))]\n error_bars[1, :] = [np.percentile(ordered_contribs[i], 75) - median_contribs_degree[i] for i in range(len(existing_degrees))]\n\n size_marker = [len(ordered_contribs[i]) * size_marker for i in range(len(existing_degrees))]\n ax_degree.scatter(existing_degrees, median_contribs_degree, s=size_marker)\n ax_degree.errorbar(existing_degrees, median_contribs_degree, error_bars,\n alpha=0.5, linestyle='--')\n else:\n mean_contribs_degree = [mean(ordered_contribs[i]) for i in range(len(existing_degrees))]\n 
std_mean_contribs_degree = []\n for i in range(len(existing_degrees)):\n if len(ordered_contribs[i]) > 1:\n std_mean_contribs_degree.append(stdev(ordered_contribs[i]) / np.sqrt(len(ordered_contribs[i])))\n else:\n std_mean_contribs_degree.append(0)\n\n size_marker = [len(ordered_contribs[i])*size_marker for i in range(len(existing_degrees))]\n ax_degree.scatter(existing_degrees, mean_contribs_degree, s=size_marker)\n ax_degree.errorbar(existing_degrees, mean_contribs_degree, std_mean_contribs_degree,\n alpha=0.5, linestyle='--')\n\n if log_scale:\n ax_degree.set_xscale('log')\n\n\n # Plot avg. contribution\n mean_color = (np.random.rand(), np.random.rand(), np.random.rand(), 0.3)\n if network == \"WS\":\n mean_color = \"green\"\n elif network == \"BA\":\n mean_color = \"orange\"\n elif network == \"FB\":\n mean_color = \"blue\"\n x = list(range(len(mean_contribs[0, :])))\n #ax_avg.plot(mean_contribs[0, :], color=mean_color)\n ax_avg.plot(mean_contribs[0, :], color=mean_color, )\n plt.fill_between(x, (mean_contribs[1, :]), (mean_contribs[2, :]), color=mean_color, alpha=0.3 ,edgecolor=None)\n plt.ylim(0, 100);", "def hive_group(G, group_by, ax=None, offset=np.pi / 12):\n nt = utils.node_table(G)\n groups = sorted(nt[group_by].unique())\n\n if ax is None:\n ax = plt.gca()\n\n for grp in groups:\n theta = item_theta(groups, grp) + offset\n radius = 2 * (8 + len(nt[nt[group_by] == grp]) + 1)\n x, y = to_cartesian(radius, theta)\n ha, va = text_alignment(x, y)\n ax.annotate(grp, xy=(x, y), ha=ha, va=va)", "def RunEstimate(update_func, num_points=31, median_flag=False):\n d = ReadHeights(nrows=None)\n labels = {1:'male', 2:'female'}\n\n suites = {}\n for key, xs in d.items():\n label = labels[key]\n print(label, len(xs))\n Summarize(xs)\n\n xs = thinkbayes2.Jitter(xs, 1.3)\n\n mus, sigmas = FindPriorRanges(xs, num_points, median_flag=median_flag)\n suite = Height(mus, sigmas, label)\n suites[label] = suite\n update_func(suite, xs)\n print('MAP', suite.MaximumLikelihood())\n\n suite1 = suites['male']\n suite2 = suites['female']\n\n mu1 = suite1.Marginal(0)\n sigma1 = suite1.Marginal(1)\n\n mu2 = suite2.Marginal(0)\n sigma2 = suite2.Marginal(1)\n\n diff = mu1 - mu2\n sigma = (sigma1 + sigma2) / 2\n\n pmf_d = diff / sigma\n\n thinkplot.Cdf(pmf_d.MakeCdf())\n thinkplot.Show(xlabel='# stddev between means',\n ylabel='PMF')", "def aggregateFunction():\r\n global aggFunc\r\n aggFunc = []\r\n for objFunc in P_prime:\r\n aggFunc.append(objFunc[0]*FileSettings.settingsdict['weights'][0] +\r\n objFunc[1]*FileSettings.settingsdict['weights'][1] +\r\n objFunc[2]*FileSettings.settingsdict['weights'][2] +\r\n objFunc[3]*FileSettings.settingsdict['weights'][3])\r\n return aggFunc", "def make_results_plot( df, k, reg ):\n\tuid = smalldf['user_id'].values\n\tbid = smalldf['business_id'].values\n\tactual = smalldf['stars'].values\n\tpredicted = np.zeros( len(actual) )\n\tcounter = 0\n\tfor biz_id, user_id in izip( bid, uid ):\n\t\tpredicted[counter] = rating( biz_id, user_id, k = k, reg = reg ) \n\t\tcounter = counter + 1\n\t# compare_results( actual, predicted )", "def data_group():\n ...", "def plot_slice_wise_measures(labels, preds, args):\n\n cal_roc = [[], []]\n cal_prrcf1 = [[], [], []] # save PR, RC, F1 respectively\n noncal_prrcf1 = [[], [], []]\n thres_all = []\n noncal_roc = [[], []]\n n_slices = len(labels)\n for thres in range(500, -1, -5):\n print(\"[Threshold # of pixels: {}]\".format(thres))\n thres_all.append(thres)\n cal_pgt, cal_pp, cal_tp, noncal_pgt, noncal_pp, noncal_tp = \\\n 
plaque_detection_rate(labels, preds, thres=thres)\n\n\n cal_prrcf1[0].append(float(cal_tp) / cal_pp if cal_pp != 0 else 0.0)\n cal_prrcf1[1].append(float(cal_tp) / cal_pgt)\n cal_prrcf1[2].append(2.0 * cal_tp / (cal_pgt + cal_pp))\n noncal_prrcf1[0].append(float(noncal_tp) / noncal_pp if noncal_pp != 0 else 0.0)\n noncal_prrcf1[1].append(float(noncal_tp) / noncal_pgt)\n noncal_prrcf1[2].append(2.0 * noncal_tp / (noncal_pgt + noncal_pp))\n\n cal_roc[0].append((cal_pp - cal_tp) / (n_slices - cal_pgt)) # false negative ratio\n cal_roc[1].append(cal_tp / cal_pgt) # true positive ratio\n noncal_roc[0].append((noncal_pp - noncal_tp) / (n_slices - noncal_pgt)) # false negative ratio\n noncal_roc[1].append(noncal_tp / noncal_pgt) # true positive ratio\n\n print('Cal: PR - {:.4f} RC - {:.4f} F1 - {:.4f} Noncal: PR - {:.4f} RC - {:.4f} F1 - {:.4f}'.format(\n cal_prrcf1[0][-1], cal_prrcf1[1][-1], cal_prrcf1[2][-1],\n noncal_prrcf1[0][-1], noncal_prrcf1[1][-1], noncal_prrcf1[2][-1]))\n print('Cal: fpr - {:.4f} tpr - {:.4f} Noncal: fpr - {:.4f} tpr - {:.4f}'.format(\n cal_roc[0][-1], cal_roc[1][-1], noncal_roc[0][-1], noncal_roc[1][-1]))\n\n # plot the roc curve and calculate AUC\n fig_names = ['calcified', 'non-calcified']\n for plq_metrics, fig_name in zip([cal_roc, noncal_roc], fig_names):\n plt.figure()\n lw = 2\n auc_metric = auc(plq_metrics[0], plq_metrics[1])\n print(\"{} : {}\".format(fig_name, auc_metric))\n plt.plot(plq_metrics[0], plq_metrics[1], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % auc_metric)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('slice-wise ROC curve of {} plaques'.format(fig_name))\n plt.legend(loc=\"lower right\")\n plt.savefig(\"./{}/{}_roc.png\".format(args.fig_dir, fig_name))\n\n for plq_metrics, fig_name in zip([cal_prrcf1, noncal_prrcf1], fig_names):\n plt.figure()\n lw = 2\n plt.plot(thres_all, plq_metrics[0], color='r', lw=lw, label='precision')\n plt.plot(thres_all, plq_metrics[1], color='g', lw=lw, label='recall')\n plt.plot(thres_all, plq_metrics[2], color='b', lw=lw, label='f1')\n\n plt.xlim([min(thres_all), max(thres_all)])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Threshold Number of Pixels')\n plt.title('{} measures under different thresholds'.format(fig_name))\n plt.legend(bbox_to_anchor=(1, 0.95), loc=\"upper right\")\n plt.savefig(\"./{}/{}_prrcf1.png\".format(args.fig_dir, fig_name))", "def plot_model_ranking(self, var, show_text=False, obslabels=None):\n\n # search for model keys\n tmp = []\n for i in xrange(4):\n tmp = self._get_model_ranking(i + 1, var)\n if len(tmp) > 0:\n break # assumes that all datasets with observations have same models\n if len(tmp) == 0:\n print var\n print self.pos\n print self.data\n print('FATAL error: no model keys provided!')\n return None\n\n fig = plt.figure()\n gs = gridspec.GridSpec(1, 2, wspace=0.05, hspace=0.05, bottom=0.2, width_ratios=[3, 1])\n ax = fig.add_subplot(gs[0])\n\n # 1 vs. 2\n self.__draw_ranking_scatter(1, 2, var, color='red', marker='o', show_text=show_text, ax=ax, obslabels=obslabels)\n # 1 vs. 3\n self.__draw_ranking_scatter(1, 3, var, color='green', marker='*', ax=ax, show_text=show_text, obslabels=obslabels)\n # 1 vs. 4\n self.__draw_ranking_scatter(1, 4, var, color='blue', marker='^', ax=ax, show_text=show_text, obslabels=obslabels)\n # 2 vs. 
3\n self.__draw_ranking_scatter(2, 3, var, color='grey', marker='x', ax=ax, show_text=show_text, obslabels=obslabels)\n # 2 vs 4\n self.__draw_ranking_scatter(2, 4, var, color='m', marker='+', ax=ax, show_text=show_text, obslabels=obslabels)\n # 3 vs 4\n self.__draw_ranking_scatter(3, 4, var, color='c', marker='h', ax=ax, show_text=show_text, obslabels=obslabels)\n\n if ax is not None:\n ax.legend(prop={'size': 8}, ncol=1, fancybox=True, loc='upper left')\n ax.set_xlabel('rank(observation X)')\n ax.set_ylabel('rank(observation Y)')\n ax.set_ylim(ymin=0, ymax=len(tmp) + 1)\n ax.set_xlim(xmin=0, xmax=len(tmp) + 1)\n ax.grid()\n ax.set_title('Comparison of model ranking: ' + var.upper())\n ax.plot(ax.get_xlim(), ax.get_xlim(), 'k--') # 1:1 line\n\n # legend\n ax2 = fig.add_subplot(gs[1])\n dy = 0.1\n yoff = dy\n for k in tmp:\n ax2.text(0.1, yoff, self._model2short_label(k) + ': ' + k)\n yoff += dy\n ax2.set_ylim(0., yoff)\n ax2.set_xticks([])\n ax2.set_yticks([])\n\n return fig", "def plot_coupling_grid(baseline_group, fits_groups, metrics, fax=None):\n n_algorithms = len(fits_groups)\n n_metrics = len(metrics)\n\n if fax is None:\n fig, axes = plt.subplots(n_metrics, n_algorithms,\n figsize=(3 * n_algorithms, 3 * n_metrics))\n else:\n fig, axes = fax\n\n # iterate over metrics\n for row_idx, metric in enumerate(metrics):\n if metric == 'selection_ratio':\n baseline_coefs = baseline_group['coupling_coefs'][:]\n baseline_selection_ratio = \\\n calculate_selection_ratio(baseline_coefs).mean(axis=0)\n\n # iterate over algorithms\n for col_idx, algorithm in enumerate(fits_groups):\n if metric == 'selection_ratio':\n # calculate selection ratio for algorithm\n coefs = algorithm['coupling_coefs'][:]\n selection_ratio = calculate_selection_ratio(coefs).mean(axis=0)\n\n # plot direct comparison\n axes[row_idx, col_idx].scatter(\n baseline_selection_ratio,\n selection_ratio,\n alpha=0.5,\n color='k',\n edgecolor='w')\n else:\n axes[row_idx, col_idx].scatter(\n baseline_group[metric][:].mean(axis=0),\n algorithm[metric][:].mean(axis=0),\n alpha=0.5,\n color='k',\n edgecolor='w')\n\n return fig, axes", "def det_plot(data, group_by, plot_title, save_figure_path=None):\n subgroups = data.groupby(group_by)\n li_subgroups = subgroups.groups\n\n fontsize = 12\n fig, ax = plt.subplots(figsize=(8, 8), constrained_layout=True)\n for subgroup in li_subgroups:\n # for each subgroup\n df_subgroup = subgroups.get_group(subgroup)\n labels, scores = (\n df_subgroup[\"label\"].values.astype(int),\n df_subgroup[\"score\"].values,\n )\n fpr, fnr, thresholds = calculate_det_curves(labels, scores)\n ax = draw_det_curve(\n fpr, fnr, ax=ax, label=subgroup, fontsize=fontsize, title=plot_title\n )\n\n ax.xaxis.set_major_formatter(mtick.FormatStrFormatter(\"%.e\"))\n plt.minorticks_off()\n ax.set_ylabel(\"FNR (%)\", fontsize=fontsize)\n ax.set_xlabel(\"FPR\", fontsize=fontsize)\n plt.legend(fontsize=fontsize)\n ax.set_xlim([1e-4, 1])\n ax.set_ylim([0, 30])\n\n ax.tick_params(axis=\"both\", labelsize=fontsize)\n\n # save figure\n if save_figure_path is not None:\n plt.savefig(save_figure_path)", "def plot_groups(sb, **kw):\n\n #check kws\n B_flag = True\n if('B' in kw):\n B_flag = bool(kw['B'])\n E_flag = True\n if('E' in kw):\n E_flag = bool(kw['E'])\n ugroups = sb.unique_group_names\n if('groups' in kw):\n ugroups = set(kw['groups'])\n if('return_figs' in kw):\n if(kw['return_figs']):\n return_figs = True\n figs = {'E': {}, 'B': {}}\n else:\n return_figs = False\n else:\n if((not B_flag) or (not E_flag)):\n group_lim = 8\n 
else:\n group_lim = 4\n if(len(ugroups) <= group_lim):\n return_figs = True\n figs = {'E': {}, 'B': {}}\n else:\n return_figs = False\n\n flags = [B_flag, E_flag]\n fields = ['Bmax', 'Emax']\n ylabels = ['Maximum Magnetic Field (mG)', 'Maximum Electric Field (kV/m)']\n title_pre = ['Maximum Magnetic Field - ',\n 'Maximum Electric Field - ']\n keys = ['B', 'E']\n it = zip(flags, fields, ylabels, title_pre, keys)\n\n #iterate over groups with more than 1 CrossSection\n for xss in sb.groups:\n if(xss[0].group in ugroups):\n for (fl, fi, yl, ti, k) in it:\n if(fl):\n #get plotting objects\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n #init handles and labels lists for legend\n kw['H'], kw['L'] = [], []\n #plot the Bmax results for each xs in the group\n _plot_group_fields(ax, xss, fi, **kw)\n #plot wires\n max_field = max([xs.fields[fi].max() for xs in xss])\n _plot_group_wires(ax, xss, max_field, **kw)\n #draw ground surface if necessary\n if(len(xss) <= 2):\n _check_und_conds(xss, [ax], **kw)\n #plot ROW lines\n _plot_group_ROW_edges(ax, xss, **kw)\n #set axis text and legend\n ax.set_xlabel('Distance (ft)')\n ax.set_ylabel(yl)\n ax.set_title(textwrap.fill(ti + str(xss[0].group)))\n ax.legend(kw['H'], kw['L'], **_leg_kw)\n _format_line_axes_legends(ax)\n #save the figure if keyword 'save' == True, and append fig\n _save_fig('group_%s-%s' % (str(xss[0].group), fi), fig, **kw)\n #store the fig or close it\n if(return_figs):\n figs[k][xss[0].group] = fig\n else:\n plt.close(fig)\n\n if(return_figs):\n return(figs)", "def get_median(projx, method='gf_mean', output='proj'):\n x_axis = np.arange(len(projx[0]))\n all_mean = []\n for proj in projx:\n proj = proj - proj.min()\n if method == 'gf_mean':\n gf = gaussfit.GaussFit(x_axis, proj)\n all_mean.append(gf.mean)\n elif method == 'gf_sigma':\n gf = gaussfit.GaussFit(x_axis, proj)\n all_mean.append(gf.sigma)\n elif method == 'mean':\n mean = np.sum(x_axis*proj) / np.sum(proj)\n all_mean.append(mean)\n elif method == 'std':\n mean = np.sum(x_axis*proj) / np.sum(proj)\n rms = np.sqrt(np.sum((x_axis-mean)**2 * proj) / np.sum(proj))\n all_mean.append(rms)\n else:\n raise ValueError(method)\n\n index_median = np.argsort(all_mean)[len(all_mean)//2]\n projx_median = projx[index_median]\n\n #import matplotlib.pyplot as plt\n #plt.figure()\n #for proj in projx:\n # plt.plot(proj)\n #plt.plot(projx_median, color='black', lw=3)\n #plt.show()\n #import pdb; pdb.set_trace()\n\n if output == 'proj':\n return projx_median\n elif output == 'index':\n return index_median", "def plot_parallel():\n import chartify\n\n # Generate example data\n data = chartify.examples.example_data()\n\n total_quantity_by_fruit_and_country = data.groupby([\"fruit\", \"country\"])[\"quantity\"].sum().reset_index()\n print(total_quantity_by_fruit_and_country.head())\n \"\"\"Print break\"\"\"\n _parallel_example_1(total_quantity_by_fruit_and_country)", "def plotdFvsLambda2(nb=10):\n x = numpy.arange(len(df_allk))\n if len(x) < nb:\n return\n xs = numpy.array_split(x, len(x)/nb+1)\n mnb = max([len(i) for i in xs])\n fig = pl.figure(figsize = (8,6))\n width = 1./(len(P.methods)+1)\n elw = 30*width\n colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}\n ndx = 1\n for x in xs:\n lines = tuple()\n ax = pl.subplot(len(xs), 1, ndx)\n for name in P.methods:\n y = [df_allk[i][name]/P.beta_report for i in x]\n ye = [ddf_allk[i][name]/P.beta_report for i 
in x]\n line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))\n lines += (line[0],)\n for dir in ['left', 'right', 'top', 'bottom']:\n if dir == 'left':\n ax.yaxis.set_ticks_position(dir)\n else:\n ax.spines[dir].set_color('none')\n pl.yticks(fontsize=10)\n ax.xaxis.set_ticks([])\n for i in x+0.5*width*len(P.methods):\n ax.annotate('$\\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0), xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10, textcoords='offset points', va='top', ha='center')\n pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x)))\n ndx += 1\n leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8), title='$\\mathrm{\\Delta G\\/%s\\/}\\mathit{vs.}\\/\\mathrm{lambda\\/pair}$' % P.units, fancybox=True)\n leg.get_frame().set_alpha(0.5)\n pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight')\n pl.close(fig)\n return", "def plot(self):\n \n \n x_ibs=[] \n x_gss=[]\n y_ibs=[] \n y_gss=[]\n x_pso=[]\n x_bgd=[]\n y_bgd=[]\n y_pso=[]\n x_gd=[]\n y_gd=[]\n \n i=0.0000001\n \n # for k in range(1,51):\n # i= random.uniform(0.00000001, 1)\n # t_avg_ibs=[]\n # t_avg_gss=[]\n # for j in range(1,51):\n #L=random.randint(-100, 0)\n #U=random.randint(0, 100)\n max_iter=self.Max_iter \n L=self.Lower_bound\n U=self.Upper_bound\n \n minima=self.gss(L,U,i,1000)\n #print(\"minima at X = \",minima[1])\n x_ibs.append(self.I_bisection(L,U,minima[1],max_iter)[0])\n x_gss.append(self.gss(L,U,i,max_iter)[0])\n x_pso.append(self.particle_Swarm(self.func, L, U, 2, max_iter)[0])\n x_gd.append(self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)[0])\n x_bgd.append(self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)[0])\n #print(x_pso)\n for i in x_ibs[0]:\n #print(self.Func(i)) \n y_ibs.append(self.Func(i))\n for i in x_gss[0]:\n y_gss.append(self.Func(i)) \n for i in x_pso[0]:\n y_pso.append(self.Func(i)) \n for i in x_gd[0]:\n y_gd.append(self.Func(i)) \n for i in x_bgd[0]:\n y_bgd.append(self.Func(i)) \n #print(y_gss)\n\n plt.plot(x_ibs[0], y_ibs, 'r.')\n plt.plot(x_gss[0], y_gss, '.')\n plt.plot(x_pso[0], y_pso, 'y.')\n #plt.plot(x_gd[0], y_gd, 'y.')\n #plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y')\n \n plt.suptitle('Interval Bisection Search (Red) vs Golden Section Search (Blue) vs Particle swarm optimization (Green)')\n #plt.axis([0, 100, 0.00000001, 1]) \n plt.show()\n plt.plot(x_gd[0], y_gd, 'r.')\n plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y') \n plt.suptitle('Gradient Descent (Red) vs Batch Gradient Descent (Black) ')\n \n plt.show()\n \n start_time = timeit.default_timer()\n ibs=self.I_bisection(L,U,minima[1],max_iter)\n print(\" Execution time for Interval bisection Method is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gss=self.gss(L,U,i,max_iter)\n print(\" Execution time for Golden Section Search is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n pso=self.particle_Swarm(self.func, L, U, 2, max_iter)\n print(\" Execution time for Particle swarm optimization is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gd=self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)\n print(\" Execution time for Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n bgd=self.b_gradient_descent(LB=L,UB=U ,eta=0.01, 
tol=minima[1],iter=max_iter)\n print(\" Execution time for Batch Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n plt.plot(ibs[1], ibs[2], 'r.')\n plt.text(ibs[1], ibs[2],\"IB\")\n plt.plot(gss[1], gss[2], '.')\n plt.text(gss[1], gss[2],\" GSS\")\n plt.plot(pso[1], pso[2], 'y.')\n plt.text(pso[1], pso[2],\" PSO\")\n plt.plot(gd[1], gd[2], 'g.')\n plt.text(gd[1], gd[2],\" GD \")\n plt.plot(bgd[1],bgd[2], 'k.')\n plt.text(bgd[1], bgd[2],\" Batch_GD\")\n \n plt.xlabel('Value of X')\n plt.ylabel('NUmber of iteration') \n plt.suptitle('Number of iterations vs minimum value of x')\n \n plt.show()", "def _graph_ranks(avranks, names, p_values, cd=None, cdmethod=None, lowv=None, highv=None, highlight=None,\n width=6, textspace=1, reverse=False, filename=None, labels=False, **kwargs):\n width = float(width)\n textspace = float(textspace)\n \n def lloc(_list, n):\n \"\"\"\n List location in list of list structure.\n Enable the use of negative locations:\n -1 is the last element, -2 second last...\n \"\"\"\n if n < 0:\n return len(_list[0]) + n\n return n\n \n def nth(_list, n):\n \"\"\"\n Returns only nth elemnt in a list.\n \"\"\"\n n = lloc(_list, n)\n return [a[n] for a in _list]\n\n def mxrange(lr):\n \"\"\"\n Multiple xranges. Can be used to traverse matrices.\n This function is very slow due to unknown number of\n parameters.\n >>> mxrange([3,5])\n [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]\n >>> mxrange([[3,5,1],[9,0,-3]])\n [(3, 9), (3, 6), (3, 3), (4, 9), (4, 6), (4, 3)]\n \"\"\"\n if len(lr):\n yield ()\n else:\n # it can work with single numbers\n index = lr[0]\n if isinstance(index, int):\n index = [index]\n for a in range(*index):\n for b in mxrange(lr[1:]):\n yield tuple([a] + list(b))\n\n sums = avranks\n\n nnames = names\n ssums = sums\n\n if lowv is None:\n lowv = min(1, int(math.floor(min(ssums))))\n if highv is None:\n highv = max(len(avranks), int(math.ceil(max(ssums))))\n\n cline = 0.4\n\n k = len(sums)\n\n linesblank = 0\n scalewidth = width - 2 * textspace\n\n def rankpos(rank):\n if not reverse:\n a = rank - lowv\n else:\n a = highv - rank\n return textspace + scalewidth / (highv - lowv) * a\n\n distanceh = 0.25\n\n cline += distanceh\n\n # calculate height needed height of an image\n minnotsignificant = max(2 * 0.2, linesblank)\n height = cline + ((k + 1) / 2) * 0.2 + minnotsignificant\n\n fig = plt.figure(figsize=(width, height*1.05))\n fig.set_facecolor('white')\n ax = fig.add_axes([0, 0, 1, 1]) # reverse y axis\n ax.set_axis_off()\n\n hf = 1. / height # height factor\n wf = 1. 
/ width\n\n def hfl(_list):\n return [a * hf for a in _list]\n\n def wfl(_list):\n return [a * wf for a in _list]\n\n # Upper left corner is (0,0).\n ax.plot([0, 1], [0, 1], c=\"w\")\n ax.set_xlim(0, 1)\n ax.set_ylim(1, 0)\n\n def line(l, color='k', **kwargs):\n \"\"\"\n Input is a list of pairs of points.\n \"\"\"\n ax.plot(wfl(nth(l, 0)), hfl(nth(l, 1)), color=color, **kwargs)\n\n def text(x, y, s, *args, **kwargs):\n ax.text(wf * x, hf * y, s, *args, **kwargs)\n\n line([(textspace, cline), (width - textspace, cline)], linewidth=2)\n\n bigtick = 0.3\n smalltick = 0.15\n linewidth = 2.0\n linewidth_sign = 4.0\n\n tick = None\n for a in list(np.arange(lowv, highv, 0.5)) + [highv]:\n tick = smalltick\n if a == int(a):\n tick = bigtick\n line([(rankpos(a), cline - tick / 2),\n (rankpos(a), cline)],\n linewidth=2)\n\n for a in range(lowv, highv + 1):\n text(rankpos(a), cline - tick / 2 - 0.05, str(a),\n ha=\"center\", va=\"bottom\", size=16)\n\n k = len(ssums)\n\n def filter_names(name):\n return name\n\n space_between_names = 0.24\n\n for i in range(math.ceil(k / 2)):\n chei = cline + minnotsignificant + i * space_between_names\n if nnames[i] == highlight:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace - 0.1, chei)],\n linewidth=linewidth, color='red')\n else:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace - 0.1, chei)],\n linewidth=linewidth)\n if labels:\n text(textspace + 0.3, chei - 0.075,\n format(ssums[i], '.4f'), ha=\"right\", va=\"center\", size=10)\n if nnames[i] == highlight:\n text(textspace - 0.2, chei,\n filter_names(nnames[i]), ha=\"right\", va=\"center\", size=18, color='red')\n else:\n text(textspace - 0.2, chei,\n filter_names(nnames[i]), ha=\"right\", va=\"center\", size=18)\n\n for i in range(math.ceil(k / 2), k):\n chei = cline + minnotsignificant + (k - i - 1) * space_between_names\n if nnames[i] == highlight:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace + scalewidth + 0.1, chei)],\n linewidth=linewidth, color='red')\n else:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace + scalewidth + 0.1, chei)],\n linewidth=linewidth)\n if labels:\n text(textspace + scalewidth - 0.3, chei - 0.075,\n format(ssums[i], '.4f'), ha=\"left\", va=\"center\", size=10)\n if nnames[i] == highlight:\n text(textspace + scalewidth + 0.2, chei, filter_names(nnames[i]),\n ha=\"left\", va=\"center\", size=18, color='red')\n else:\n text(textspace + scalewidth + 0.2, chei, filter_names(nnames[i]),\n ha=\"left\", va=\"center\", size=18)\n start = cline + 0.2\n side = -0.02\n height = 0.1\n\n # draw no significant lines\n # get the cliques\n cliques = _form_cliques(p_values, nnames)\n achieved_half = False\n print(nnames)\n for clq in cliques:\n if len(clq) == 1:\n continue\n print(clq)\n min_idx = np.array(clq).min()\n max_idx = np.array(clq).max()\n if min_idx >= len(nnames) / 2 and achieved_half == False:\n start = cline + 0.25\n achieved_half = True\n line([(rankpos(ssums[min_idx]) - side, start),\n (rankpos(ssums[max_idx]) + side, start)],\n linewidth=linewidth_sign)\n start += height", "def create_income_expense_grouped_bar_chart(year_id):\n month_objects = get_months_by_year(year_id)\n\n # get chart data\n months = convert_to_verbose_months(month_objects)\n\n y_expenses = get_transactions_sum_data(month_objects, amount_type='expenses')\n \n y_incomes = get_transactions_sum_data(month_objects, amount_type='incomes')\n\n # build chart\n fig = go.Figure(\n data=[\n 
go.Bar(name='Gastos', x=months, y=y_expenses, marker_color='#b22222'),\n go.Bar(name=\"Rendas\", x=months, y=y_incomes, marker_color='#22b222')\n ]\n )\n\n fig.update_layout(barmode='group')\n\n plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n\n return plot_div", "def plot_model_vis(func):\n def plotting_func(model, train_config, arch, data_loader, epoch, handle_dict, vis=False, eval=False, label_names=None):\n output_dict = func(model, train_config, arch, data_loader, epoch, vis=vis, eval=eval)\n # plot average metrics on validation set\n average_elbo = np.mean(output_dict['total_elbo'][:, -1], axis=0)\n average_cond_log_like = np.mean(output_dict['total_cond_log_like'][:, -1], axis=0)\n average_kl = [0. for _ in range(len(output_dict['total_kl']))]\n for level in range(len(output_dict['total_kl'])):\n average_kl[level] = np.mean(output_dict['total_kl'][level][:, -1], axis=0)\n averages = average_elbo, average_cond_log_like, average_kl\n plot_average_metrics(averages, epoch, handle_dict, 'Validation')\n\n # plot average improvement on metrics over iterations\n if train_config['n_iterations'] > 1:\n plot_average_improvement([output_dict['total_elbo'], output_dict['total_cond_log_like'], output_dict['total_kl']], epoch, handle_dict)\n\n if vis:\n # plot reconstructions, samples\n batch_size = train_config['batch_size']\n data_shape = list(next(iter(data_loader))[0].size())[1:]\n plot_images(output_dict['total_recon'][:batch_size, -1].reshape([batch_size]+data_shape), caption='Reconstructions, Epoch ' + str(epoch))\n plot_images(output_dict['samples'].reshape([batch_size]+data_shape), caption='Samples, Epoch ' + str(epoch))\n\n if model.output_distribution == 'gaussian':\n plot_output_variance(output_dict['total_cond_like'], epoch, handle_dict)\n\n # plot t-sne for each level's posterior (at first inference iteration)\n #for level in range(len(output_dict['total_posterior'])):\n # plot_tsne(output_dict['total_posterior'][level][:, 1, 0], 1 + output_dict['total_labels'], title='T-SNE Posterior Mean, Epoch ' + str(epoch) + ', Level ' + str(level), legend=label_names)\n\n # plot the covariance matrix for each level's posterior (at first inference iteration)\n for level in range(len(output_dict['total_posterior'])):\n plot_latent_covariance_matrix(output_dict['total_posterior'][level][:, 1, 0], epoch, level)\n\n if train_config['n_iterations'] > 1:\n # plot ELBO, reconstruction loss, KL divergence over inference iterations\n plot_metrics_over_iterations([output_dict['total_elbo'], output_dict['total_cond_log_like'], output_dict['total_kl']], epoch)\n\n # plot reconstructions over inference iterations\n plot_images(output_dict['total_recon'][:batch_size].reshape([-1]+data_shape), caption='Reconstructions Over Iterations, Epoch '+str(epoch))\n\n # plot errors over inference iterations\n plot_errors_over_iterations(output_dict['total_recon'][:batch_size], next(iter(data_loader))[0].cpu().numpy(), epoch)\n\n return output_dict, averages, handle_dict\n return plotting_func", "def group_by(self, func):\n return _(_group_by(self._, func))", "def main():\r\n\r\n # Pre-pandemic period\r\n # Step 1 (might take 2-3 mins to categorise 16724 records)\r\n pre = RQ2('../csv_files/precovid_filtered.csv')\r\n pre.cateActions()\r\n\r\n # Step 2\r\n for detail in pre.li_detail[:10]:\r\n print(detail)\r\n plot_lev(pre.od)\r\n\r\n # Step 3\r\n df_new_fixed_pre = manFix(pre.df_new,option='precovid')\r\n\r\n # Step 4 \r\n li_pre_final = cal_group_actions(df_new_fixed_pre,option='precovid')\r\n\r\n # 
================================================================\r\n # Post-pandemic period\r\n # Step 1 (might take 2-3 mins to categorise 25827 records)\r\n post = RQ2('../csv_files/postcovid_filtered.csv')\r\n post.cateActions()\r\n\r\n # Step 2 is similar to pre-pandemic period (commented to keep the result clear)\r\n # for detail in post.li_detail[:10]:\r\n # print(detail)\r\n\r\n # Step 3\r\n df_new_fixed_post = manFix(post.df_new, option='postcovid')\r\n\r\n # Step 4 \r\n li_post_final = cal_group_actions(df_new_fixed_post,option='postcovid')\r\n\r\n # ================================================================\r\n # Step 5\r\n meanTest(li_pre_final,li_post_final)\r\n\r\n # Step 6\r\n li_merge = li_pre_final + li_post_final\r\n boxplot(li_merge)", "def plot_series(groups, series):\n fig, ax = plt.subplots()\n ax.set_xlabel(\"Iterations\")\n ax.set_ylabel(series)\n\n for gkey, gval in groups.items():\n args = dict(gkey)\n\n series_values = get_series(gval, series)\n interval_size = args['test_interval']\n interval_count = series_values.shape[1] - 1\n\n x = np.arange(0, interval_size * interval_count + 1, step=interval_size)\n mean = np.mean(series_values, axis=0)\n std = np.std(series_values, axis=0)\n\n ax.plot(x, mean, label=format_group_key(gkey))\n ax.fill_between(x, mean + std, mean - std, alpha=0.2)\n\n ax.legend()\n return fig, ax", "def fit_and_plot(self, max_iter):\n from matplotlib import pyplot as plt\n from matplotlib import cm\n\n colours = cm.rainbow(np.linspace(0, 1, self.num_classes)) # FIXME: rainbow list -> array\n\n def plot_data(d):\n for c in range(self.num_classes):\n for n in range(self.num_nuisances):\n plt.scatter(*d[c][n].T, c=colours[c])\n plt.waitforbuttonpress()\n\n def plot_mean(th):\n for c in range(self.num_classes):\n for n in range(self.num_nuisances):\n plt.scatter(*th[c][n].mean.T, c=colours[c], marker=\"x\")\n plt.waitforbuttonpress()\n\n plt.ion()\n plt.scatter(*self.data.T)\n plt.waitforbuttonpress()\n\n split_data = self.initialise_clusters_with_kmeans()\n plot_data(split_data)\n thetas = self.maximization(split_data)\n plot_mean(thetas)\n\n for i in range(max_iter):\n plt.clf()\n split_data = self.expectation(thetas)\n plot_data(split_data)\n thetas = self.maximization(split_data)\n plot_mean(thetas)\n return split_data, thetas", "def gbids(self, method=\"median\", pattr=None, **kwargs):\n import gblearn.selection as sel\n from functools import partial\n methmap = {\n \"median\": sel.median,\n \"cna\": partial(sel.cna_max, coord=0),\n\t \"cna_z\": partial(sel.cna_max, coord=2)\n }\n if method in methmap:\n extra = getattr(self, pattr) if pattr is not None else None\n return methmap[method](self.xyz, extra, types=self.types, **kwargs)", "def plot_bias(clf_list = ['test_small','rt_small','test2_small'],return_df = False,XKCD = False):\n if XKCD = True:\n plt.xkcd()\n print('damn')\n df = load_all_dfs(clf_list)\n df = df.swaplevel(0,1)\n del df['std']\n df.hist()\n plt.figure()\n\n for clf in clf_list:\n df.ix[clf].mean().plot(label = clf,figsize=(16, 4))\n plt.legend(loc='upper right')\n plt.title('mean')\n plt.figure()\n \n # c = df.columns\n for clf in clf_list:\n #df[c[1:]].ix[clf].max().plot(label = clf,figsize=(16, 4))\n df.ix[clf].max().plot(label = clf,figsize=(16, 4))\n plt.legend(loc='upper right')\n plt.title('max')\n \n plt.figure()\n for clf in clf_list:\n df.ix[clf].std().plot(label = clf,figsize=(16, 4))\n\n \n plt.legend(loc='upper right')\n plt.title('std')\n plt.figure()\n used_list = []\n for clf in clf_list:\n for clf2 in 
clf_list:\n if (clf != clf2) and ({clf,clf2} not in used_list):\n diff = ((df.ix[clf] - df.ix[clf2])**2)**(1/2)\n diff.mean().plot(label = clf+' - ' +clf2,figsize=(16, 4))\n used_list.append({clf,clf2})\n \n \n \n \n \n plt.legend(loc='upper right')\n plt.title('difference')\n print('damnover')\n if return_df == True:\n return df", "def metrics_group():", "def display_group_density_plot(df, groupby, on, palette, figsize):\n\n if not isinstance(df, pd.core.frame.DataFrame):\n raise ValueError('df must be a pandas DataFrame')\n\n if not groupby:\n raise ValueError('groupby parameter must be provided')\n\n elif not groupby in df.keys():\n raise ValueError(groupby + ' column does not exist in the given DataFrame')\n\n if not on:\n raise ValueError('on parameter must be provided')\n\n elif not on in df.keys():\n raise ValueError(on + ' column does not exist in the given DataFrame')\n\n if len(set(df[groupby])) > 10:\n groups = df[groupby].value_counts().index[:10]\n\n else:\n groups = set(df[groupby])\n\n # Get relevant palette\n if palette:\n palette = palette[:len(groups)]\n else:\n palette = sns.color_palette()[:len(groups)]\n\n # Plot\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')\n\n for value, color in zip(groups, palette):\n sns.kdeplot(df.loc[df[groupby] == value][on], shade=True, color=color, label=value)\n\n ax.set_title(str(\"Distribution of \" + on + \" per \" + groupby + \" group\"), fontsize=30)\n \n ax.set_xlabel(on, fontsize=20)\n return ax", "def function(self, func):\n blocks = []\n\n for block in idaapi.FlowChart(func):\n blocks.append(self.block(block))\n\n return blocks", "def PlotContributions( ax=None, dev=False, measure='DM', redshift=0.1, cumulative=False, N_inter=False, **scenario ):\n if ax is None:\n fig, ax = plt.subplots()\n for region in regions:\n models = scenario.get( region )\n if models:\n for model in models:\n P = GetLikelihood( region=region, model=model, measure=measure, redshift=redshift, N_inter=N_inter, dev=dev )\n PlotLikelihood( *P, measure=measure, label=region+': '+Label(model) , linestyle=linestyle_region[region], ax=ax, cumulative=cumulative )\n ax.legend()\n ax.set_title( \"redshift = %.1f\" % redshift )", "def callable(self, nans=False):\n jitfunc = nb.njit(self.func, nogil=True)\n\n def _loop(sortidx, group_idx, a, ret):\n size = len(ret)\n group_idx_srt = group_idx[sortidx]\n a_srt = a[sortidx]\n\n indices = step_indices(group_idx_srt)\n for i in range(len(indices) - 1):\n start_idx, stop_idx = indices[i], indices[i + 1]\n ri = group_idx_srt[start_idx]\n if ri < 0:\n raise ValueError(\"negative indices not supported\")\n if ri >= size:\n raise ValueError(\"one or more indices in group_idx are too large\")\n ret[ri] = jitfunc(a_srt[start_idx:stop_idx])\n return nb.njit(_loop, nogil=True)", "def display_group_density_plot(df, groupby, on, palette, figsize):\n\n if not isinstance(df, pd.core.frame.DataFrame):\n raise ValueError('df must be a pandas DataFrame')\n\n if not groupby:\n raise ValueError('groupby parameter must be provided')\n\n elif not groupby in df.keys():\n raise ValueError(groupby + ' column does not exist in the given DataFrame')\n\n if not on:\n raise ValueError('on parameter must be provided')\n\n elif not on in df.keys():\n raise ValueError(on + ' column does not exist in the given DataFrame')\n\n if len(set(df[groupby])) > 10:\n groups = df[groupby].value_counts().index[:10]\n\n else:\n groups = set(df[groupby])\n\n # Get relevant palette\n if 
palette:\n palette = palette[:len(groups)]\n else:\n palette = sns.color_palette()[:len(groups)]\n\n # Plot\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')\n\n for value, color in zip(groups, palette):\n sns.kdeplot(df.loc[df[groupby] == value][on], shade=True, color=color, label=value)\n\n ax.set_title(str(\"Distribution of \" + on + \" per \" + groupby + \" group\"), fontsize=10)\n return ax", "def boxplot_from_data_frame(df,\n group_by=\"Method\",\n metric=\"Precision\",\n hue=None,\n y_min=0.0,\n y_max=1.0,\n plotf=violinplot,\n color='grey',\n color_palette=None,\n label_rotation=45):\n\n sns.set_style(\"whitegrid\")\n ax = violinplot(x=group_by, y=metric, hue=hue, data=df, color=color,\n palette=color_palette, order=sorted(df[group_by].unique()))\n ax.set_ylim(bottom=y_min, top=y_max)\n ax.set_ylabel(metric)\n ax.set_xlabel(group_by)\n for lab in ax.get_xticklabels():\n lab.set_rotation(label_rotation)\n\n plt.show()\n\n return ax", "def plot_prob_reject(ex, fname, func_xvalues, xlabel, func_title=None, \n return_plot_values=False):\n #from IPython.core.debugger import Tracer \n #Tracer()()\n\n results = glo.ex_load_result(ex, fname)\n\n def rej_accessor(jr):\n rej = jr['test_result']['h0_rejected']\n # When used with vectorize(), making the value float will make the resulting \n # numpy array to be of float. nan values can be stored.\n return float(rej)\n\n #value_accessor = lambda job_results: job_results['test_result']['h0_rejected']\n vf_pval = np.vectorize(rej_accessor)\n # results['job_results'] is a dictionary: \n # {'test_result': (dict from running perform_test(te) '...':..., }\n rejs = vf_pval(results['job_results'])\n repeats, _, n_methods = results['job_results'].shape\n\n # yvalues (corresponding to xvalues) x #methods\n mean_rejs = np.mean(rejs, axis=0)\n #print mean_rejs\n #std_pvals = np.std(rejs, axis=0)\n #std_pvals = np.sqrt(mean_rejs*(1.0-mean_rejs))\n\n xvalues = func_xvalues(results)\n\n #ns = np.array(results[xkey])\n #te_proportion = 1.0 - results['tr_proportion']\n #test_sizes = ns*te_proportion\n line_styles = func_plot_fmt_map()\n method_labels = get_func2label_map()\n \n func_names = [f.__name__ for f in results['method_funcs'] ]\n plotted_methods = []\n for i in range(n_methods): \n #te_proportion = 1.0 - results['tr_proportion']\n fmt = line_styles[func_names[i]]\n #plt.errorbar(ns*te_proportion, mean_rejs[:, i], std_pvals[:, i])\n method_label = method_labels[func_names[i]]\n plotted_methods.append(method_label)\n plt.plot(xvalues, mean_rejs[:, i], fmt, label=method_label)\n '''\n else:\n # h0 is true \n z = stats.norm.isf( (1-confidence)/2.0)\n for i in range(n_methods):\n phat = mean_rejs[:, i]\n conf_iv = z*(phat*(1-phat)/repeats)**0.5\n #plt.errorbar(test_sizes, phat, conf_iv, fmt=line_styles[i], label=method_labels[i])\n plt.plot(test_sizes, mean_rejs[:, i], line_styles[i], label=method_labels[i])\n '''\n \n ylabel = 'Rejection rate'\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.xticks(np.hstack((xvalues) ))\n \n alpha = results['alpha']\n plt.legend(loc='best')\n title = '%s. %d trials. 
$\\\\alpha$ = %.2g.'%( results['prob_label'],\n repeats, alpha) if func_title is None else func_title(results)\n plt.title(title)\n plt.grid()\n if return_plot_values:\n return results, PlotValues(xvalues=xvalues, methods=plotted_methods,\n plot_matrix=mean_rejs.T)\n else:\n return results", "def plot(var):\n # MISSCHIEN KUNNEN WE HIER NOG IETS MEE\n # total_dead = len(train_data[\"Survived\"] == 0)\n # total_survived = len(train_data[\"Survived\"] == 1)\n # died = train_data[train_data[\"Survived\"] == 0][var].value_counts() / total_dead\n # survived = train_data[train_data[\"Survived\"] == 1][var].value_counts() / total_survived\n sns.set()\n sns.set_color_codes(\"pastel\")\n\n # order bars for family size variable\n if var == \"FamSize\":\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=.7, order=[\"alone\", 1, 2, 3, \"4 or more\"]).\\\n tick_params(labelsize=18)\n else:\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=1.1).tick_params(labelsize=18)\n\n # plot style properties\n ax = plt.gca()\n\n for ax in plt.gcf().axes:\n x = ax.get_xlabel()\n y = ax.get_ylabel()\n ax.set_xlabel(x, fontsize=20)\n ax.set_ylabel(y, fontsize=20)\n\n plt.title(\"Ratio of survivors for variable \" + str(var), fontsize=22)\n t = ax.title\n t.set_position([.5, 1.05])\n plt.ylim([0, 1])\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/survived_\" + str(var) + \".png\", bbox_inches=\"tight\")\n\n plt.show()", "def profile_group(func, args, kwargs, func_result):\n (collection, key, condition, initial, reduce) = args[:5]\n report_kvs = _profile_query(collection)\n\n if key:\n report_kvs['Group_Key'] = _to_json(key)\n\n if condition:\n report_kvs['Group_Condition'] = _to_json(condition)\n\n if initial:\n report_kvs['Group_Initial'] = _to_json(initial)\n\n if reduce:\n report_kvs['Group_Reduce'] = reduce\n\n return report_kvs", "def plotLambdaDependency(folder='results/', analysis='good', sigma=3):\n matplotlib.rc('text', usetex=True)\n if 'ind' in analysis:\n print 'Individual Results'\n data800 = [fileIO.cPicleRead(file) for file in g.glob('results/I800nm*.pkl')]\n data600 = [fileIO.cPicleRead(file) for file in g.glob('results/I800nm54*.pkl')]\n data700 = [fileIO.cPicleRead(file) for file in g.glob('results/I800nm52*.pkl')]\n data890 = [fileIO.cPicleRead(file) for file in g.glob('results/I800nm50*.pkl')]\n data = (data600, data700, data800, data890)\n datacontainer = []\n for x in data:\n wx = np.median([d['wx'] for d in x])\n wxerr = np.median([d['wxerr'] for d in x])\n wy = np.median([d['wy'] for d in x])\n wyerr = np.median([d['wyerr'] for d in x])\n dat = dict(wx=wx, wy=wy, wxerr=wxerr, wyerr=wyerr)\n datacontainer.append(dat)\n data = datacontainer\n waves = [600, 700, 800, 890]\n elif 'join' in analysis:\n print 'Joint Results'\n data800nm = fileIO.cPicleRead(folder+'J800nm.pkl')\n data600nm = fileIO.cPicleRead(folder+'J600nm54k.pkl')\n data700nm = fileIO.cPicleRead(folder+'J700nm52k.pkl')\n data890nm = fileIO.cPicleRead(folder+'J890nm50k.pkl')\n data = (data600nm, data700nm, data800nm, data890nm)\n waves = [int(d['wavelength'].replace('nm', '')) for d in data]\n else:\n print 'Using subset of data'\n #data600nm = fileIO.cPicleRead(folder+'G600nm0.pkl')\n data600nm = fileIO.cPicleRead(folder+'J600nm54k.pkl')\n #data700nm = fileIO.cPicleRead(folder+'G700nm0.pkl')\n data700nm = fileIO.cPicleRead(folder+'J700nm52k.pkl')\n #data800nm = fileIO.cPicleRead(folder+'G800nm0.pkl')\n data800nm = 
fileIO.cPicleRead(folder+'J800nm.pkl')\n #data890nm = fileIO.cPicleRead(folder+'G890nm0.pkl')\n data890nm = fileIO.cPicleRead(folder+'J890nm50k.pkl')\n data = (data600nm, data700nm, data800nm, data890nm)\n waves = [600, 700, 800, 890]\n\n wx = np.asarray([_FWHMGauss(d['wx']) for d in data])\n wxerr = np.asarray([_FWHMGauss(d['wxerr']) for d in data])\n wypix = np.asarray([d['wy'] for d in data])\n wy = _FWHMGauss(wypix)\n wyerrpix = np.asarray([d['wyerr'] for d in data])\n wyerr = _FWHMGauss(wyerrpix)\n waves = np.asarray(waves)\n\n w = np.sqrt(wx*wy)\n werr = np.sqrt(wxerr*wyerr)\n\n print zip(waves, w)\n\n #plot FWHM\n fig = plt.figure()\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n fig.subplots_adjust(hspace=0, top=0.93, bottom=0.17, left=0.11, right=0.95)\n ax1.set_title('CCD273 PSF Wavelength Dependency')\n\n ax1.errorbar(waves, wx, yerr=sigma*wxerr/3., fmt='o', label='Data')\n ax2.errorbar(waves, wy, yerr=sigma**wyerr/3., fmt='o', label='Data')\n ax3.errorbar(waves, w, yerr=sigma*werr, fmt='o', label='Data')\n\n #fit a power law\n fitfunc = lambda p, x: p[0] * x ** p[1]\n errfunc = lambda p, x, y: fitfunc(p, x) - y\n fit1, success = optimize.leastsq(errfunc, [1, -0.2], args=(waves, wx))\n fit2, success = optimize.leastsq(errfunc, [1, -0.2], args=(waves, wy))\n fit3, success = optimize.leastsq(errfunc, [1, -0.2], args=(waves, w))\n\n #requirement\n alpha=0.2\n x = np.arange(500, 950, 1)\n y = 37*x**-alpha\n # compute the best fit function from the best fit parameters\n corrfit1 = fitfunc(fit1, x)\n corrfit2 = fitfunc(fit2, x)\n corrfit3 = fitfunc(fit3, x)\n print 'Slope:', fit1[1]\n print 'Slope:', fit2[1]\n print 'Slope [requirement < -0.2]:', fit3[1]\n\n #ax1.plot(x, corrfit1, 'k-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (fit1[1]))\n #ax2.plot(x, corrfit2, 'k-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (fit2[1]))\n ax3.plot(x, y, 'r-', label=r'Requirement: $\\alpha \\leq - %.1f$' % alpha)\n #ax3.plot(x, corrfit3, 'k-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (fit3[1]))\n\n # Bayesian\n shift = 0.\n waves -= shift\n px, paramsx, errorsx, outliersx = powerlawFitWithOutliers(waves, wx, wxerr, outtriangle='WFWHMx.png')\n py, paramsy, errorsy, outliersy = powerlawFitWithOutliers(waves, wy, wyerr, outtriangle='WFWHMy.png')\n p, params, errors, outliers = powerlawFitWithOutliers(waves, w, werr, outtriangle='WFWHM.png')\n print paramsx[::-1], errorsx[::-1]\n print paramsy[::-1], errorsy[::-1]\n print params[::-1], errors[::-1]\n\n ax1.plot(x, paramsx[0]*(x-shift)**paramsx[1], 'g-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (paramsx[1]))\n ax2.plot(x, paramsy[0]*(x-shift)**paramsy[1], 'g-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (paramsy[1]))\n ax3.plot(x, params[0]*(x-shift)**params[1], 'g-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (params[1]))\n\n plt.sca(ax1)\n plt.xticks(visible=False)\n plt.sca(ax2)\n plt.xticks(visible=False)\n plt.sca(ax3)\n\n ax1.set_ylim(6.6, 13.5)\n ax2.set_ylim(6.6, 13.5)\n ax3.set_ylim(6.6, 13.5)\n ax1.set_xlim(550, 900)\n ax2.set_xlim(550, 900)\n ax3.set_xlim(550, 900)\n\n ax1.set_ylabel(r'FWHM$_{X} \\, [\\mu$m$]$')\n ax2.set_ylabel(r'FWHM$_{Y} \\, [\\mu$m$]$')\n ax3.set_ylabel(r'FWHM$\\, [\\mu$m$]$')\n ax3.set_xlabel('Wavelength [nm]')\n ax1.legend(shadow=True, fancybox=True, loc='best', numpoints=1)\n ax2.legend(shadow=True, fancybox=True, loc='best', numpoints=1)\n ax3.legend(shadow=True, fancybox=True, loc='best', numpoints=1)\n plt.savefig('LambdaDependency.pdf')\n 
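# (Editor's aside, not part of the stored snippet.) The power-law fit used in this
# plotLambdaDependency snippet can be exercised in isolation. Minimal sketch on
# synthetic data; names such as `waves_demo`/`fwhm_demo` are hypothetical.
import numpy as np
from scipy import optimize

waves_demo = np.array([600.0, 700.0, 800.0, 890.0])    # wavelength [nm]
fwhm_demo = 37.0 * waves_demo ** -0.2                   # fake FWHM measurements

fitfunc = lambda p, x: p[0] * x ** p[1]                 # power law a * x**b
errfunc = lambda p, x, y: fitfunc(p, x) - y             # residuals for leastsq
p_fit, ier = optimize.leastsq(errfunc, [1.0, -0.2], args=(waves_demo, fwhm_demo))
print('amplitude = %.3f, exponent = %.3f' % (p_fit[0], p_fit[1]))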
plt.close()\n\n print 'R2:'\n R2 = _R2FromGaussian(wxpix, wypix)*1e3\n print zip(waves, R2)\n errR2 = _R2err(wxpix, wypix, wxerrpix, wyerrpix)*1e3\n p, params, errors, outliers = powerlawFitWithOutliers(waves, R2, errR2, outtriangle='WR2.png')\n print params[::-1], errors[::-1]\n\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n fig.subplots_adjust(hspace=0, top=0.93, bottom=0.17, left=0.11, right=0.93)\n ax1.set_title('CCD273 PSF Wavelength Dependency')\n ax1.errorbar(waves, R2, yerr=sigma*errR2, fmt='o', label='Data')\n ax1.plot(x, params[0] * (x - shift)**params[1], 'm-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (params[1]))\n #ax1.plot(waves[outliers], R2[outliers], 'ro', ms=20, mfc='none', mec='red')\n ax1.set_ylabel(r'R^{2} \\, [$mas$^{2}]$')\n ax1.legend(shadow=True, fancybox=True, numpoints=1, loc='lower right')\n\n print 'Ellipticity:'\n ell = _ellipticityFromGaussian(wxpix, wypix) + 1\n print zip(waves, ell)\n ellerr = _ellipticityerr(wxpix, wypix, wxerrpix, wyerrpix)\n p, params, errors, outliers = powerlawFitWithOutliers(waves, ell, ellerr, outtriangle='Well.png')\n print params[::-1], errors[::-1]\n\n fitfunc = lambda p, x: p[0] * x ** p[1]\n errfunc = lambda p, x, y: fitfunc(p, x) - y\n fit1, success = optimize.leastsq(errfunc, [2., -0.1], args=(waves, ell), maxfev=100000)\n print fit1[::-1]\n\n ax2.errorbar(waves, ell, yerr=sigma*ellerr, fmt='o', label='Data')\n ax2.plot(x, params[0] * (x - shift)**params[1], 'm-', label=r'Power Law Fit: $\\alpha \\sim %.2f $' % (params[1]))\n #ax2.plot(waves[outliers], ell[outliers], 'ro', ms=20, mfc='none', mec='red')\n ax1.legend(shadow=True, fancybox=True, numpoints=1, loc='lower right')\n ax2.legend(shadow=True, fancybox=True, numpoints=1)\n ax2.set_ylabel('Ellipticity')\n\n ax1.set_ylim(0.65, 2.5)\n ax2.set_ylim(1+-0.01, 1+0.16)\n\n plt.sca(ax1)\n plt.xticks(visible=False)\n\n plt.savefig('LambdaR2ell.pdf')\n plt.close()", "def schedule_group_apply_edge(graph, u, v, eid, apply_func, group_by, inplace, outframe=...): # -> None:\n ...", "def plot(self, plot_cmd=None, tf=lambda y: y):\r\n if not plot_cmd:\r\n plot_cmd = self.plot_cmd\r\n colors = 'bgrcmyk'\r\n pylab.hold(False)\r\n res = self.res\r\n\r\n flatx, flatf = self.flattened()\r\n minf = np.inf\r\n for i in flatf:\r\n minf = min((minf, min(flatf[i])))\r\n addf = 1e-9 - minf if minf <= 0 else 0\r\n for i in sorted(res.keys()): # we plot not all values here\r\n if type(i) is int:\r\n color = colors[i % len(colors)]\r\n arx = sorted(res[i].keys())\r\n plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')\r\n pylab.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)\r\n pylab.hold(True)\r\n plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')\r\n pylab.ylabel('f + ' + str(addf))\r\n pylab.draw()\r\n show()\r\n # raw_input('press return')\r\n return self", "def plot_ps(func, span=range(0,8)):\n nplots = len(span)\n ax = qax(int(len(span)))\n for pln in range(0, len(span)):\n plt.sca(ax[pln])\n func(span[pln])", "def factor_graph_builders(self, rating_groups, ranks, weights):\n flatten_ratings = sum(map(tuple, rating_groups), ())\n flatten_weights = sum(map(tuple, weights), ())\n size = len(flatten_ratings)\n group_size = len(rating_groups)\n # create variables\n rating_vars = [Variable() for x in range(size)]\n perf_vars = [Variable() for x in range(size)]\n team_perf_vars = [Variable() for x in range(group_size)]\n team_diff_vars = [Variable() for x in range(group_size - 1)]\n team_sizes = _team_sizes(rating_groups)\n 
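# (Editor's aside, not part of the stored snippet.) `_team_sizes` is referenced above but
# not shown; one plausible reading is a cumulative-size helper like this hypothetical
# reimplementation (illustration only, not the library's own code).
def _team_sizes_sketch(rating_groups):
    sizes = []
    total = 0
    for group in rating_groups:
        total += len(group)
        sizes.append(total)
    return sizes

# two teams with 2 and 3 players -> cumulative end indices [2, 5]
assert _team_sizes_sketch([('a', 'b'), ('c', 'd', 'e')]) == [2, 5]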
# layer builders\n def build_rating_layer():\n for rating_var, rating in zip(rating_vars, flatten_ratings):\n yield PriorFactor(rating_var, rating, self.tau)\n def build_perf_layer():\n for rating_var, perf_var in zip(rating_vars, perf_vars):\n yield LikelihoodFactor(rating_var, perf_var, self.beta ** 2)\n def build_team_perf_layer():\n for team, team_perf_var in enumerate(team_perf_vars):\n if team > 0:\n start = team_sizes[team - 1]\n else:\n start = 0\n end = team_sizes[team]\n child_perf_vars = perf_vars[start:end]\n coeffs = flatten_weights[start:end]\n yield SumFactor(team_perf_var, child_perf_vars, coeffs)\n def build_team_diff_layer():\n for team, team_diff_var in enumerate(team_diff_vars):\n yield SumFactor(team_diff_var,\n team_perf_vars[team:team + 2], [+1, -1])\n def build_trunc_layer():\n for x, team_diff_var in enumerate(team_diff_vars):\n if callable(self.draw_probability):\n # dynamic draw probability\n team_perf1, team_perf2 = team_perf_vars[x:x + 2]\n args = (Rating(team_perf1), Rating(team_perf2), self)\n draw_probability = self.draw_probability(*args)\n else:\n # static draw probability\n draw_probability = self.draw_probability\n size = sum(map(len, rating_groups[x:x + 2]))\n draw_margin = calc_draw_margin(draw_probability, size, self)\n if ranks[x] == ranks[x + 1]: # is a tie?\n v_func, w_func = self.v_draw, self.w_draw\n else:\n v_func, w_func = self.v_win, self.w_win\n yield TruncateFactor(team_diff_var,\n v_func, w_func, draw_margin)\n # build layers\n return (build_rating_layer, build_perf_layer, build_team_perf_layer,\n build_team_diff_layer, build_trunc_layer)", "def display_group_density_plot(df, groupby, on, palette = None, figsize = None, title=\"\", ax=None):\n if palette is None:\n palette = sns.color_palette('Set2')\n if figsize is None:\n figsize = (10, 5)\n if not isinstance(df, pd.core.frame.DataFrame):\n raise ValueError('df must be a pandas DataFrame')\n\n if not groupby:\n raise ValueError('groupby parameter must be provided')\n\n elif not groupby in df.keys():\n raise ValueError(groupby + ' column does not exist in the given DataFrame')\n\n if not on:\n raise ValueError('on parameter must be provided')\n\n elif not on in df.keys():\n raise ValueError(on + ' column does not exist in the given DataFrame')\n\n if len(set(df[groupby])) > 10:\n groups = df[groupby].value_counts().index[:10]\n\n else:\n groups = set(df[groupby])\n\n # Get relevant palette\n if palette:\n palette = palette[:len(groups)]\n else:\n palette = sns.color_palette()[:len(groups)]\n\n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n \n ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')\n for value, color in zip(groups, palette):\n sns.kdeplot(df.loc[df[groupby] == value][on], shade=True, color=color, label=value, ax=ax)\n if not title:\n title = str(\"Distribution of \" + on + \" per \" + groupby + \" group\")\n \n ax.set_title(title,fontsize=10)\n ax.set_xlabel(on, fontsize=10)\n return ax", "def group(x, y, a, b, l, m, i=0.2644, f = 100):\n\n #Formula for distance between 2 points on a graph\n #((x2 - x1)**2 + (y2 - y1)**2)**(1/2)\n #sqrt of x = x ** (1/2)\n\n len1 = ((a - x)**2 + (b - y)**2)**(1/2) #distance between (x,y) and (a,b)\n len2 = ((l - x)**2 + (m - y)**2)**(1/2) #distance between (x,y) and (l,m)\n len3 = ((l - a)**2 + (m - b)**2)**(1/2) #distance between (a,b) and (l,m)\n\n def largest(array, n):\n \"\"\"\n Retruns the largest number from an array\n :param array: array\n :param n: int, lenght of array\n :return: largest number in 
the array\"\"\"\n\n #set max as first array element\n max = array[0]\n\n #compare current max with next array element, replace max if next element is larger\n\n for i in range(1, n):\n if array[i] > max:\n max = array[i]\n return max\n\n #initialize array with distances betwen points, find length of the array and pass these as parameters to the largest() function\n arr = [len1, len2, len3]\n arr_len = len(arr)\n result = largest(arr, arr_len)\n\n #subtract the diameter of the bullet being used\n final = (100 * (result - i)//1)/100\n\n #print the group size, distance to target, and bullet diameter\n group_size = print(\"Group size is \" + str(final) + \" at a distance of\" + str(f) + \"yds with a bullet diameter of\" + str(i))\n return group_size", "def main(fpath):\n\t# Load results data\n\twith open(fpath) as f:\n\t\tlines = f.readlines()\n\n\tresults = []\n\tfor line in lines:\n\t\tif not line.startswith(\"//\"):\n\t\t\tresults.append(json.loads(line))\n\n\t# Format relevent data\n\tresults = sorted(results, key=lambda k: k['data_lens'][\"mean\"])\n\t# x = [np.log(x[\"block_lens\"][\"mean\"]) for x in results]\n\tx = [x[\"data_lens\"][\"mean\"] for x in results]\n\ty = [x[\"mean\"] for x in results]\n\tyy = [x[\"block_lens\"][\"mean\"] for x in results]\n\n\n\t# Plot graphs\n\tplt.figure(figsize=(14,5))\n\tplt.subplot(1,2,1)\n\tplt.title('Ratio of Total Block Size to Primary Data Size Against Primary Data Size')\n\tplt.ylabel('Total Block Size / Primary Data Size')\n\t# plt.xlabel('log( Primary Data Size )')\n\tplt.xlabel('Primary Data Size (chars)')\n\tplt.plot(x, y)\n\n\tplt.subplot(1,2,2)\n\tplt.title('Total Block Size Against Primary Data Size')\n\tplt.ylabel('Total Block Size (chars)')\n\tplt.xlabel('Primary Data Size (chars)')\n\tplt.plot(x, yy)\n\n\n\tplt.show()", "def plot_trends(group, country=\"US\", state=None, place=None, predictive_method=\"ARIMA\"):\n print(f\"* Plotting Google Trends of `{group}` for {country} - {state or 'All'}\")\n group_queries = get_group_queries(group, only_root=True)\n\n n_queries = len(group_queries)\n n_cols = 3\n n_rows = int(n_queries / n_cols) + (1 if n_queries % n_cols else 0)\n\n # Annotations\n annotations = []\n\n # Initialize figure with subplots\n subplot_titles = [\"%s...\" % t[:22] if len(t) >= 22 else t for t in group_queries]\n fig = make_subplots(\n rows=n_rows, cols=n_cols, subplot_titles=subplot_titles,\n shared_yaxes=True,\n print_grid=True\n )\n\n # Marked Dates\n covid_start_date = COVID_START_DATE\n reopen_date = REOPEN_DATE\n reopen_date_minus_1 = REOPEN_DATE_MINUS_1\n data_start_date = DATA_START_DATE\n data_end_date = DATA_END_DATE\n\n # Figure variable\n baseline = 0\n value_range = [0, 100]\n\n # Model params\n model_params = []\n\n for idx, query in enumerate(group_queries):\n row = int(idx / n_cols) + 1\n col = idx % n_cols + 1\n showlegend = idx == 0\n\n query_file_path = get_data_filename(group, query, country=country, state=state, full=True)\n df = pd.read_csv(query_file_path, parse_dates=True)\n count = df[\"date\"].count()\n\n # ARIMA Model\n if query in df.columns:\n print(\"Query: \", query)\n # get_arima_params(df[query])\n df, model = arima_predict(df, from_date=PREDICT_FROM_DATE, value_col=query)\n params = model.get_params()\n model_params.append([query, str(params[\"order\"])])\n # return False\n \n # No data\n if count == 0:\n continue\n\n # Process\n stayhome_order_date = place.get(\"ClosedFrom\") if place else SOCIAL_DISTANCE_ORDER_DATE\n\n df = df[(df[\"date\"] >= data_start_date) & (df[\"date\"] <= 
data_end_date)]\n df_before = df[(df[\"date\"] <= reopen_date)]\n df_after = df[(df[\"date\"] >= reopen_date_minus_1)]\n df_prediction = df[df[\"is_predicted\"] == 1]\n\n # Normalize\n if config.TRENDS_APPLY_NORMALIZATION:\n max_value = df[query].max()\n baseline = df_before[query].median()\n df[\"value\"] = df[query].apply(lambda x: (x - baseline) / max_value)\n df_before[\"value\"] = df_before[query].apply(lambda x: (x - baseline) / max_value)\n df_after[\"value\"] = df_after[query].apply(lambda x: (x - baseline) / max_value)\n baseline = 0\n value_range = [-1, 1]\n else:\n max_value = df[query].max()\n baseline = df_before[query].median()\n df[\"value\"] = df[query]\n df_before[\"value\"] = df_before[query]\n df_after[\"value\"] = df_after[query]\n\n # Compute difference\n query_text = query.split(\"+\")[0].strip() + \" + ...\" if \"+\" in query else query\n actual_mean, actual_meanCI95min, actual_meanCI95max = mean_confidence_interval(df_prediction[query])\n predict_mean = df_prediction[\"prediction\"].mean()\n diff = round(100 * (actual_mean - predict_mean) / predict_mean, 1)\n diffCI95min = round(100 * (actual_meanCI95min - predict_mean) / predict_mean, 1)\n diffCI95max = round(100 * (actual_meanCI95max - predict_mean) / predict_mean, 1)\n x_date = list(df['date'])[int(df[\"date\"].count()/2)]\n diff_annot = go.layout.Annotation(\n text=f'<b>{query_text}</b><br><sub><b style=\"color:{config.COLOR_UPTREND if diff >= 0 else config.COLOR_DOWNTREND}\">{diff}%</b>; 95%CI, [{diffCI95min}%, {diffCI95max}%]</sub>',\n showarrow=False, xanchor=\"center\", yanchor=\"top\", \n x=x_date,\n y=0.0,\n xshift=0,\n yshift=-5,\n xref=f\"x{'' if idx == 0 else idx + 1}\",\n yref=f\"y{'' if idx == 0 else idx + 1}\"\n )\n annotations.append(diff_annot)\n\n # Lockdown period\n max_y = max(df[query].max(), abs(df[query].min()))\n min_y = -max_y\n shape_lockdown = go.layout.Shape(**{\"type\": \"rect\",\"y0\":100,\"y1\": -100,\"x0\":COVID_START_DATE, \n \"x1\":REOPEN_DATE,\"xref\":\"x1\",\"yref\":\"y1\",\"layer\":\"below\",\n \"fillcolor\":\"#eeeeee\", \"line\":dict(width=0), \"line_width\": 0})\n fig.add_shape(shape_lockdown, row=row, col=col)\n\n # Horizontal line \n shape = go.layout.Shape(**{\"type\": \"line\",\"y0\":baseline,\"y1\": baseline,\"x0\":str(df[\"date\"].values[0]), \n \"x1\":str(df[\"date\"].values[-1]),\"xref\":\"x1\",\"yref\":\"y1\",\"layer\":\"below\",\n \"line\": {\"color\": \"rgb(200, 200, 200)\",\"width\": 1.5}})\n fig.add_shape(shape, row=row, col=col)\n\n # Stay home order\n if stayhome_order_date:\n shape_stayhome_order = go.layout.Shape(**{\"type\": \"line\",\"y0\":-0.25,\"y1\": 0.25,\"x0\":stayhome_order_date, \n \"x1\":stayhome_order_date,\"xref\":\"x1\",\"yref\":\"y1\",\n \"line\": {\"color\": \"blue\",\"width\": 1.5, \"dash\": \"dot\"}})\n fig.add_shape(shape_stayhome_order, row=row, col=col)\n\n # Plot\n subplot_before = go.Scatter(x=df_before[\"date\"], y=df_before[\"value\"], \n mode=\"lines\", name=\"Before Lockdown\",\n line=dict(width=1, color=config.LINE_COLOR_BEFORE), \n line_shape=\"linear\", showlegend=False) # linear or spline \n subplot_after = go.Scatter(x=df_after[\"date\"], y=df_after[\"value\"], \n mode=\"lines\", name=\"Actual Queries\",\n line=dict(width=1.5, color=config.LINE_COLOR_AFTER), \n line_shape=\"linear\", showlegend=showlegend) # linear or spline \n subplot_prediction = go.Scatter(x=df_prediction[\"date\"], y=df_prediction[\"prediction\"], \n mode=\"lines\", name=\"Expected Queries\",\n line=dict(width=2, color=config.LINE_COLOR_BEFORE, dash=\"dot\"), 
\n line_shape=\"linear\", showlegend=showlegend) # linear or spline \n subplot_lockdown_legend = go.Bar(x=[reopen_date,], y=[0,], \n name=\"Early Lockdown Phase\", \n showlegend=showlegend,\n marker_color=\"#eeeeee\")\n fig.add_trace(subplot_before, row=row, col=col)\n fig.add_trace(subplot_after, row=row, col=col)\n fig.add_trace(subplot_prediction, row=row, col=col)\n if idx == 0:\n fig.add_trace(subplot_lockdown_legend, row=row, col=col)\n\n # break\n\n # Caption\n # caption = go.layout.Annotation(\n # showarrow=False,\n # text=\"\",\n # xanchor=\"center\",\n # x=0.5,\n # yanchor=\"top\",\n # y=0.0,\n # yshift=0,\n # )\n\n # Layout\n # location = f\"{country}.{state}\" if state else country\n # fig_title = f\"\"\"Term: {group}. Location: {location}<br>\n # <span style=\"font-size: 14px;line-height:1\">Period: {data_start_date} - {data_end_date}\n # <br>Lockdown Period: {covid_start_date} - {PREDICT_FROM_DATE}</span>\"\"\"\n fig_title = \"\"\n fig.update_layout(title={\"text\": fig_title, \"x\":0.5, \"xanchor\": \"center\"}, \n title_font=dict(size=12),\n height=50 + n_rows * 175, width=250 * n_cols, coloraxis=dict(colorscale=\"Bluered_r\"), \n showlegend=True, plot_bgcolor=\"rgb(255,255,255)\", titlefont={\"size\": 30},\n margin={\"t\": 50},\n annotations=annotations,\n legend=dict(\n orientation=\"v\",\n yanchor=\"bottom\",\n y=0,\n xanchor=\"right\",\n x=1,\n bgcolor=\"white\",\n bordercolor=\"#333\",\n borderwidth=1\n )\n )\n fig.update_xaxes(showgrid=False, showticklabels=False, showline=False)\n fig.update_yaxes(showgrid=False, showticklabels=False, showline=True, range=value_range)\n\n # Store model parameters\n mkdir_if_not_exist(config.TRENDS_OUTPUT_DIR)\n df_params = pd.DataFrame(model_params, columns=[\"Query\", \"Order\"])\n df_params.to_csv(\"%s/ARIMA_orders_%s.csv\" % (config.TRENDS_OUTPUT_DIR, group), index=False)\n\n # Create online URL\n url = py.iplot(fig, filename=group, file_id=group)\n print(\"URL:\", url.src)\n\n if config.TRENDS_EXPORT_FIGURES:\n # Save\n mkdir_if_not_exist(config.TRENDS_FIGURES_DIR)\n fig.write_image(\"%s/%s_%s_%s.jpg\" % (config.TRENDS_FIGURES_DIR, country, state or \"All\", group))\n # fig.show()\n else:\n # Show\n fig.show()", "def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline,\n units, scalings, titles, axes, plot_type, cmap=None,\n gfp=False, window_title=None, spatial_colors=False,\n selectable=True, zorder='unsorted',\n noise_cov=None, colorbar=True, mask=None, mask_style=None,\n mask_cmap=None, mask_alpha=.25, time_unit='s',\n show_names=False, group_by=None, sphere=None):\n import matplotlib.pyplot as plt\n\n # For evoked.plot_image ...\n # First input checks for group_by and axes if any of them is not None.\n # Either both must be dicts, or neither.\n # If the former, the two dicts provide picks and axes to plot them to.\n # Then, we call this function recursively for each entry in `group_by`.\n if plot_type == \"image\" and isinstance(group_by, dict):\n if axes is None:\n axes = dict()\n for sel in group_by:\n plt.figure()\n axes[sel] = plt.axes()\n if not isinstance(axes, dict):\n raise ValueError(\"If `group_by` is a dict, `axes` must be \"\n \"a dict of axes or None.\")\n _validate_if_list_of_axes(list(axes.values()))\n remove_xlabels = any([_is_last_row(ax) for ax in axes.values()])\n for sel in group_by: # ... 
we loop over selections\n if sel not in axes:\n raise ValueError(sel + \" present in `group_by`, but not \"\n \"found in `axes`\")\n ax = axes[sel]\n # the unwieldy dict comp below defaults the title to the sel\n titles = ({channel_type(evoked.info, idx): sel\n for idx in group_by[sel]} if titles is None else titles)\n _plot_evoked(evoked, group_by[sel], exclude, unit, show, ylim,\n proj, xlim, hline, units, scalings, titles,\n ax, plot_type, cmap=cmap, gfp=gfp,\n window_title=window_title,\n selectable=selectable, noise_cov=noise_cov,\n colorbar=colorbar, mask=mask,\n mask_style=mask_style, mask_cmap=mask_cmap,\n mask_alpha=mask_alpha, time_unit=time_unit,\n show_names=show_names,\n sphere=sphere)\n if remove_xlabels and not _is_last_row(ax):\n ax.set_xticklabels([])\n ax.set_xlabel(\"\")\n ims = [ax.images[0] for ax in axes.values()]\n clims = np.array([im.get_clim() for im in ims])\n min, max = clims.min(), clims.max()\n for im in ims:\n im.set_clim(min, max)\n figs = [ax.get_figure() for ax in axes.values()]\n if len(set(figs)) == 1:\n return figs[0]\n else:\n return figs\n elif isinstance(axes, dict):\n raise ValueError(\"If `group_by` is not a dict, \"\n \"`axes` must not be a dict either.\")\n\n time_unit, times = _check_time_unit(time_unit, evoked.times)\n evoked = evoked.copy() # we modify info\n info = evoked.info\n if axes is not None and proj == 'interactive':\n raise RuntimeError('Currently only single axis figures are supported'\n ' for interactive SSP selection.')\n if isinstance(gfp, str) and gfp != 'only':\n raise ValueError('gfp must be boolean or \"only\". Got %s' % gfp)\n\n scalings = _handle_default('scalings', scalings)\n titles = _handle_default('titles', titles)\n units = _handle_default('units', units)\n\n picks = _picks_to_idx(info, picks, none='all', exclude=())\n if len(picks) != len(set(picks)):\n raise ValueError(\"`picks` are not unique. 
Please remove duplicates.\")\n\n bad_ch_idx = [info['ch_names'].index(ch) for ch in info['bads']\n if ch in info['ch_names']]\n if len(exclude) > 0:\n if isinstance(exclude, str) and exclude == 'bads':\n exclude = bad_ch_idx\n elif (isinstance(exclude, list) and\n all(isinstance(ch, str) for ch in exclude)):\n exclude = [info['ch_names'].index(ch) for ch in exclude]\n else:\n raise ValueError(\n 'exclude has to be a list of channel names or \"bads\"')\n\n picks = np.array([pick for pick in picks if pick not in exclude])\n\n types = np.array(_get_channel_types(info, picks), str)\n ch_types_used = list()\n for this_type in _VALID_CHANNEL_TYPES:\n if this_type in types:\n ch_types_used.append(this_type)\n\n fig = None\n if axes is None:\n fig, axes = plt.subplots(len(ch_types_used), 1)\n fig.subplots_adjust(left=0.125, bottom=0.1, right=0.975, top=0.92,\n hspace=0.63)\n if isinstance(axes, plt.Axes):\n axes = [axes]\n fig.set_size_inches(6.4, 2 + len(axes))\n\n if isinstance(axes, plt.Axes):\n axes = [axes]\n elif isinstance(axes, np.ndarray):\n axes = list(axes)\n\n if fig is None:\n fig = axes[0].get_figure()\n\n if window_title is not None:\n _set_window_title(fig, window_title)\n\n if len(axes) != len(ch_types_used):\n raise ValueError('Number of axes (%g) must match number of channel '\n 'types (%d: %s)' % (len(axes), len(ch_types_used),\n sorted(ch_types_used)))\n _check_option('proj', proj, (True, False, 'interactive', 'reconstruct'))\n noise_cov = _check_cov(noise_cov, info)\n if proj == 'reconstruct' and noise_cov is not None:\n raise ValueError('Cannot use proj=\"reconstruct\" when noise_cov is not '\n 'None')\n projector, whitened_ch_names = _setup_plot_projector(\n info, noise_cov, proj=proj is True, nave=evoked.nave)\n if len(whitened_ch_names) > 0:\n unit = False\n if projector is not None:\n evoked.data[:] = np.dot(projector, evoked.data)\n if proj == 'reconstruct':\n evoked = evoked._reconstruct_proj()\n\n if plot_type == 'butterfly':\n _plot_lines(evoked.data, info, picks, fig, axes, spatial_colors, unit,\n units, scalings, hline, gfp, types, zorder, xlim, ylim,\n times, bad_ch_idx, titles, ch_types_used, selectable,\n False, line_alpha=1., nave=evoked.nave,\n time_unit=time_unit, sphere=sphere)\n plt.setp(axes, xlabel='Time (%s)' % time_unit)\n\n elif plot_type == 'image':\n for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):\n use_nave = evoked.nave if ai == 0 else None\n this_picks = list(picks[types == this_type])\n _plot_image(evoked.data, ax, this_type, this_picks, cmap, unit,\n units, scalings, times, xlim, ylim, titles,\n colorbar=colorbar, mask=mask, mask_style=mask_style,\n mask_cmap=mask_cmap, mask_alpha=mask_alpha,\n nave=use_nave, time_unit=time_unit,\n show_names=show_names, ch_names=evoked.ch_names)\n if proj == 'interactive':\n _check_delayed_ssp(evoked)\n params = dict(evoked=evoked, fig=fig, projs=info['projs'], axes=axes,\n types=types, units=units, scalings=scalings, unit=unit,\n ch_types_used=ch_types_used, picks=picks,\n plot_update_proj_callback=_plot_update_evoked,\n plot_type=plot_type)\n _draw_proj_checkbox(None, params)\n\n plt.setp(fig.axes[:len(ch_types_used) - 1], xlabel='')\n fig.canvas.draw() # for axes plots update axes.\n plt_show(show)\n return fig", "def compare_distr(adata, key, groupby = 'batch', **kwags):\n\n plt.figure(None, (8, 6), 70)\n levels = adata.obs[groupby].cat.categories\n for level in levels:\n plt.hist(adata[adata.obs[groupby] == level].obs[key], alpha = 0.5,\n label = level, density = True , **kwags)\n 
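# (Editor's aside, not part of the stored snippet.) The per-group overlay pattern in
# `compare_distr` can be reproduced without AnnData. Minimal sketch on a synthetic
# pandas DataFrame; column names `batch`/`n_genes` are hypothetical.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
df_demo = pd.DataFrame({
    'batch': np.repeat(['batch1', 'batch2'], 500),
    'n_genes': np.concatenate([rng.normal(2000, 300, 500), rng.normal(2400, 350, 500)]),
})
for level, sub in df_demo.groupby('batch'):
    plt.hist(sub['n_genes'], bins=40, alpha=0.5, density=True, label=level)
plt.legend()
plt.title('n_genes by batch')
plt.show()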
plt.legend()\n plt.title(key)\n plt.show()", "def api_ranking(dcid):\n parents = json.loads(api_parent_place(dcid))\n selected_parents = []\n parent_names = {}\n for parent in parents:\n parent_dcid = parent['dcid']\n if parent_dcid == 'country/USA':\n continue\n if parent_dcid.startswith('zip'):\n continue\n selected_parents.append(parent_dcid)\n parent_names[parent_dcid] = parent['name']\n if len(selected_parents) == 2:\n break\n selected_parents.append('country/USA')\n parent_names['country/USA'] = 'United States'\n result = collections.defaultdict(list)\n # TODO(boxu): make the stats_vars in a config.\n for parent in selected_parents:\n # Population ranking\n result['Population'].append({\n 'name': parent_names[parent],\n 'data': get_related_place(\n dcid, 'Person', 'count', 'measuredValue',\n measurement_method='CensusACS5yrSurvey',\n same_place_type=True, within_place=parent)})\n # Median income\n result['Median Income'].append({\n 'name': parent_names[parent],\n 'data': get_related_place(\n dcid, 'Person', 'income', 'medianValue',\n pvs_string='age^Years15Onwards^incomeStatus^WithIncome',\n measurement_method='CensusACS5yrSurvey',\n same_place_type=True, within_place=parent)})\n # Median age\n result['Median Age'].append({\n 'name': parent_names[parent],\n 'data': get_related_place(\n dcid, 'Person', 'age', 'medianValue',\n measurement_method='CensusACS5yrSurvey',\n same_place_type=True, within_place=parent)})\n # Unemployment rate\n result['Unemployment Rate'].append({\n 'name': parent_names[parent],\n 'data': get_related_place(\n dcid, 'Person', 'unemploymentRate', 'measuredValue',\n measurement_method='BLSSeasonallyUnadjusted',\n same_place_type=True, within_place=parent)})\n # Crime\n result['Crime per capita'].append({\n 'name': parent_names[parent],\n 'data': get_related_place(\n dcid, 'CriminalActivities', 'count', 'measuredValue',\n pvs_string='crimeType^UCR_CombinedCrime',\n same_place_type=True, within_place=parent, is_per_capita=True)})\n result['label'] = [\n 'Population', 'Median Income', 'Median Age', 'Unemployment Rate',\n 'Crime per capita']\n for label in result['label']:\n no_data = True\n for item in result[label]:\n if item['data']:\n no_data = False\n break\n if no_data:\n del result[label]\n result['label'] = [x for x in result['label'] if x in result]\n return result", "def plot_results(outputs_table_totals, elec_benefits, gas_benefits):\n summer_months = [6, 7, 8, 9]\n shoulder_months = [3, 4, 5, 10]\n winter_months = [11, 12, 1, 2]\n peak_hours = [16, 17, 18, 19, 20]\n pct_hours_in_summer = 2928 / 8760\n pct_hours_in_shoulder = 2952 / 8760\n pct_hours_in_winter = 2880 / 8760\n\n trc_costs_record = outputs_table_totals[\"TRC Costs ($)\"]\n pac_costs_record = outputs_table_totals[\"PAC Costs ($)\"]\n trc_record = outputs_table_totals[\"TRC\"]\n pac_record = outputs_table_totals[\"PAC\"]\n lifecycle_net_mwh = outputs_table_totals[\"Electricity Lifecycle Net Savings (MWh)\"]\n lifecycle_net_therms = outputs_table_totals[\"Gas Lifecycle Net Savings (Therms)\"]\n lifecycle_net_ghg = outputs_table_totals[\"Total Lifecycle GHG Savings (Tons)\"]\n\n # Getting variables for plots\n elec_benefits_cols = (\n [\"hourly_savings\"] + ACC_COMPONENTS_ELECTRICITY + [\"av_csts_levelized\"]\n )\n\n elec_benefits_hour_month_year = (\n elec_benefits.groupby([\"hour_of_day\", \"year\", \"month\"])\n .agg(\n {\n **{component: \"sum\" for component in ACC_COMPONENTS_ELECTRICITY},\n **{\n \"hourly_savings\": \"sum\",\n \"marginal_ghg\": \"sum\",\n \"av_csts_levelized\": \"mean\",\n 
},\n }\n )\n .reset_index()\n )\n\n total_benefits = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\"total\"].sum()\n )\n\n summer_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n summer_peak_benefits = elec_benefits_hour_month_year[\"total\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].sum()\n shoulder_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(shoulder_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n winter_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n total_savings = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\"hourly_savings\"].sum()\n )\n summer_savings = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n shoulder_savings = list(\n elec_benefits_hour_month_year[\n ((elec_benefits_hour_month_year[\"month\"].isin(shoulder_months)))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n summer_peak_savings = elec_benefits_hour_month_year[\"hourly_savings\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].sum()\n winter_savings = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n total_av_csts_avg = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\n \"av_csts_levelized\"\n ].mean()\n )\n summer_av_csts_avg = list(\n pct_hours_in_summer\n * elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n summer_peak_av_csts_avg = elec_benefits_hour_month_year[\"av_csts_levelized\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].mean()\n shoulder_av_csts_avg = list(\n pct_hours_in_shoulder\n * elec_benefits_hour_month_year[\n ((elec_benefits_hour_month_year[\"month\"].isin(shoulder_months)))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n winter_av_csts_avg = list(\n pct_hours_in_winter\n * elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n\n elec_benefits_sum_by_hod = (\n elec_benefits[elec_benefits_cols].groupby(elec_benefits[\"hour_of_day\"]).sum()\n )\n elec_benefits_hoy = (\n elec_benefits[elec_benefits_cols]\n .groupby(elec_benefits[\"hour_of_year\"])\n .sum()\n .cumsum()\n .reset_index()\n )\n sav_avcsts_288 = (\n elec_benefits.groupby([\"hour_of_day\", \"month\"])\n .agg(\n {\n **{component: \"sum\" for component in ACC_COMPONENTS_ELECTRICITY},\n **{\n \"hourly_savings\": \"sum\",\n \"marginal_ghg\": \"sum\",\n \"av_csts_levelized\": \"mean\",\n },\n }\n )\n .reset_index()\n )\n sav_avcsts_288 = sav_avcsts_288[\n [\"hour_of_day\", \"month\", \"hourly_savings\", \"total\", \"marginal_ghg\"]\n ]\n ghgsav = 
sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"marginal_ghg\")\n sav = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"hourly_savings\")\n avcsts = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"total\")\n\n # savings load shape plot\n fig0, (ax1, ax2, ax3) = plt.subplots(\n 1, 3, figsize=(18, 5), sharex=True, sharey=True\n )\n plt.subplots_adjust(wspace=0, hspace=0)\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels1 = [\"Summer\"]\n legend_labels2 = [\"Shoulder\"]\n legend_labels3 = [\"Winter\"]\n\n ax1.plot(\n hod,\n summer_savings,\n c=\"firebrick\",\n linewidth=5,\n marker=\"$\\u25EF$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax2.plot(\n hod,\n shoulder_savings,\n c=\"royalblue\",\n linewidth=5,\n marker=\"$\\u2206$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax3.plot(\n hod,\n winter_savings,\n c=\"green\",\n linewidth=5,\n marker=\"$\\u25A1$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax1.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax2.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax3.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n leg1 = ax1.legend(legend_labels1, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels2, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels3, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax1.set_ylabel(\"Savings (MWh/hr)\", size=16)\n ax2.set_xlabel(\"Hour of Day\", size=16)\n\n if max(summer_savings + shoulder_savings + winter_savings) < 0:\n ymax = 0\n else:\n ymax = max(summer_savings + shoulder_savings + winter_savings)\n if min(summer_savings + shoulder_savings + winter_savings) > 0:\n ymin = 0\n else:\n ymin = min(summer_savings + shoulder_savings + winter_savings)\n\n # Tick and lebel parameters\n ax1.set_ylim(ymin * 1.08, ymax * 1.08)\n ax1.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax1.set_xticks(np.arange(0, 24, step=4))\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax1.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax2.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax2.xaxis.set_minor_locator(AutoMinorLocator())\n ax3.tick_params(\n which=\"major\", axis=\"x\", 
direction=\"out\", length=7, width=2, labelsize=14\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n # Set plot title, size, and position\n ax1.set_title(\"Seasonal Savings Load Shapes\", size=18, loc=\"left\").set_position(\n [0, 1.03]\n )\n\n # benefits_seasonal_shape_plot\n fig1, (ax1, ax2, ax3) = plt.subplots(\n 1, 3, figsize=(18, 5), sharex=True, sharey=True\n )\n plt.subplots_adjust(wspace=0, hspace=0)\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels1 = [\"Summer\"]\n legend_labels2 = [\"Shoulder\"]\n legend_labels3 = [\"Winter\"]\n\n ax1.plot(\n hod,\n summer_benefits,\n c=\"firebrick\",\n linewidth=5,\n marker=\"$\\u2B24$\",\n markersize=13,\n linestyle=\":\",\n )\n ax2.plot(\n hod,\n shoulder_benefits,\n c=\"royalblue\",\n linewidth=5,\n marker=\"$\\u25B2$\",\n markersize=13,\n linestyle=\":\",\n )\n ax3.plot(\n hod,\n winter_benefits,\n c=\"green\",\n linewidth=5,\n marker=\"$\\u25A0$\",\n markersize=13,\n linestyle=\":\",\n )\n ax1.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax2.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax3.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n leg1 = ax1.legend(legend_labels1, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels2, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels3, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax1.set_ylabel(\"TRC Benefits ($/hr)\", size=16)\n ax2.set_xlabel(\"Hour of Day\", size=16)\n\n if max(summer_benefits + shoulder_benefits + winter_benefits) < 0:\n ymax = 0\n else:\n ymax = max(summer_benefits + shoulder_benefits + winter_benefits)\n if min(summer_benefits + shoulder_benefits + winter_benefits) > 0:\n ymin = 0\n else:\n ymin = min(summer_benefits + shoulder_benefits + winter_benefits)\n\n # Tick and label parameters\n ax1.set_ylim(ymin * 1.08, ymax * 1.08)\n ax1.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax1.set_xticks(np.arange(0, 24, step=4))\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax1.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax2.set_xlim(hod.min() - hod.max() * 0.04, 
hod.max() * 1.04)\n ax2.xaxis.set_minor_locator(AutoMinorLocator())\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Seasonal TRC Benefits by Hour ($)\", size=18, loc=\"left\"\n ).set_position([0, 1.03])\n\n # sum_hourly_plot\n fig2 = plt.figure(figsize=(12, 7), dpi=250)\n ax = fig2.gca()\n colors = [\n \"royalblue\",\n \"black\",\n \"pink\",\n \"firebrick\",\n \"gray\",\n \"darkviolet\",\n \"darkorange\",\n \"green\",\n \"saddlebrown\",\n ]\n legend_labels = []\n x = 1\n while x <= len(ACC_COMPONENTS_ELECTRICITY[1:]):\n if x == 1:\n ax.bar(\n hod,\n elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]],\n color=colors[x - 1],\n )\n legend_labels.append(\n re.findall(\n \".*Name: (.*),\",\n str(elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]]),\n )[0]\n )\n x += 1\n else:\n ax.bar(\n hod,\n elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]],\n bottom=elec_benefits_sum_by_hod.iloc[:, 2 : x + 1].sum(axis=1),\n color=colors[x - 1],\n )\n legend_labels.append(\n re.findall(\n \".*Name: (.*),\",\n str(elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]]),\n )[0]\n )\n x += 1\n\n # Set x and y limits based on min and max values\n ymax = elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).max()\n if elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).min() > 0:\n ymin = 0\n else:\n ymin = elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).min()\n\n ax.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax.set_ylim(ymin * 1.1, ymax * 1.08)\n\n # Set x and y axis labels\n ax.set_xlabel(\"Hour of Day\", size=17, labelpad=5)\n ax.set_ylabel(\"$ Avoided Costs\", size=17)\n\n # Set plot title, size, and position\n ax.set_title(\n \"Sum of Electric Avoided Costs by Component and Hour of Day\",\n size=17,\n loc=\"left\",\n )\n\n # Tick and lebel parameters\n ax.tick_params(bottom=True, top=False, left=True, right=False)\n ax.set_xticks(np.arange(0, 24, step=4))\n ax.set_yticks(\n np.arange(\n int(round(ymin * 1.1, 0)),\n ymax * 1.08,\n step=max(round(ymax - ymin, 2) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n plt.legend(\n legend_labels,\n bbox_to_anchor=(1, 1),\n fontsize=12,\n loc=\"upper left\",\n frameon=False,\n )\n\n # avoided_cost_summary_plot\n fig3, (ax1, ax2, ax3) = plt.subplots(\n 3, 1, figsize=(6, 10), sharex=True, sharey=False\n )\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels = [\"Total\", \"Summer\", \"Shoulder\", \"Winter\"]\n\n ax1.plot(\n hod,\n total_benefits,\n c=\"royalblue\",\n marker=\"$\\u25EF$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax1.plot(hod, summer_benefits, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax1.plot(hod, shoulder_benefits, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax1.plot(hod, winter_benefits, c=\"teal\", linewidth=1, linestyle=\"-\")\n ax2.plot(\n hod,\n total_savings,\n c=\"firebrick\",\n marker=\"$\\u2206$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax2.plot(hod, summer_savings, c=\"darkorchid\", 
linewidth=1, linestyle=\"--\")\n ax2.plot(hod, shoulder_savings, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax2.plot(hod, winter_savings, c=\"teal\", linewidth=1, linestyle=\"-\")\n ax3.plot(\n hod,\n total_av_csts_avg,\n c=\"green\",\n marker=\"$\\u25A0$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax3.plot(hod, summer_av_csts_avg, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax3.plot(hod, shoulder_av_csts_avg, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax3.plot(hod, winter_av_csts_avg, c=\"teal\", linewidth=1, linestyle=\"-\")\n\n leg1 = ax1.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax3.set_xticks(np.arange(0, 24, step=4))\n ax3.set_xlabel(\"Hour of Day\", size=14, labelpad=5)\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=12\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n ax1.set_ylabel(\"TRC Benefits ($)\", size=14)\n ax2.set_ylabel(\"Savings (MWh)\", size=14)\n ax3.set_ylabel(\"Av. Cost ($/MWh)\", size=14)\n\n if max(total_benefits + summer_benefits + shoulder_benefits + winter_benefits) < 0:\n ymax1 = 0\n else:\n ymax1 = max(\n total_benefits + summer_benefits + shoulder_benefits + winter_benefits\n )\n if min(total_benefits + summer_benefits + shoulder_benefits + winter_benefits) > 0:\n ymin1 = 0\n else:\n ymin1 = min(\n total_benefits + summer_benefits + shoulder_benefits + winter_benefits\n )\n if max(total_savings + summer_savings + shoulder_savings + winter_savings) < 0:\n ymax2 = 0\n else:\n ymax2 = max(total_savings + summer_savings + shoulder_savings + winter_savings)\n if min(total_savings + summer_savings + shoulder_savings + winter_savings) > 0:\n ymin2 = 0\n else:\n ymin2 = min(total_savings + summer_savings + shoulder_savings + winter_savings)\n if (\n max(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n < 0\n ):\n ymax3 = 0\n else:\n ymax3 = max(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n if (\n min(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n > 0\n ):\n ymin3 = 0\n else:\n ymin3 = min(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n\n # Tick and lebel parameters\n ax1.set_ylim(ymin1 * 1.08, ymax1 * 1.08)\n ax2.set_ylim(ymin2 * 1.08, ymax2 * 1.08)\n ax3.set_ylim(ymin3 * 1.08, ymax3 * 1.08)\n\n ax1.set_yticks(\n np.arange(\n ymin1 * 1.08,\n ymax1 * 1.08,\n step=max(round(ymax1 - ymin1, 3) / 5, int((round(ymax1 - ymin1, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin2 * 1.08,\n ymax2 * 1.08,\n step=max(round(ymax2 - ymin2, 3) / 5, int((round(ymax2 - ymin2, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin3 * 1.08,\n ymax3 * 1.08,\n step=max(round(ymax3 - ymin3, 3) / 5, int((round(ymax3 - ymin3, 0)) / 4)),\n )\n )\n\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n 
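# (Editor's aside, not part of the stored snippet.) The month-by-hour matrices plotted
# further down in this snippet follow a standard pivot-then-heatmap pattern; sketch on
# synthetic hourly data with hypothetical column names.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

idx = pd.date_range('2024-01-01', periods=8760, freq='H')
hourly = pd.DataFrame({'month': idx.month, 'hour_of_day': idx.hour,
                       'hourly_savings': np.random.rand(len(idx))})
mat = hourly.groupby(['hour_of_day', 'month'])['hourly_savings'].sum().unstack('month')
sns.heatmap(mat, center=0.0)
plt.xlabel('Month')
plt.ylabel('Hour of Day')
plt.show()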
ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n ax2.axvspan(16, 21, alpha=0.2, color=\"grey\")\n ax3.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n # Print key information\n plt.annotate(\n \"Electric Benefits = $\" + str(round(elec_benefits[\"total\"].sum(), 2)),\n xy=(350, 530),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Gas Benefits = $\" + str(round(gas_benefits, 2)),\n xy=(350, 505),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Total Benefits = $\"\n + str(round(elec_benefits[\"total\"].sum() + gas_benefits, 2)),\n xy=(350, 480),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"TRC Costs = $\" + str(trc_costs_record),\n xy=(350, 455),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"PAC Costs = $\" + str(pac_costs_record),\n xy=(350, 430),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"TRC = \" + str(trc_record),\n xy=(350, 405),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"PAC = \" + str(pac_record),\n xy=(350, 380),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle Electric Savings = \" + str(lifecycle_net_mwh) + \" MWh\",\n xy=(350, 335),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle Gas Savings = \" + str(lifecycle_net_therms) + \" Therms\",\n xy=(350, 310),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle GHG Savings = \" + str(lifecycle_net_ghg) + \" Tons\",\n xy=(350, 285),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n str(round(100 * ((summer_peak_savings) / sum(total_savings)), 1))\n + \"% MWh savings during summer peak period\",\n xy=(350, 260),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n str(round(100 * ((summer_peak_benefits) / sum(total_benefits)), 1))\n + \"% Electric TRC benefits from summer peak period\",\n xy=(350, 235),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Electric Benefits per MWh = $\"\n + str(round(elec_benefits[\"total\"].sum() / lifecycle_net_mwh, 2)),\n xy=(350, 210),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Typical Avoided Cost per MWh = $\"\n + str(round(elec_benefits[\"av_csts_levelized\"].mean(), 2)),\n xy=(350, 145),\n xycoords=\"axes points\",\n fontsize=18,\n )\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Savings and Avoided Cost Profiles\", size=16, loc=\"left\"\n ).set_position([0, 1.03])\n\n # marginal_ghg_savings_plot\n cmp = sns.diverging_palette(16, 260, l=35, n=25, as_cmap=True)\n\n fig4 = plt.figure(figsize=(8, 6), dpi=100)\n ax1 = fig4.gca()\n y_ticks = [\n 0,\n \"\",\n 2,\n \"\",\n 4,\n \"\",\n 6,\n \"\",\n 8,\n \"\",\n 10,\n \"\",\n 12,\n \"\",\n 14,\n \"\",\n 16,\n \"\",\n 18,\n \"\",\n 20,\n \"\",\n 22,\n ]\n hmp = sns.heatmap(ghgsav, cmap=cmp, ax=ax1, yticklabels=y_ticks, center=0.00)\n ax1.set_xlabel(\"Month\", size=15)\n ax1.set_ylabel(\"Hour of Day\", size=15)\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=13\n )\n ax1.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=13,\n rotation=0,\n )\n ax1.set_title(\"Electric GHG Savings by Month and Hour\", size=15, loc=\"left\", 
pad=8)\n cbar1 = hmp.collections[0].colorbar\n cbar1.ax.tick_params(labelsize=14)\n plt.annotate(\"Sum GHG\", xy=(370, 352), xycoords=\"axes points\", fontsize=12)\n plt.annotate(\"Savings (Tons)\", xy=(370, 336), xycoords=\"axes points\", fontsize=12)\n\n # month_hour_savings_benefits_plot\n fig5, (ax1, ax2) = plt.subplots(1, 2, figsize=(21, 10), dpi=200)\n y_ticks = [\n 0,\n \"\",\n 2,\n \"\",\n 4,\n \"\",\n 6,\n \"\",\n 8,\n \"\",\n 10,\n \"\",\n 12,\n \"\",\n 14,\n \"\",\n 16,\n \"\",\n 18,\n \"\",\n 20,\n \"\",\n 22,\n ]\n fleft = sns.heatmap(sav, cmap=cmp, ax=ax1, yticklabels=y_ticks, center=0.00)\n fright = sns.heatmap(avcsts, cmap=cmp, ax=ax2, yticklabels=y_ticks, center=0.00)\n ax1.set_xlabel(\"Month\", size=22)\n ax1.set_ylabel(\"Hour of Day\", size=22)\n ax2.set_xlabel(\"Month\", size=22)\n ax2.set_ylabel(\"Hour of Day\", size=22)\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=18\n )\n ax1.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=18,\n rotation=0,\n )\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=18\n )\n ax2.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=18,\n rotation=0,\n )\n ax1.set_title(\n \"MWh Savings by Month and Hour\", size=24, loc=\"left\", pad=15\n ).set_position([0, 1.1])\n ax2.set_title(\"$ Benefits by Month and Hour\", size=24, loc=\"left\", pad=15)\n fig4.tight_layout(pad=2.0)\n cbar1 = fleft.collections[0].colorbar\n cbar1.ax.tick_params(labelsize=18)\n cbar2 = fright.collections[0].colorbar\n cbar2.ax.tick_params(labelsize=18)\n plt.annotate(\"Sum MWh\", xy=(-200, 585), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Savings\", xy=(-193, 560), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Sum TRC\", xy=(435, 585), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Benefits\", xy=(442, 560), xycoords=\"axes points\", fontsize=20)\n\n # savings_benefits_cumulative_sum_plot\n fig6 = plt.figure(figsize=(12, 7), dpi=250)\n ax1 = fig6.gca()\n ax1.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[\"hourly_savings\"],\n color=\"royalblue\",\n linewidth=3,\n )\n ax2 = ax1.twinx()\n ax2.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[\"total\"],\n color=\"firebrick\",\n linewidth=3,\n linestyle=\"--\",\n )\n ax2.axhline(y=0, color=\"gray\", linewidth=0.7, linestyle=\"--\")\n\n # Set x and y limits based on min and max values\n\n if (\n elec_benefits_hoy[\"hourly_savings\"].max() >= 0\n and elec_benefits_hoy[\"total\"].max() >= 0\n ):\n ymax1 = elec_benefits_hoy[\"hourly_savings\"].max()\n ymax2 = elec_benefits_hoy[\"total\"].max()\n elif (\n elec_benefits_hoy[\"hourly_savings\"].max() < 0\n and elec_benefits_hoy[\"total\"].max() < 0\n ):\n ymax1 = 0\n ymax2 = 0\n elif (\n elec_benefits_hoy[\"hourly_savings\"].max() < 0\n and elec_benefits_hoy[\"total\"].max() > 0\n ):\n ymax1 = (\n -1\n * elec_benefits_hoy[\"hourly_savings\"].min()\n * (\n elec_benefits_hoy[\"total\"].max()\n / (elec_benefits_hoy[\"total\"].max() - elec_benefits_hoy[\"total\"].min())\n )\n / (\n 1\n - elec_benefits_hoy[\"total\"].max()\n / (elec_benefits_hoy[\"total\"].max() - elec_benefits_hoy[\"total\"].min())\n )\n )\n ymax2 = elec_benefits_hoy[\"total\"].max()\n else:\n ymax1 = 0\n ymax2 = (\n -1\n * elec_benefits_hoy[\"total\"].min()\n * (\n elec_benefits_hoy[\"hourly_savings\"].max()\n / (\n 
elec_benefits_hoy[\"hourly_savings\"].max()\n - elec_benefits_hoy[\"hourly_savings\"].min()\n )\n )\n )\n\n if (\n elec_benefits_hoy[\"hourly_savings\"].min() <= 0\n and elec_benefits_hoy[\"total\"].min() <= 0\n ):\n ymin1 = elec_benefits_hoy[\"hourly_savings\"].min()\n ymin2 = elec_benefits_hoy[\"total\"].min()\n elif (\n elec_benefits_hoy[\"hourly_savings\"].min() > 0\n and elec_benefits_hoy[\"total\"].min() > 0\n ):\n ymin1 = 0\n ymin2 = 0\n elif (\n elec_benefits_hoy[\"hourly_savings\"].min() > 0\n and elec_benefits_hoy[\"total\"].min() < 0\n ):\n ymin1 = (\n -1\n * elec_benefits_hoy[\"hourly_savings\"].max()\n * (\n elec_benefits_hoy[\"total\"].min()\n / (elec_benefits_hoy[\"total\"].min() - elec_benefits_hoy[\"total\"].max())\n )\n / (\n 1\n - elec_benefits_hoy[\"total\"].min()\n / (elec_benefits_hoy[\"total\"].min() - elec_benefits_hoy[\"total\"].max())\n )\n )\n ymin2 = elec_benefits_hoy[\"total\"].min()\n else:\n ymin1 = 0\n ymin2 = (\n -1\n * elec_benefits_hoy[\"total\"].min()\n * (\n elec_benefits_hoy[\"hourly_savings\"].min()\n / (\n elec_benefits_hoy[\"hourly_savings\"].min()\n - elec_benefits_hoy[\"hourly_savings\"].min()\n )\n )\n )\n\n # Set x and y axis limits\n ax1.set_xlim(-340, 9000)\n ax1.set_ylim(ymin1 * 1.08, ymax1 * 1.08)\n ax2.set_ylim(ymin2 * 1.08, ymax2 * 1.08)\n\n # Set x and y axis labels\n ax1.set_xlabel(\"Hour of Year\", size=17, labelpad=5)\n ax1.set_ylabel(\"Net Lifecycle Savings (MWh)\", size=17)\n ax2.set_ylabel(\"$ TRC Benefits\", size=17, rotation=-90, labelpad=20)\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Cumulative Savings and TRC Benefits by Hour of Year\",\n size=17,\n loc=\"left\",\n pad=8,\n )\n\n # Tick and lebel parameters\n ax1.set_xticks(np.arange(0, 8760, step=1000))\n ax1.set_yticks(\n np.arange(\n int(round(ymin1 * 1.1, 0)),\n ymax1 * 1.08,\n step=max(round(ymax1 - ymin1, 2) / 5, int((round(ymax1 - ymin1, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n ax2.set_xticks(np.arange(0, 8760, step=1000))\n ax2.set_yticks(\n np.arange(\n int(round(ymin2 * 1.1, 0)),\n ymax2 * 1.08,\n step=max(round(ymax2 - ymin2, 2) / 5, int((round(ymax2 - ymin2, 0)) / 4)),\n )\n )\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax2.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n ax1.legend(\n [\"Savings\"],\n fontsize=12,\n bbox_to_anchor=(0.02, 1),\n loc=\"upper left\",\n frameon=False,\n )\n ax2.legend(\n [\"TRC Beneftis\"],\n fontsize=12,\n bbox_to_anchor=(0.02, 0.95),\n loc=\"upper left\",\n frameon=False,\n )\n\n fig7 = plt.figure(figsize=(12, 7), dpi=250)\n ax = fig7.gca()\n colors1 = [\n \"black\",\n \"royalblue\",\n \"black\",\n \"pink\",\n \"firebrick\",\n \"gray\",\n \"darkviolet\",\n \"darkorange\",\n \"green\",\n \"saddlebrown\",\n ]\n legend_labels2 = []\n\n ax.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[ACC_COMPONENTS_ELECTRICITY[0]],\n color=colors1[0],\n linewidth=3,\n )\n legend_labels2.append(ACC_COMPONENTS_ELECTRICITY[0])\n x = 1\n while x <= len(ACC_COMPONENTS_ELECTRICITY) - 2:\n ax.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n 
elec_benefits_hoy[ACC_COMPONENTS_ELECTRICITY[x]],\n color=colors1[x],\n )\n legend_labels2.append(ACC_COMPONENTS_ELECTRICITY[x])\n x += 1\n\n # Set x and y limits based on min and max values\n if max(elec_benefits_hoy.iloc[:, 2:x].max()) < 0:\n ymax = 0\n else:\n ymax = max(elec_benefits_hoy.iloc[:, 2:x].max())\n if min(elec_benefits_hoy.iloc[:, 2:x].min()) > 0:\n ymin = 0\n else:\n ymin = min(elec_benefits_hoy.iloc[:, 2:x].min())\n\n ax.set_xlim(-340, 9000)\n ax.set_ylim(ymin * 1.1, ymax * 1.08)\n\n # Set x and y axis labels\n ax.set_xlabel(\"Hour of Year\", size=17, labelpad=5)\n ax.set_ylabel(\"$ TRC Benefits\", size=17)\n\n # Set plot title, size, and position\n ax.set_title(\n \"Sum of Avoided Costs by Component and Hour of Day\", size=17, loc=\"left\"\n )\n\n # Tick and lebel parameters\n ax.set_xticks(np.arange(0, 8760, step=1000))\n ax.set_yticks(\n np.arange(\n int(round(ymin * 1.1, 0)),\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n plt.legend(\n legend_labels2,\n bbox_to_anchor=(1, 1),\n fontsize=12,\n loc=\"upper left\",\n frameon=False,\n )", "def rank(self,others):\n self.__verify(others)\n \n #construct the n evaluation criteria + classes in an extensible way\n #evalFn = [AP,R] in the standard format -> column with as many rows as replicates\n numClasses = others[0].eval['APBCI'].shape[2]\n\n iouType = others[0].params.iouType\n if iouType in [\"segm\",\"bbox\"]:\n evalFunctions = [ \\\n lambda AP,R: np.nanmean(AP[:,:,:,0,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[0,:,:,0,-1],axis=(1)),\n lambda AP,R: np.nanmean(AP[5,:,:,0,-1],axis=(1)),\n lambda AP,R: np.nanmean(AP[:,:,:,1,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[:,:,:,2,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[:,:,:,3,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,1],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,2],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,1,2],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,2,2],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,3,2],axis=(0,2))]\n\n evfAP = lambda c: (lambda AP,R: np.nanmean(AP[:,:,c,0,-1],axis=0))\n for i in range(numClasses):\n evalFunctions.append(evfAP(i))\n\n else:\n evalFunctions = [ \\\n lambda AP,R: np.nanmean(AP[:,:,:,0,0],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[0,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(AP[5,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(AP[:,:,:,1,0],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[:,:,:,2,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[0,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(R[5,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(R[:,:,:,1,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,2,0],axis=(0,2))]\n\n numReplicates = others[0].eval['APBCI'].shape[1]\n numInstances = len(others)\n numEvals = len(evalFunctions)\n\n replicateStats = np.zeros((numReplicates,numInstances))\n\n outperformMatrix = np.zeros((numInstances,numInstances,numEvals))\n rankCI = np.zeros((numInstances,3,numEvals))\n ranks = np.zeros((numInstances,numEvals,numReplicates))\n\n for evi,evf in enumerate(evalFunctions):\n for oi,o in 
enumerate(others):\n replicateStats[:,oi] = evf(o.eval['APBCI'],o.eval['RBCI'])\n\n for oi in range(len(others)):\n for oj in range(len(others)):\n outperformMatrix[oi,oj,evi] = np.mean(replicateStats[:,oi]>replicateStats[:,oj])\n\n for bci in range(numReplicates):\n ranks[:,evi,bci] = stats.rankdata(-replicateStats[bci,:],method='min')\n\n for oi in range(len(others)): \n rankCI[oi,0,evi] = np.mean(ranks[oi,evi,:])\n #use simple percentile method; the bias correction misbehaves \n rankCI[oi,1:,evi] = np.percentile(ranks[oi,evi,:],[100*(self.params.bootstrapAlpha/2),100*(1-self.params.bootstrapAlpha/2)])\n\n return rankCI, outperformMatrix, ranks", "def getGroupFuncs(self):\n\n funcs = []\n for p in self.Parameters:\n if p.arg_name[0:8] == \"Function\" and p.arg_value:\n fct, attr = p.arg_value.split(':')\n if fct and attr:\n funcs.append((fct, attr))\n if not funcs:\n funcs.append(('count', '*'))\n return funcs", "def plot(self, rerun=False, ylabel=\"Time (seconds)\"):\n if self.results is None or rerun is True:\n self.run_methods()\n # an alias\n data = self.results\n\n methods = sorted(data, key=lambda x: pylab.mean(data[x]))\n pylab.boxplot([data[x] for x in methods])\n # pylab.xticks([1+this for this in range(len(methods))], methods)\n pylab.xticks(*zip(*enumerate(methods, start=1)))\n pylab.grid(True)\n pylab.ylabel(ylabel)\n pylab.xlim([0, len(methods)+1])", "def display_by_method(data, fp):\n fp.write(\n b'%5.5s %10.10s %7.7s %-8.8s\\n'\n % (b'% ', b'cumulative', b'self', b'')\n )\n fp.write(\n b'%5.5s %9.9s %8.8s %-8.8s\\n'\n % (b\"time\", b\"seconds\", b\"seconds\", b\"name\")\n )\n\n stats = SiteStats.buildstats(data.samples)\n\n grouped = defaultdict(list)\n for stat in stats:\n grouped[stat.site.filename() + b\":\" + stat.site.function].append(stat)\n\n # compute sums for each function\n functiondata = []\n for fname, sitestats in pycompat.iteritems(grouped):\n total_cum_sec = 0\n total_self_sec = 0\n total_percent = 0\n for stat in sitestats:\n total_cum_sec += stat.totalseconds()\n total_self_sec += stat.selfseconds()\n total_percent += stat.selfpercent()\n\n functiondata.append(\n (fname, total_cum_sec, total_self_sec, total_percent, sitestats)\n )\n\n # sort by total self sec\n functiondata.sort(reverse=True, key=lambda x: x[2])\n\n for function in functiondata:\n if function[3] < 0.05:\n continue\n fp.write(\n b'%6.2f %9.2f %9.2f %s\\n'\n % (\n function[3], # total percent\n function[1], # total cum sec\n function[2], # total self sec\n function[0],\n )\n ) # file:function\n\n function[4].sort(reverse=True, key=lambda i: i.selfseconds())\n for stat in function[4]:\n # only show line numbers for significant locations (>1% time spent)\n if stat.selfpercent() > 1:\n source = stat.site.getsource(25)\n if sys.version_info.major >= 3 and not isinstance(\n source, bytes\n ):\n source = pycompat.bytestr(source)\n\n stattuple = (\n stat.selfpercent(),\n stat.selfseconds(),\n stat.site.lineno,\n source,\n )\n\n fp.write(b'%33.0f%% %6.2f line %d: %s\\n' % stattuple)", "def for_fun():\n k = 10\n total_draws = 35\n total_balls = 40\n n_experiments = 100\n old_result = None\n\n rand_color = randomcolor.RandomColor()\n fig = plt.figure(constrained_layout=False, frameon=False)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.set_facecolor((0.07, 0.07, 0.05))\n\n # for total_draws, color in zip([20, 25, 30], ['red', 'red', 'red']):\n # for total_draws, color in zip([20, 25, 30], ['purple', 'yellow', 'purple']): # mardi gras argyle\n # for total_draws, color in zip([5, 25, 27, 23, 40], ['purple', 
'purple', 'blue', 'blue', 'purple']):\n for total_draws, color in zip([20, 3, 5, 10, 35], ['blue', 'red', 'blue', 'purple', 'blue']): # this one is good\n for _ in range(n_experiments):\n for num_samples in [10000]:\n experiment_results = []\n for samples in range(num_samples):\n N = np.random.randint(1, k, total_balls - 1)\n N = np.append(N, k)\n N = np.array(N).flatten()\n random.shuffle(N)\n draw = N[:np.random.randint(total_draws - 3, total_draws + 3)]\n experiment_result = np.any(draw == k)\n experiment_results.append(experiment_result)\n if old_result:\n if np.random.uniform(0, 1) > 0.8:\n luminosity = None\n if color == 'green':\n luminosity = 'bright'\n if color == 'yellow':\n luminosity = 'dark'\n tmp_rgb_color = np.array(rand_color.generate(\n hue=color, luminosity=luminosity, count=1, format_='Array_rgb')) / 256.\n tmp_rgb_color = tmp_rgb_color[0]\n alpha = np.min([np.random.beta(0.01, 0.2), 0.9])\n ax.fill_between(np.arange(1, num_samples + 1),\n np.cumsum(experiment_results) / np.arange(1, num_samples + 1),\n np.cumsum(old_result) / np.arange(1, num_samples + 1),\n alpha=alpha,\n color=tmp_rgb_color)\n if np.random.uniform(0, 1) > 0.95:\n tmp_rgb_color = np.array(rand_color.generate(\n hue=color, luminosity='dark', count=1, format_='Array_rgb')) / 256.\n tmp_rgb_color = tmp_rgb_color[0]\n alpha = np.min([np.random.beta(0.1, 0.2), 0.9])\n linewidth = np.min([np.random.exponential(5.0), 0.9])\n ax.semilogx(np.arange(1, num_samples + 1),\n np.cumsum(experiment_results) / np.arange(1, num_samples + 1),\n alpha=alpha,\n linewidth=linewidth,\n c=tmp_rgb_color)\n old_result = experiment_results[:]\n\n plt.show()", "def main():\n\n convergence_rates_e1 = pd.DataFrame(index=['Mean', 'St.D'])\n convergence_rates_e4 = pd.DataFrame(index=['Mean', 'St.D'])\n success_rates = pd.DataFrame(index=['1e-1', '1e-2', '1e-3', '1e-4', '1e-5'])\n peak_ratios = pd.DataFrame(index=['1e-1', '1e-2', '1e-3', '1e-4', '1e-5'])\n\n for function in range(1, 21):\n results = pickle.load(open('results/benchmarking_result_{}.pkl'.format(function), 'rb'))\n\n col_name = 'F{}'.format(function)\n index = function-1\n\n convergence_rates_e1.insert(index, col_name, results.ConvergenceRates[0])\n convergence_rates_e4.insert(index, col_name, results.ConvergenceRates[3])\n\n success_rates.insert(index, col_name, results.SuccessRate)\n peak_ratios.insert(index, col_name, results.PeakRatio)\n\n for i in results.SimulationSwarms:\n\n x_axis = [k for k, _ in i.items()]\n y_axis = [v for _, v in i.items()]\n\n plt.subplot(4, 5, function) # subplot indexes from 1\n plt.plot(x_axis, y_axis, 'k-')\n\n plt.savefig('results/nmmso_benchmark.png')\n plt.show()\n\n pd.set_option('display.max_columns', None) # make sure all columns are printed below\n print(convergence_rates_e1)\n print(convergence_rates_e4)\n print(success_rates)\n print(peak_ratios)\n\n # Table V from Fieldsend et al.\n table4 = pd.Series([\n peak_ratios.stack().median(),\n peak_ratios.stack().mean(),\n peak_ratios.stack().std()\n ])\n print(table4)\n\n # do we want to reproduce Fig. 
3", "def draw_group(\n data: pd.DataFrame,\n panel_params: panel_view,\n coord: Coord,\n ax: Axes,\n **params: Any,\n ):\n msg = \"The geom should implement this method.\"\n raise NotImplementedError(msg)", "def pie_chart_score(self, grouped):\n picked_scenario = self.scenario_dict[\"%d\" % (self.scenario_num-1)]\n distinct_enum_X = self.data_dict[picked_scenario[\"X\"]]['distinct_enum']\n score = 0\n if min(grouped) < 0:\n score = 0\n elif distinct_enum_X == 1:\n score = 0\n elif picked_scenario[\"Agg_func_Y\"] == \"avg\":\n score = 0\n elif distinct_enum_X >= 2 and distinct_enum_X <= 8:\n score += self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]]) / 8\n elif distinct_enum_X > 8:\n score += 4 * (self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]])) / distinct_enum_X\n if score > 3:\n score = 3\n return score", "def topfeats_boxplots_by_group(df, test_results_df, grouping_variable, \n plot_save_dir=None, p_value_threshold=0.05, \n n_topfeats=5):\n \n if plot_save_dir:\n # Ensure directory exists to save plots\n plot_save_dir.mkdir(exist_ok=True, parents=True)\n \n pvals_corrected = test_results_df.loc['pval_corrected']\n n_sigfeats = sum(pvals_corrected < p_value_threshold)\n \n if pvals_corrected.isna().all():\n print(\"No signficant features found in control with respect to '%s'\" % grouping_variable)\n elif n_sigfeats > 0:\n # Rank p-values in ascending order\n ranked_pvals = pvals_corrected.sort_values(ascending=True)\n \n # Drop non-sig feats\n ranked_pvals = ranked_pvals[ranked_pvals < p_value_threshold]\n \n # Select the first n pvalues for plotting\n topfeats = ranked_pvals[:n_topfeats]\n \n if n_sigfeats < n_topfeats:\n print(\"WARNING: Only %d features found to vary significantly with respect to '%s'\"\\\n % (n_sigfeats, grouping_variable))\n \n print(\"\\nTop %d features found to differ significantly with respect to '%s':\\n\"\\\n % (len(topfeats), grouping_variable))\n print(*[feat + '\\n' for feat in list(topfeats.index)])\n \n # for f, feature in enumerate(features_to_analyse[0:25]):\n for feature in topfeats.index:\n print(\"P-value for '%s': %s\" % (feature, str(topfeats[feature])))\n feat_df = df[[grouping_variable, feature]]\n \n # Plot boxplots of control across days for most significant features\n plt.close('all')\n fig = plt.figure(figsize=[10,6])\n ax = fig.add_subplot(1,1,1)\n sns.boxplot(x=grouping_variable, y=feature, data=feat_df)\n ax.set_xlabel(grouping_variable, fontsize=15, labelpad=12)\n ax.set_title(feature, fontsize=20, pad=20)\n \n # TODO: Add pvalues to plot?\n \n if plot_save_dir:\n # Save plot\n plots_outpath = plot_save_dir / (feature + '_wrt_' + grouping_variable + '.eps')\n savefig(plots_outpath, tellme=True, saveFormat='eps') \n plt.close()\n else:\n plt.show(); plt.pause(5)", "def MSE_plots(n_min, n_max, save_fig, k = [5], method = 'OLS', lamb = 1, split = False, train = 0.7, N = 1, method2 = 'OLS'):\n n = np.linspace(n_min, n_max, n_max - n_min + 1)\n errors = np.zeros((4, len(k), len(n))) # First index MSE for real FrankeFunction, MSE for the data, R2 for the real FrankeFunction, R2 for the data\n #Second index is the max order of polynomial, third index is for the n-value\n if type(k) != type([2]):\n k = [k]\n\n for j in range(N):\n #print(j)\n for i in range(len(n)):\n #print(i)\n x = np.random.uniform(0, 1, size = int(n[i]))\n y = np.random.uniform(0, 1, size = int(n[i]))\n x, y = np.meshgrid(x, y)\n\n z = FrankeFunction(x, y) + np.random.normal(0, 1, size = x.shape)\n z_real = FrankeFunction(x, y)\n\n for poly in 
range(len(k)):\n a = regression(x, y, z, k = k[poly], split = split, train = train)\n\n if method == 'OLS':\n beta = a.OLS()\n elif method == 'Ridge':\n beta = a.Ridge(lam = lamb)\n elif method == 'Lasso':\n beta = a.Lasso(alpha = lamb)\n elif method == 'K-fold':\n beta = a.k_cross(fold = 25, method2 = method2, lam = lamb)[0]\n\n if split == True:\n X = a.design_matrix(k = k[poly])\n X_train, X_test, z_real_train, z_real_test = a.train_test(X = X, z = z_real, train = train)\n z_tilde = a.z_tilde(X = X_test, beta = beta)\n errors[0, poly, i] += a.MSE(z_tilde, z_real_test)\n errors[1, poly, i] += a.MSE(z_tilde, a.z_test)\n errors[2, poly, i] += a.R_squared(z_tilde = z_tilde, z = z_real_test)\n errors[3, poly, i] += a.R_squared(z_tilde = z_tilde, z = a.z_test)\n else:\n z_tilde = a.z_tilde(beta = beta)\n errors[0, poly, i] += a.MSE(z_tilde, z_real)\n errors[1, poly, i] += a.MSE(z_tilde, z)\n errors[2, poly, i] += a.R_squared(z_tilde = z_tilde, z = z_real)\n errors[3, poly, i] += a.R_squared(z_tilde = z_tilde, z = z)\n\n n_mid = int(len(n)/2)\n title = ['MSE FrankeFunction', 'MSE data', 'R2 FrankeFunction', 'R2 data']\n y_label = ['MSE', 'MSE', 'R^2', 'R^2']\n errors /= N\n save_name = ['franke', 'data', 'franke', 'data']\n\n if method == 'Ridge':\n method += ' with lambda = ' + str(lamb)\n if method == 'K-fold':\n method += ' using ' + method2\n if method2 == 'Ridge' or method2 == 'Lasso':\n method += ' with lambda = ' + str(lamb)\n\n for i in range(4):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 7))\n for j in range(len(k)):\n ax1.plot(n[:n_mid], errors[i, j, :n_mid], label = 'k = ' + str(k[j]))\n ax2.plot(n[n_mid:], errors[i, j, n_mid:], label = 'k = ' + str(k[j]))\n\n ax1.set_ylabel(y_label[i]); ax2.set_ylabel(y_label[i])\n ax1.set_xlabel('n'); ax2.set_xlabel('n')\n\n if split == True:\n fig.suptitle(title[i] + ' with ' + str(method) + ' with test/training split at ' + str(train) + ' and mean of ' + str(N) + ' runs.')\n else:\n fig.suptitle(title[i] + ' with ' + str(method) + ' without test/training split' + ' and mean of ' + str(N) + ' runs.')\n\n ax1.legend(); ax2.legend()\n #fig.savefig(results_dir + save_fig + method + save_name[i] + y_label[i] + '.png')\n plt.show()", "def summaryPlot(df):\n import datetime as dt\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n import numpy as np\n import pandas as pd\n from numpy import array\n import matplotlib.patches as mpatches\n import seaborn as sns\n from matplotlib.pyplot import figure\n\n class color:\n # Allows for bolded and underlined text\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n END = \"\\033[0m\"\n\n # Reads df and fills empty values\n df.index = pd.to_datetime(df.date)\n df = df.drop(\"date\", axis=1)\n df_all = df.resample(\"1D\")\n df_all = df_all.fillna(method=\"ffill\")\n\n dataPoints = [\"pm25\", \"co\", \"so2\", \"pm10\", \"o3\", \"no2\", \"nox\", \"wd\", \"ws\"]\n\n i = 0\n sub = 1\n while i < 9:\n # Plots line and histogram plots for ecery polutant\n # in the correct location based on subplot\n plt.figure(1, figsize=(50, 50))\n plt.subplot(9, 2, sub)\n sub = sub + 1\n a = df_all[dataPoints[i]].plot.line(color=\"gold\")\n a.axes.get_xaxis().set_visible(False)\n a.yaxis.set_label_position(\"left\")\n plt.ylabel(dataPoints[i], fontsize=75, bbox=dict(facecolor=\"whitesmoke\"))\n # print(df['pm25'].max())\n\n plt.subplot(9, 2, sub)\n sub = sub + 1\n plt.hist(df_all[dataPoints[i]], bins=50, color=\"green\")\n i = i + 1\n i = 0\n while i < 9:\n # Calculates statistics\n nDf = df[dataPoints[i]]\n missing 
= nDf.isna().sum() + sum(n < 0 for n in nDf)\n minVal = nDf.min()\n maxVal = nDf.max()\n meanVal = nDf.mean()\n medianVal = nDf.median()\n percentile = nDf.quantile(0.95)\n print(\"---------------\")\n print(color.BOLD + color.UNDERLINE + dataPoints[i] + color.END)\n print(\"min = \" + str(0))\n print(\"max = \" + str(maxVal))\n print(\"missing = \" + str(missing))\n print(\"mean = \" + str(meanVal))\n print(\"median = \" + str(medianVal))\n print(\"95th percentile = \" + str(percentile))\n i = i + 1", "def hist_shifts(key,conn,fun,range_bn = None, fig = None):\n\n # get file name/comp_num\n (comp_num,fname) = conn.execute(\"select comp_key,fout from comps\\\n where comp_key = ? and function = 'Iden'\",(key,)).fetchone()\n\n # open file/group\n\n F = h5py.File(fname,'r')\n\n nbins = 100\n\n if range_bn is None:\n bin_edges = np.linspace(-2,2,nbins + 1)\n else:\n bin_edges = np.linspace(*(range_bn + (nbins + 1,)))\n bin_counts = np.zeros(nbins)\n # extract the relevant data\n for fr in F:\n if fr == 'parameters':\n continue\n bin_counts += fun(F,fr,comp_num,bin_edges)\n \n # plot\n istatus = lplts.non_i_plot_start()\n if fig is None:\n (fig,ax) = lplts.set_up_plot()\n else:\n ax = fig.get_axes()[0]\n \n sh = ax.step(bin_edges[:-1],bin_counts/np.sum(bin_counts))\n\n if ax.get_legend() is None:\n print 'attempt to set leg'\n ax.legend([sh],[F.attrs['Exposure']],loc = 3)\n else:\n #leg =aff ax.get_legend()\n pass\n lplts.non_i_plot_stop(istatus)\n # clean up\n F.close()\n del F\n\n return fig", "def _ols_group(dat, formula, group_col, group, rank):\n dat = dat[dat[group_col] == group].reset_index(drop=True)\n if rank:\n dat = dat.rank()\n y, x = dmatrices(formula, dat, 1, return_type=\"dataframe\")\n b = _ols(x, y, robust=None, n_lags=1, cluster=None, all_stats=False)\n return list(b)", "def show_dprime(sim_attr_generator):\n#TODO description\n dprime_fnc_list = [\n (sim_attr.id_name,sim_attr.dprime_fnc) for sim_attr in sim_attr_generator\n ]\n\n if Args.mat_file_out != None:\n save_dict = dict()\n else:\n x_axis = int(math.ceil(math.sqrt(len(dprime_fnc_list))))\n y_axis = int(math.ceil(float(len(dprime_fnc_list)) / x_axis))\n fig, axes = plt.subplots(nrows=y_axis,ncols=x_axis)\n\n#? 
Code duplication\n if len(dprime_fnc_list) == 1:\n id_name, dprime_fnc = dprime_fnc_list[0]\n mesh_X, mesh_Y, mesh_Z = dprime_fnc_to_mesh_grid(\n dprime_fnc, linspace=Args.grid_size\n )\n im = show_plot_imshow_from_mesh(\n axes, mesh_X, mesh_Y, mesh_Z, title=id_name, vmax=Args.upper_bound\n )\n fig.colorbar(im,shrink=0.8)\n plt.show()\n# End code duplication\n return\n\n for i, (id_name, dprime_fnc) in enumerate(dprime_fnc_list):\n mesh_X, mesh_Y, mesh_Z = dprime_fnc_to_mesh_grid(\n dprime_fnc, linspace=Args.grid_size\n )\n if Args.mat_file_out != None:\n dprime_fnc[id_name] = {'X':mesh_X, 'Y':mesh_Y, 'Z':mesh_Z}\n else:\n im = show_plot_imshow_from_mesh(\n axes.flat[i], mesh_X, mesh_Y, mesh_Z, title=id_name, vmax=Args.upper_bound\n )\n if Args.mat_file_out != None:\n scipy.io.savemat(Args.mat_file_out, save_dict)\n else:\n fig.colorbar(im,ax=axes.ravel().tolist(),shrink=0.8)\n plt.show()", "def plot_stats(model_dicts, out_fn, x_title, top_x_title, y_title, smooth, x_min, x_max, *keys):\n # Collect data to plot\n plot_data = {}\n for model_name, model_stats in model_dicts.items():\n model_name_str = str(model_name)\n sorted_xs = sorted(list(model_stats.keys()))\n plot_data[model_name_str] = [sorted_xs]\n ys = []\n for x in sorted_xs:\n cur_item = model_stats[x]\n # traverse down dictionary to find values\n for key in keys:\n cur_item = cur_item[key]\n ys.append(cur_item)\n plot_data[model_name_str].append(ys)\n\n # Plot\n fig, ax = plt.subplots()\n for model_name, model_data in plot_data.items():\n xs, ys = model_data\n filtered_xs, filtered_ys = zip(*[(x, y) for x, y in zip(xs, ys)\n if (x >= x_min) and (x <= x_max)])\n ax.scatter(filtered_xs, filtered_ys)\n if smooth:\n spl = splrep(xs, ys)\n x_new = np.linspace(x_min, x_max, 300)\n y_new = splev(x_new, spl)\n xs = x_new\n ys = y_new\n\n ax.plot(xs, ys, label = model_name)\n\n # Fix plot formatting\n logging.info(f\"Writing figures to file: {out_fn}\")\n ax.set_xlabel(x_title)\n ax.set_ylabel(y_title)\n secax = ax.secondary_xaxis('top', functions=(lambda t: (t * NUM_OF_DOCS) / 60,\n lambda t: (t * 60) / NUM_OF_DOCS))\n secax.set_xlabel(top_x_title)\n fig.legend(loc = 'lower right', bbox_to_anchor=(0.9, 0.1))\n fig.savefig(out_fn)", "def plot_ranks(self, title, ymax=None, linewidth=1):\n line_generator = self.line_gen()\n max_rank = 0\n for flowID, rank_points in self.flow_ranks.items():\n times = [(point[0] - self.start_time)*1e-6 for point in rank_points]\n ranks = [point[1] for point in rank_points]\n if len(ranks) > 0:\n max_rank = max(ranks) if max(ranks) > max_rank else max_rank\n if flowID is not None:\n linestyle = line_generator.next()\n plt.plot(times, ranks, label='Flow {}'.format(flowID), linewidth=linewidth, linestyle=linestyle, marker='o')\n plt.xlabel('time (ms)')\n plt.ylabel('rank (64KB remaining)')\n plt.title(title)\n plt.legend(loc='upper right')\n if ymax is not None:\n plt.ylim(0, max_rank)", "def _plot_group_fields(ax, xss, field, **kw):\n #check for an xmax keyword\n if('xmax' in kw):\n xmax = kw['xmax']\n else:\n xmax = False\n #plot the fields, keeping handles and finding max of all\n fields_list = [xs.fields[field] for xs in xss]\n for i in range(len(fields_list)):\n #plot\n if(xmax):\n kw['H'].append(ax.plot(fields_list[i][-xmax:xmax].index.values,\n fields_list[i][-xmax:xmax].values,\n color=_colormap[i%len(_colormap)],\n linewidth=_fields_linewidth)[0])\n else:\n kw['H'].append(ax.plot(fields_list[i].index.values,\n fields_list[i].values,\n color=_colormap[i%len(_colormap)],\n 
linewidth=_fields_linewidth)[0])\n kw['L'].append(xss[i].sheet)", "def make_summary_plot(run_lists, file_descriptor, attr='sipm1.threeSampleAmpl'):\n biases = []\n gains = []\n pes = []\n currs = []\n gainerrs = []\n quad_terms = []\n quad_errs = []\n for row in sorted(run_lists):\n biases.append(row[0])\n gain_out = fit_gain(row[1], attr=attr)\n out_tuple = gain_out[0]\n gains.append(out_tuple[0])\n gainerrs.append(out_tuple[3])\n smeans = sorted(gain_out[1])\n currs.append(0.5*(smeans[-1] + smeans[-2]))\n pes.append(currs[-1]/gains[-1])\n quad_terms.append(out_tuple[1])\n quad_errs.append(out_tuple[4])\n\n maxgain = max(gains)\n gains = np.array(gains)/maxgain\n gainerrs = np.array(gainerrs)/maxgain\n # gainerrs = 0.1*gains\n\n currs = np.array(currs)/max(currs)\n pes = np.array(pes)\n pe_errs = gainerrs/gains*pes\n maxpe = max(pes)\n fig, ax1 = plt.subplots()\n\n coeffs, V = np.polyfit(biases, gains, 1, w=1.0/gainerrs, cov=True)\n breakdown = -1*coeffs[1]/coeffs[0]\n\n breakdown_sigma = sigma_from_cov(coeffs, V)\n\n # calculate sigmas throughout range\n vals, vecs = np.linalg.eig(V)\n U = np.transpose(vecs)\n xs_for_error = np.arange(breakdown - 0.1, max(biases) + 0.1, 0.01)\n gain_sigmas = sig_from_diag(xs_for_error, U, vals)\n error_band_ys = np.array([i*coeffs[0] + coeffs[1] for i in xs_for_error])\n ax1.fill_between(xs_for_error, error_band_ys + gain_sigmas,\n error_band_ys - gain_sigmas, facecolor='red', alpha=0.5)\n\n fitline = [i*coeffs[0] + coeffs[1] for i in biases] + [0]\n fitbiases = biases + [breakdown]\n\n ax1.set_title('bias scan %s' % file_descriptor)\n fitplot = ax1.plot(fitbiases, fitline, 'r-')\n gainplot = ax1.errorbar(\n biases, gains, yerr=gainerrs, fmt='ro', markersize=10)\n currplot = ax1.plot(biases, currs, 'g*', markersize=15)\n ax1.set_ylim(0, 1.105)\n ax1.set_xlim([breakdown - 0.1, max(biases) + 0.1])\n ax1.set_xlabel('bias voltage [V]')\n ax1.set_ylabel('relative gain, charge [a.u.]')\n\n ticks = [breakdown]\n ticks.extend([bias for bias in biases[::2]])\n tick_labels = ['%.1f $\\pm$ %.1f' % (breakdown, breakdown_sigma)]\n tick_labels.extend([str(bias) for bias in biases[::2]])\n ax1.set_xticks(ticks)\n ax1.set_xticklabels(tick_labels)\n ax1.grid()\n ax1.get_xticklabels()[0].set_color('r')\n\n ax2 = ax1.twinx()\n peplot = ax2.errorbar(biases, pes, yerr=pe_errs, fmt='b^', markersize=10)\n ax2.set_ylabel('pe', color='b')\n ax2.set_ylim(0, maxpe*1.105)\n ax2.set_xlim([breakdown - 0.1, max(biases) + 0.1])\n for tick in ax2.get_yticklabels():\n tick.set_color('b')\n ax1.legend([gainplot[0]]+currplot+[peplot[0]]+fitplot,\n ['gain', 'charge', 'pes', 'gain fit'],\n loc='best', numpoints=1)\n\n plt.savefig('pdfs/breakdownPlot%s.pdf' % file_descriptor)\n plt.show()\n\n quadploterrs = 0.5/np.sqrt(quad_terms)*quad_errs\n plt.errorbar(biases, np.sqrt(quad_terms)*100, yerr=quadploterrs*100, fmt='ko')\n plt.xlim(min(biases) - 0.1, max(biases) + 0.1)\n plt.xlabel('bias [V]')\n plt.ylabel('sqrt(quadratic term) [%]')\n plt.title('quadratic terms %s' % file_descriptor)\n\n plt.savefig('pdfs/quadraticTerms%s.pdf' % file_descriptor)\n plt.show()", "def create_risk_groups(model, X, use_log = True, num_bins = 50, \r\n figure_size = (20, 8), **kwargs):\r\n\r\n # Ensuring that the input data has the right format\r\n X = utils.check_data(X)\r\n\r\n # Computing the risk scores\r\n risk = model.predict_risk(X)\r\n if use_log:\r\n risk = np.log(risk)\r\n \r\n # Displaying simple histogram\r\n if len(kwargs) == 0:\r\n \r\n # Initializing the chart\r\n fig, ax1 = 
plt.subplots(figsize=figure_size)\r\n risk_groups = None\r\n \r\n # Applying any color coding\r\n else:\r\n # Initializing the results\r\n risk_groups = {}\r\n \r\n # Initializing the chart\r\n fig, ((ax1, ax2)) = plt.subplots(1, 2, figsize=figure_size)\r\n \r\n # Displaying simple histogram with risk groups\r\n nums_per_bins, bins, patches = ax2.hist(risk, bins=num_bins)\r\n ax2.set_title('Risk groups with colors', fontsize=15)\r\n\r\n # Number of group definitions\r\n num_group_def = len(kwargs.values())\r\n\r\n # Extracting the bounds values\r\n bounds = {}\r\n colors_ = {}\r\n indexes = {}\r\n group_names = []\r\n handles = []\r\n\r\n # we need to check that the boundaries match the bins\r\n is_not_valid = 0\r\n for group_name, group_def in kwargs.items():\r\n\r\n # by ensuring that the bounds are not outside\r\n # the bins values\r\n min_bin, max_bin = min(bins), max(bins)\r\n if (group_def['lower_bound'] < min_bin and \\\r\n group_def['upper_bound'] < min_bin) or \\\r\n (group_def['lower_bound'] > max_bin and \\\r\n group_def['upper_bound'] > max_bin) :\r\n is_not_valid += 1\r\n\r\n # Extracting the bounds\r\n bounds[group_name] = (group_def['lower_bound'], \r\n group_def['upper_bound'])\r\n\r\n # Extracting the colors\r\n colors_[group_name] = group_def['color']\r\n\r\n # Creating index placeholders\r\n indexes[group_name] = []\r\n group_names.append( group_name )\r\n color_indv = group_def['color']\r\n handles.append(Rectangle((0,0),1,1, color=color_indv, ec=\"k\"))\r\n\r\n if is_not_valid >= num_group_def :\r\n error_msg = \"The boundaries definitions {} do not match\"\r\n error_msg += \", the values of the risk scores.\"\r\n error_msg = error_msg.format(list(bounds.values()))\r\n raise ValueError(error_msg)\r\n\r\n # Assigning each rectangle/bin to its group definition\r\n # and color\r\n colored_patches = []\r\n bin_index = {}\r\n for i, bin_, patch_ in zip(range(num_bins), bins, patches):\r\n\r\n # Check if the bin belongs to this bound def\r\n for grp_name, bounds_ in bounds.items():\r\n\r\n if bounds_[0] <= bin_ < bounds_[-1] :\r\n bin_index[i] = grp_name\r\n \r\n # Extracting color\r\n color_ = colors_[grp_name]\r\n if color_ not in colors.CSS4_COLORS :\r\n error_msg = '{} is not a valid color'\r\n error_msg = error_msg.format(colors_[grp_name])\r\n raise ValueError(error_msg)\r\n \r\n patch_.set_facecolor( color_ )\r\n\r\n # Saving the rectangles\r\n colored_patches.append(patch_)\r\n\r\n # Assigning each sample to its group\r\n risk_bins = np.minimum(np.digitize(risk, bins, True), num_bins-1) \r\n for i, r in enumerate(risk_bins):\r\n\r\n # Extracting the right group_name\r\n group_name = bin_index[r]\r\n indexes[group_name].append(i)\r\n \r\n\r\n # Displaying the original distribution\r\n ax1.hist(risk, bins=num_bins, color = 'black', alpha=0.5) \r\n ax1.set_title('Risk Score Distribution', fontsize=15)\r\n\r\n # Show everything\r\n plt.show()\r\n \r\n # Returning results\r\n if risk_groups is not None:\r\n for group_name in group_names:\r\n result = (colors_[group_name], indexes[group_name]) \r\n risk_groups[group_name] = result\r\n\r\n return risk_groups", "def demo_groupings_aggregations_and_visualizations():\n data = pandas.read_csv('output.csv')\n # Group by\n city_group = data.groupby(by=[\"City\"])\n country_group = data.groupby(by=[\"Country\"])\n region_group = data.groupby(by=[\"Region\"])\n\n logger.info(\"Grouping Demonstration\")\n\n logger.info(\"Grouping Example: Details on IPs from Quebec\")\n logger.info(region_group.get_group(\"Quebec\"))\n\n 
logger.info(\"Grouping Example: Details on IPs from the US\")\n logger.info(country_group.get_group(\"United States\"))\n\n logger.info(\"Grouping Example: Details on IPs from Seattle\")\n logger.info(city_group.get_group(\"Seattle\"))\n\n # Aggregation\n logger.info(\"Aggregation Demonstration\")\n\n logger.info(\"Aggregation example: Mean Temperature in Countries\")\n country_average_temp = country_group.aggregate({\"Temperature\": \"mean\"})\n logger.info(country_average_temp)\n\n logger.info(\"Aggregation example: Mean Temperature in Regions\")\n region_average_temp = region_group.aggregate({\"Temperature\": \"mean\"})\n logger.info(region_average_temp)\n\n logger.info(\"Aggregation example: Mean Max/Min Temperature in Countries\")\n country_average_temp_min_and_max = country_group.aggregate(\n {\n \"MaxTemperature\": \"mean\",\n \"MinTemperature\": \"mean\"\n }\n )\n logger.info(country_average_temp_min_and_max)\n\n # Visualizations\n logger.info(\"Visualization Demonstration\")\n logger.info(\"Adding Figure 1: Box plot of Humidity in Germany\")\n figure1 = plt.Figure(figsize=(25, 15))\n ax1 = figure1.add_subplot(121)\n fig1 = country_group.get_group(\"Germany\").boxplot(column=\"Humidity\", ax=ax1)\n fig1.set_ylabel(\"Humidity (%)\")\n fig1.set_title(\"Figure 1: Box plot of Humidity in Germany\")\n\n logger.info(\"Adding Figure 2: Bar chart showing Mean Max/Min Temperature in Countries\")\n ax2 = figure1.add_subplot(122)\n fig2 = country_average_temp_min_and_max.plot.bar(\n title=\"Figure2: Bar chart showing Mean Max/Min Temperature in Countries\",\n ax=ax2\n )\n fig2.set_ylabel(\"Temperature (\\u00b0 C)\")\n fig2.set_xlabel(\"Countries\")\n figure1.savefig('figures.png')", "def create_plot(group):\n statistics = models.Statistic.query.filter(\n models.Statistic.group == group\n ).order_by(\n models.Statistic.timestamp\n ).all()\n\n style_index = 0\n\n plots = collections.OrderedDict()\n\n for statistic in statistics:\n if statistic.statistic not in plots:\n plots[statistic.statistic] = {\n 'timestamps': [],\n 'datapoints': [],\n 'line_style': STYLES[style_index],\n 'current_value': 0\n }\n style_index = (style_index + 1) % len(STYLES)\n\n plots[statistic.statistic]['timestamps'].append(statistic.timestamp)\n plots[statistic.statistic]['datapoints'].append(statistic.value)\n plots[statistic.statistic]['current_value'] = statistic.value\n\n render_plot(\n os.path.join(\n app.APP.config['GRAPH_STORAGE_FOLDER'],\n '{0}.png'.format(group)\n ),\n [\n PlotDescriptor(\n timestamps=plot['timestamps'],\n datapoints=plot['datapoints'],\n line_style=plot['line_style'],\n current_value=plot['current_value'],\n label=label\n ) for (label, plot) in plots.iteritems()\n ],\n statistics[0].timestamp,\n statistics[-1].timestamp\n )", "def plot_all_perfs_by_expertise(expe, perf_eval_type, add_swarm_plot=False):\n fig = plt.figure(figsize=(7, 4))\n ax = fig.add_subplot(111)\n\n s_df = expe.get_all_s_dataframe(perf_eval_type)\n sns.boxplot(x=\"expertise_level\", y=\"performance\", hue='search_type', data=s_df, ax=ax)\n if add_swarm_plot:\n sns.swarmplot(x=\"expertise_level\", y=\"performance\", hue='search_type', data=s_df, ax=ax,\n dodge=True, size=3, color='black')\n # from https://stackoverflow.com/questions/36267705/avoiding-repeated-legend-in-seaborn-boxplot-overlaid-by-swarmplot\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles[:2], labels[:2])\n\n ax.set_ylim(0.0, 1.0)\n ax.set(title=\"Performances sorted by expertise of subjects (eval. 
function = {})\"\n .format(perf_eval_type.name.lower()))\n #fig.tight_layout()", "def measure(funcs, args, comment='', verbose=False, number=1):\n if not comment:\n comment = repr(args)\n\n # measure performance\n results = []\n w = max(len(name) for name, _ in funcs)\n for name, f in funcs:\n results.append((measure_func(f, args, number=number), name))\n if verbose:\n print(\"{:{}s} {:>9s} {}\".format(\n name, w, human_seconds(results[-1][0]), comment))\n\n # print sorted results\n results.sort()\n mint = results[0][0] # minimal time\n ratios = [\"%5.2f\" % (t / mint,) for t, _ in results]\n maxratio_width = max(len(r) for r in ratios)\n # header\n print(\"{:{}s} {:>9s} {:>{}s} {}\".format(\n \"name\", w, \"time\", \"ratio\", maxratio_width, \"comment\"))\n ratios = [s.rjust(maxratio_width) for s in ratios]\n for (t, name), ratio in zip(results, ratios):\n print(\"{:{}s} {:>9s} {} {}\".format(\n name, w, human_seconds(t), ratio, comment))\n return results", "def plot_bar_group(filename, data, std=None, xlab='x', ylab='y', title='Bar-Plot', methods=None, hatchs=None, datasets=None, figwidth=8, figheight=6, colors=None, legend_loc=\"lower left\", xytick_fontsize=12, xylabel_fontsize=15, title_fontsize=15, legend_fontsize=12,ymin=0,ymax=1,rotation=45):\n import matplotlib as mpl\n mpl.use(\"pdf\")\n import matplotlib.pyplot as plt\n data=np.array(data)\n num_methods,num_datasets=data.shape\n \n if hatchs is None:\n hatchs=[None]*len(methods)\n\n # colors\n if colors is None:\n colors=['b','r','g','c','m','y','k','w'] # maximally 8 colors allowed so far\n\n ind = np.arange(num_datasets) # the x locations for the groups\n width = 0.8*(1.0/num_methods) # the width of the bars\n method_bar=[]\n fig=plt.figure(num=1,figsize=(figwidth,figheight))\n ax=fig.add_subplot(1,1,1)\n #fig, ax = plt.subplots()\n for i in range(num_methods):\n if std is None:\n method_bar.append( ax.bar(ind+i*width, data[i,:], width, color=colors[i], ecolor='k', edgecolor='black', linewidth=0.5, hatch=hatchs[i]))\n else:\n std=np.array(std)\n method_bar.append( ax.bar(ind+i*width, data[i,:], width, color=colors[i], yerr=std[i,:], ecolor='k', edgecolor='black', linewidth=0.5, hatch=hatchs[i]))\n\n # add some text for labels, title and axes ticks\n ax.set_ylabel(ylab,fontsize=xylabel_fontsize)\n ax.set_xlabel(xlab,fontsize=xylabel_fontsize)\n ax.set_title(title,fontsize=title_fontsize)\n ax.set_xticks(ind+0.5*num_methods*width)\n ax.set_xticklabels(datasets, rotation=rotation)\n ax.set_yticks(np.arange(0,1.1,0.1))\n ax.set_ylim(ymin,ymax)\n ax.set_xlim(-0.5,len(datasets)+1)\n plt.setp(ax.get_xticklabels(), fontsize=xytick_fontsize)\n plt.setp(ax.get_yticklabels(), fontsize=xytick_fontsize)\n # shrink axis box \n #box = ax.get_position()\n #ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n #ax.legend( method_bar, methods, loc='lower left', bbox_to_anchor=(1.0, 0.3), fontsize=legend_fontsize )\n ax.legend( method_bar, methods, loc=legend_loc, fontsize=legend_fontsize )\n #plt.show()\n fig.savefig(filename,bbox_inches='tight')\n plt.close(fig)", "def doAllPlots ():\n #df = processIp (\"18-06-01-1-attack.pcap\", \"ec:1a:59:79:f4:89\")\n #df.to_csv (\"df.csv\", index=False)\n df = pd.read_csv (\"df.csv\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropyWithThreshold (df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n\n \"\"\"\n Traffic flow graph\n \"\"\"\n #df = processTrafficFlow (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotTrafficFlow 
(df)\n\n \"\"\"\n Entropy for source port\n \"\"\"\n #df = processSrcPort (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotEntropy (df)\n\n \"\"\"\n Entropy for destination port\n \"\"\" \n #df = processDstPort (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotEntropy (df) \n\n \"\"\"\n It will be implemented next day\n df = processPorts (\"18-06-01.pcap\", \"ec:1a:59:79:f4:89\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropy (df, attack_df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n\n df = processProtocols (\"18-06-01.pcap\", \"ec:1a:59:79:f4:89\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropy (df, attack_df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n \"\"\"\n return", "def plot_fishing_mortality(df):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_position(default_timeseries_position) \n\n Fn = df['Fn'].groupby([df.Year, df.Reg, df.Sreg]).mean()\n\n all_fishing_mortality = Fn.loc[:, 'All', 'All']\n ma_fishing_mortality = Fn.loc[:, '1', 'All']\n gb_fishing_mortality = Fn.loc[:, '2', 'All']\n\n # Don't plot the first year. Also, the data is shifted by one year.\n # For some reason, restricting the year range above results in a series\n # that still have a multi-index. This seems like the cleanest way to do\n # that.\n all_fishing_mortality = all_fishing_mortality[2:]\n ma_fishing_mortality = ma_fishing_mortality[2:]\n gb_fishing_mortality = gb_fishing_mortality[2:]\n\n all_fishing_mortality.index = all_fishing_mortality.index - 1\n ma_fishing_mortality.index = ma_fishing_mortality.index - 1\n gb_fishing_mortality.index = gb_fishing_mortality.index - 1\n\n all_fishing_mortality.plot(ax=ax, label='All') \n ma_fishing_mortality.plot(ax=ax, label='Mid Atlantic')\n gb_fishing_mortality.plot(ax=ax, label='Georges Bank')\n\n ax.legend(loc='best')\n\n content = io.BytesIO()\n plt.savefig(content, format='png')\n content.seek(0)\n image_cache['fishing_mortality']['fishing_mortality'] = content\n\n plt.close()", "def plot_calibration(df, x_input = \"Mean Predicted Avg\",\n y_input = \"Empirical Probability\",\n x_name=\"Mean Predicted\",\n y_name=\"Empirical Probability\",\n method_order = METHOD_ORDER, \n avg_x = False):\n\n methods = df['method_name']\n uniq_methods = pd.unique(methods)\n method_order = [j for j in METHOD_ORDER if j in uniq_methods]\n method_df = []\n\n if avg_x: \n df_copy = df.copy()\n new_list = [0]\n new_x_map = {}\n for method in uniq_methods: \n temp_vals = df[df['method_name'] == method][x_input]\n new_ar = np.vstack(temp_vals)\n new_ar = np.nanmean(new_ar, 0) # avg columnwise\n new_x_map[method] = new_ar\n df_copy[x_input] = [new_x_map[method] for method in methods]\n df = df_copy\n\n x, y = df[x_input].values, df[y_input].values\n\n\n method_df = [{x_name : xx, y_name : yy, \"Method\" : method}\n for x_i, y_i, method in zip(x, y, methods)\n for xx,yy in zip(x_i,y_i)]\n method_df = pd.DataFrame(method_df)\n sns.lineplot(x=x_name, y=y_name, hue=\"Method\", alpha=0.8,\n hue_order=method_order,\n data=method_df,\n palette = METHOD_COLORS)\n x = np.linspace(0,1,100)\n plt.plot(x, x, linestyle='--', color=\"black\")", "def showFunctions(self,window):\n wsx,wsy=window.size\n for i,function in enumerate(self.functions):\n self.showGraph(function,window,self.colors[i])\n window.print(str(function),[wsx-wsx/5,wsy-wsy/20*(i+2)],color=self.colors[i],size=25)", "def test_fonction():\n Func = [[7729.018255678793, 
140.153834155207, 68.77595919655846, 31.62018118184545, 18.030431610812485, 11.480451328936848, 8.854799040173322, 5.891748736768329, 4.107058029460621, 3.525987646397012, 2.6501857762543453, 1.9939336429398156, 1.796115967192535, 1.3439730213174272, 1.0573728322694307, 0.9370165183504918, 0.6862225806758537, 0.58629480789044, 0.46467717773394074, 0.4351295050299971, 0.31030829231196316, 0.18283441858118177, 0.2508750473787763, 0.12603102215466033, 0.1403733845624147, 0.208944572364959, 0.05937056209629393, 0.06406561737973851, 0.02549828229037716, 0.044190126138167286, 0.12220850634047802, 0.07250107250107221, 0.008166145780824684, 1.0000000000000118, 0.0016025641025641038, 0.00644122383252819, 0.0016025641025641038], [0.9610949812702193, 0.4109019899274278, 0.3089377623397382, 0.23495840772645324, 0.20677330100735603, 0.19949331148184576, 0.19889217797273162, 0.1983236356606282, 0.20419249563878353, 0.22439671222315674, 0.2468001421725052, 0.26934377202851223, 0.3026764978536294, 0.318102154625913, 0.36021755759452945, 0.3948630408193794, 0.4151320064818989, 0.4874842761804363, 0.5224160769563155, 0.5581950422944579, 0.5694688385000937, 0.55031751183993, 0.6849362225850419, 0.6075803939330335, 0.7350416126522452, 0.8427100469155232, 0.6733603675051251, 0.7902933382920369, 0.6836315434546335, 0.8276834938319588, 0.9487611479434883, 0.9330768568229448, 0.8193885540523317, 0.997, 0.6670224119530418, 0.8010253123998721, 0.6670224119530418]]\n Func2 = [[351.7858724605074, 86.17819599440456, 40.60817807215555, 22.41370879569776, 13.71370737429577, 9.380805367958237, 6.500771030437166, 5.1934818237317595, 3.8672723604183825, 2.9429262287072286, 2.2293044011542276, 1.7413726101499962, 1.4110020679698105, 1.145285045596285, 0.8493079536648233, 0.7075504964039413, 0.5314726701362551, 0.3946934984396482, 0.35765493303260854, 0.26720947446336063, 0.20483675723548622, 0.16768018253752273, 0.1348248093028533, 0.09131928959311625, 0.06352807714123655, 0.10443083847008712, 0.08486764614717036, 0.07948616458565717, 0.034928848641655796, 0.01957585644371944, 0.0101010101010101, 0.0060868252930227846, 0.003034178389628626, 1.0000000000000095, 0.00040016006402561054, 0.999999999998485, 1.0000000000005178], [0.4014557102478067, 0.19662718856027814, 0.14714271380376454, 0.12376760998544561, 0.11465015678691455, 0.11701257764627322, 0.11971607647950641, 0.12500413021169918, 0.13585072294949355, 0.15396764607614966, 0.1688776087223454, 0.19466317245480091, 0.22196728054180964, 0.25356761298043673, 0.2838683431502283, 0.32214826256665563, 0.3474420903976237, 0.38101744922057085, 0.4472615852893438, 0.4838915064306636, 0.5092442536661715, 0.5393723754562065, 0.6281883708635211, 0.6075021365541242, 0.5864414731409806, 0.8093479024729197, 0.8515064760519562, 0.8942467588311623, 0.7951232441028355, 0.8030838419531001, 0.8347245409015025, 0.7345183562814673, 0.5796857049505159, 0.9992, 0.500100020004001, 0.9997999999999997, 0.9996000000000002]]\n Func3 = [[0, 587.8353916144741, 122.51606563339197, 43.412900839437754, 23.57555460166956, 15.269810446375592,\n 10.302086288261853, 6.559749680166944, 4.733027276883256, 3.5899442373476638, 2.6188091438741785,\n 1.8973511635960179, 1.59962405851026, 1.1900558241207209, 0.9154742469037501, 0.6663774582713599,\n 0.5510310065149937, 0.3962213545720932, 0.34948402621422503, 0.25056075858741617, 0.2104583602324079,\n 0.14271205720517338, 0.14355391510677562, 0.09448580768732265, 0.09563164108618655, 0.05255180659992084,\n 0.05056730634542649, 0.04830593760483059, 
0.017362995116657634, 0.018563316442158327,\n 0.008064516129032258,\n 1.000000000000367, 0.007556675062972293],\n [0, 0.3286716474211523, 0.13242848819742412, 0.08446949128973799, 0.06942310179309806, 0.06710098910557027,\n 0.0672674712138261, 0.07277714425838515, 0.07860653317204705, 0.08731142910806171, 0.10727105079265914,\n 0.11444337732064816, 0.1388584129446931, 0.1545716580456971, 0.1909675139048617, 0.2078287621915297,\n 0.24481225152845482, 0.2809639446229486, 0.3427375708843379, 0.3563349568749672, 0.44658806716684524,\n 0.4323139797237823, 0.5209648901237686, 0.5384242118067787, 0.6800151114469211, 0.6244881568587771,\n 0.6892348767357834, 0.8342138924420222, 0.7306736811340057, 0.7746555651829621, 0.6684491978609626,\n 0.9988000000000005, 0.7158196134574087]]\n Func4 = [[15748.752858254895, 191.911645065407, 73.47842499410935, 30.41633464294201, 19.98626405383976, 11.452154438755791, 7.133577668465118, 5.558028573739012, 3.9221979491511965, 2.883103216560325, 2.134843944573437, 1.672594713096574, 1.2840603701894708, 0.9760043686353028, 0.7383287083076139, 0.5201345636008389, 0.3922546743466271, 0.3075443182363077, 0.2190805647347894, 0.20447301461609166, 0.13718384831088948, 0.10116884193975774, 0.06605172314636748, 0.05724132569770158, 0.05521472392638043, 0.028443817583450853, 0.030927835051546386, 0.020842194810718865, 0.014610389610389598, 0.008522010992889928, 0.999999999998485, 0.004924128228318766, 0.9999999999999153], [0.8339118479799609, 0.09465261480352483, 0.05645445767221664, 0.04042946161369488, 0.039816125266879804, 0.038808660214436394, 0.04108030531513686, 0.04722150007344185, 0.05756881734809522, 0.06174501095337855, 0.07754371601798399, 0.08835218320247393, 0.11277873469258441, 0.14265723594157298, 0.1591593134391955, 0.18294405416467058, 0.20022650693883753, 0.25880068050710453, 0.28508276563948803, 0.38530886060602676, 0.3827846845148159, 0.3989797846053703, 0.4321293982608086, 0.549123155471954, 0.6479481641468685, 0.6177554137838079, 0.7204610951008645, 0.7485830392471395, 0.8589097572148419, 0.7662293265433628, 0.9997999999999997, 0.7786256145580744, 0.9994]]\n Func5 = [[6025.007118984573, 201.67078346958766, 61.164046929888165, 28.570275345356297, 16.42844389234335, 10.118865208041463, 6.5569441993908315, 5.115524806237155, 3.670230880870961, 2.578942628606752, 1.9336145754313054, 1.3128869676600932, 1.0509490349325865, 0.8260606265098674, 0.6192337836269376, 0.39548178329078904, 0.3125342768364706, 0.2535171070520563, 0.17950996366785027, 0.1589593169850413, 0.08776166654828464, 0.07186902085373958, 0.05087852147014008, 0.04031209362808843, 0.015025041736227053, 0.017452299442196745, 0.017452299442196745], [0.4900449307377233, 0.052013394197577424, 0.02763077179222995, 0.022073148401405642, 0.021551468349526978, 0.022874943012867084, 0.025129502524592704, 0.0290677734501479, 0.03586807797479691, 0.04200099407612618, 0.05395970879783885, 0.06222885447396381, 0.08011656818055503, 0.10740529383718123, 0.13349486346877767, 0.14548961984802525, 0.16651317027639345, 0.2157377384182918, 0.2536490552393858, 0.3281846996863911, 0.3205769005384313, 0.37653580728960656, 0.42583822467919963, 0.5652911249293386, 0.5004170141784822, 0.6177302303447393, 0.6177302303447393]]\n Func6 = [[np.mean([4447.600676225763, 4028.6041733343945]), 1122.3591601299543, np.mean([566.3324626056967, 560.7330884421851]), 330.62666503822817, 210.43180030971985, 149.0424349106643, 82.3987852348437, 44.547601156494196, 26.426565843449865, 17.824560051495858, 13.783607579356286, 
10.40131640361244, 7.455414078577158, 5.499251804904008, 4.302343361171181, 3.539763892078505, 2.784540963982787, 2.2236920038741386, 1.7559944701404429, 1.4584556538378022, 1.200557790786275, 1.0699270271332681, 0.8634699222550425, 0.7434193327535079, 0.5714709844638692, 0.4396553281521431, 0.3818399186003246, 0.32953426555502685, 0.24102244704033102, 0.22355346426540507, 0.15212505517750333, 0.16001860110466576, 0.10928264810439764], [np.mean([0.2617607830185082, 0.24308742821120072]), 0.0927423079289474, 0.05568821342607846, 0.03793131631871315, 0.0278257161673426, 0.022656568305311583, 0.016435055935377363, 0.013349794613250844, 0.011828502037667653, 0.011907722737912936, 0.011906786143346124, 0.01350556077146206, 0.014414705674904129, 0.015921318603495407, 0.018673975709110842, 0.02270468991422641, 0.02309589946853888, 0.027514993109446845, 0.032185252317963355, 0.039533712217873974, 0.04836171488037051, 0.05571282912235797, 0.0662400500529835, 0.0842953982688298, 0.09463525780280219, 0.11078755407852524, 0.12333175657275465, 0.1523270728384864, 0.1633804430013571, 0.2132018494926845, 0.20858345530936606, 0.26749188764633536, 0.28116189267776115]]\n\n index = np.linspace(0.5, 11.3, 37)\n index3 = np.linspace(0.1, 9.7, 33)\n index4 = np.linspace(0.2, 9.8, 33)\n index5 = np.linspace(0.2, 7.8, 27)\n index6 = [0.15, 0.25, 0.3, 0.4, 0.45]+[0.5+i*0.2 for i in range(28)]\n\n n = 100\n b = np.log(Func2[0][4]/Func2[0][5])/np.log(2/1.7)\n a = Func2[0][4]*(1.7**b)\n print(a, b)\n a_t, b_t = ab_finder(Func[0][3:20], index[3:20])\n a_t2, b_t2 = ab_finder(Func2[0][3:20], index[3:20])\n a_t3, b_t3 = ab_finder(Func3[0][3:20], index3[3:20])\n a_t4, b_t4 = ab_finder(Func4[0][3:20], index4[3:20])\n a_t5, b_t5 = ab_finder(Func5[0][3:20], index5[3:20])\n a_t6, b_t6 = ab_finder(Func6[0][3:20], index6[3:20])\n plt.show()\n print(a_t, b_t, a_t2, b_t2, a_t3, b_t3, a_t4, b_t4, a_t5, b_t5, a_t6, b_t6)\n\n plt.plot(index, Func[0], color='purple')\n #plt.plot(index, Func2[0], color='red')\n plt.plot(index3, Func3[0], color='black')\n #plt.plot(index4, Func4[0], color='blue')\n plt.plot(index5, Func5[0], color='yellow')\n plt.plot(index6, Func6[0], color='orange')\n index_comp = np.linspace(0.1+1/n, 11, n)\n #plt.plot(index_comp, a/((index_comp)**b), color='blue')\n plt.plot(index_comp, a_t6/(index_comp**b_t6), color='green')\n plt.show()", "def __iadd__(self, func):\n self.append_plot(func)\n return self", "def plotRadarPlot(data_grouped, save=False, *args):\n #We get the name of features\n variables = data_grouped.columns\n #We get the ranges of each features\n ranges = findRanges(data_grouped)\n #We plot each cluster on a different radar (better for vizualisation\n for i in range(0, len(data_grouped)):\n #Init the figure\n fig1 = plt.figure(figsize=(6, 6))\n #Init the radar\n radar = ComplexRadar(fig1, variables, ranges)\n #Init values on the radar\n radar.plot(data_grouped.loc[i, :], ranges)\n #Fill the radar (plot looks better with that fill)\n radar.fill(data_grouped.loc[i, :], alpha=0.2)\n if save == True:\n try:\n plt.savefig(args + \"radar\" + data_grouped.loc[i, :] + \".png\")\n except NameError:\n print('Missing the path for saving')\n plt.show()", "def plot_all(self):\n self.plot_ramps()\n self.plot_groupdq()", "def test_exercise_1():\n a, b = 5, 0\n fvals = []\n grid = np.linspace(-3, 4)\n for value in grid:\n fvals.append(get_test_function(value, a, b))\n plt.plot(grid, fvals)", "def varying_lamda(x, y, z, lambda_min, lambda_max, n_lambda, k, save_fig = None, method = 'Ridge', split = True, train = 
0.7, seed = 42, max_iter = 1001, l_min = False, plot_indexes = [0,1,2]):\n\n lambdas = np.array([0] + np.logspace(lambda_min, lambda_max, n_lambda).tolist())\n polynomials = np.array(k)\n X, Y = np.meshgrid(lambdas, polynomials)\n MSE = np.zeros(np.shape(X))\n\n j = 0\n for k in polynomials:\n print(k)\n\n model = regression(x, y, z, k = int(k), split = split, train = train, seed = seed)\n if method == 'Ridge':\n model.SVD()\n i = 0\n for lam in lambdas:\n\n if method == 'Ridge':\n beta = model.Ridge(lam = lam)\n elif method == 'Lasso':\n beta = model.Lasso(lam = lam, max_iter = max_iter)\n\n z_tilde = model.z_tilde(beta = beta, X = model.X_test)\n MSE[j, i] = model.MSE(z_tilde = z_tilde, z = model.z_test)\n i += 1\n j += 1\n\n print('Method = ', method)\n lambdas_min = []\n for i in range(len(polynomials)):\n minimum_index = MSE[i].argmin()\n print('Minimum lambda for polynomial %.i: ' %(polynomials[i]), lambdas[minimum_index], MSE[i].min())\n lambdas_min.append(int(minimum_index))\n\n #plt.pcolormesh(lambdas.tolist() + [lambdas[-1] + lambdas[1]], polynomials.tolist() + [polynomials[-1] + 1], MSE)\n #plt.colorbar()\n #plt.show()\n\n plt.title('MSE for the test data with ' + method)\n plt.contourf(lambdas, polynomials, MSE)\n plt.colorbar()\n plt.ylabel('Polynomial order', fontsize = 14)\n plt.xlabel('Lambda', fontsize = 14)\n try:\n plt.savefig(results_dir + save_fig + 'contour' + '.png')\n except:\n pass\n plt.show()\n\n plt.title('MSE for the test data with ' + method)\n plt.plot(lambdas, MSE[plot_indexes[0], :], label = 'k = ' + str(polynomials[plot_indexes[0]]))\n plt.plot(lambdas, MSE[plot_indexes[1], :], label = 'k = ' + str(polynomials[plot_indexes[1]]))\n plt.plot(lambdas, MSE[plot_indexes[2], :], label = 'k = ' + str(polynomials[plot_indexes[2]]))\n if l_min:\n plt.plot(lambdas[lambdas_min[1]], MSE[1, lambdas_min[1]], 'ro', label = 'Lambda min = %.4g' %(lambdas[lambdas_min[1]]))\n else:\n pass\n plt.legend()\n plt.xlabel('Lambda', fontsize = 14)\n plt.ylabel('MSE', fontsize = 14)\n plt.tight_layout()\n try:\n plt.savefig(results_dir + save_fig + '.png')\n except:\n pass\n plt.show()\n return lambdas_min" ]
[ "0.63259864", "0.6046818", "0.53915054", "0.5336046", "0.5121077", "0.5051932", "0.48781553", "0.48333043", "0.48324347", "0.47948903", "0.47788268", "0.4766968", "0.47533748", "0.47356328", "0.47074386", "0.46614596", "0.46601972", "0.465941", "0.46516296", "0.46457088", "0.46375453", "0.46337804", "0.4626232", "0.46241716", "0.4578506", "0.45781216", "0.4568808", "0.45685348", "0.45646355", "0.45399988", "0.45261237", "0.45190975", "0.45131835", "0.45121595", "0.45029375", "0.45010042", "0.44976225", "0.44966823", "0.44955128", "0.44931495", "0.44688264", "0.44332626", "0.44278312", "0.4424856", "0.4421749", "0.44193882", "0.44169414", "0.4416717", "0.44097626", "0.4403275", "0.44027478", "0.44015694", "0.4401398", "0.43931508", "0.4392152", "0.43900156", "0.4386029", "0.43794882", "0.43762007", "0.4346688", "0.43438917", "0.4340414", "0.43353686", "0.43347323", "0.43288958", "0.43133107", "0.4313182", "0.43125308", "0.43124783", "0.430605", "0.43013412", "0.42981303", "0.4289899", "0.4287563", "0.42868328", "0.42838758", "0.42818946", "0.42810977", "0.4279294", "0.4269902", "0.42691246", "0.4266078", "0.4260558", "0.42551675", "0.4244068", "0.42434457", "0.42397046", "0.4238565", "0.42379808", "0.42363867", "0.42356774", "0.42332467", "0.4226557", "0.42233434", "0.42194745", "0.4217051", "0.42135304", "0.42134905", "0.42101827", "0.42101014" ]
0.7678806
0
Plot a rotated convergence plot. It is essentially like fval_by_budget(), but rotated by 90 degrees, showing how large a budget is required to reach each target. While this is a little less intuitive at first, it allows better judgement of the performance impact of each strategy: with fval_by_budget(), a performance change shows up as a phase shift of the curve, while in evals_by_target() it simply translates the position on the y axis. groupby is the method of aggregating results of multiple instances: a callable, stringable object (GroupByMedian by default). By default, the absolute evaluation count is shown, but values relative to some baseline dataset can be shown instead.
def evals_by_target(ax, pds, baseline_ds=None, baseline_label="", dim=None, funcId=None, groupby=None):
    if groupby is None: groupby = GroupByMedian()
    pfsize = len(pds.algds.keys())

    runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
    target_values = pp.RunlengthBasedTargetValues(runlengths,
            reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
    targets = target_values((funcId, dim))

    if baseline_ds:
        baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)

    for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
        #print name, ds
        fevs = groupby(ds.detEvals(targets), axis=1)
        if baseline_ds:
            fevs /= baseline_fevs

        style['markevery'] = 64
        ax.loglog(targets, fevs, label=name, basey=pfsize, **style)
    ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))
    if baseline_ds:
        ax.set_yticks([2, 3.5], minor=True)
    ax.set_xlabel('Function Value Targets')
    ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))
    ax.grid()
    if baseline_ds:
        ax.yaxis.grid(True, which = 'minor')
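A minimal usage sketch, not part of the original record: it assumes a post-processed benchmark results object `pds` (exposing the same algds, maxevals, bestalg and detEvals accessors used above) has already been loaded, and the dimension, function id and output filename below are illustrative placeholders.

import matplotlib.pyplot as plt

# Hypothetical call: `pds` is an assumed, already-loaded results container;
# 10-D function f5 is an arbitrary choice for illustration.
fig, ax = plt.subplots(figsize=(8, 6))
evals_by_target(ax, pds, dim=10, funcId=5, groupby=GroupByMedian())
ax.legend(loc='best')          # curve labels come from the per-strategy `label=name` above
fig.savefig('evals_by_target_f5_10D.png')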
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fval_by_budget(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n if baseline_ds:\n baseline_budgets = baseline_ds.funvals[:, 0]\n baseline_funvals = groupby(baseline_ds.funvals[:, 1:], axis=1)\n baseline_safefunvals = np.maximum(baseline_funvals, 10**-8) # eschew zeros\n # fvb is matrix with each row being [budget,funval]\n baseline_fvb = np.transpose(np.vstack([baseline_budgets, baseline_safefunvals]))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n budgets = ds.funvals[:, 0]\n funvals = groupby(ds.funvals[:, 1:], axis=1)\n\n # Throw away funvals after ftarget reached\n try:\n limit = np.nonzero(funvals < 10**-8)[0][0] + 1\n except IndexError:\n limit = np.size(budgets)+1\n budgets = budgets[:limit]\n funvals = funvals[:limit]\n\n fvb = np.transpose(np.vstack([budgets[:limit], funvals[:limit]]))\n\n if baseline_ds:\n # Relativize by baseline\n fvba = ra.alignArrayData(ra.VArrayMultiReader([fvb, baseline_fvb]))\n budgets = fvba[:, 0]\n funvals = fvba[:, 1] / fvba[:, 2]\n\n style['markevery'] = 16\n ax.loglog(budgets, funvals, label=name, basex=pfsize, **style)\n if baseline_ds:\n ax.set_yticks([1], minor=True)\n ax.set_xlabel('Budget')\n ax.set_ylabel(_fval_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')", "def build_scatterplot(budget):\n frame = load_frames(budget=budget)\n X = frame[metrics]\n Y = frame['y']\n\n predicted = cross_val_predict(get_best_models(budget=budget, tool=settings.algo), X, Y, cv=20)\n\n fig, ax = plt.subplots()\n ax.scatter(Y, predicted, edgecolors=(0, 0, 0))\n ax.plot([Y.min(), Y.max()], [Y.min(), Y.max()], 'k--', lw=4)\n ax.set_xlabel('Measured')\n ax.set_ylabel('Predicted')\n plt.savefig('{}/cv-error-{}-{}.pdf'.format(settings.PLOTS, settings.algo, budget))", "def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label=\"\", baseline2_ds=None, baseline2_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline1_ds:\n baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))\n if baseline2_ds:\n baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs1 = groupby(ds.detEvals(targets), axis=1)\n if baseline1_ds:\n fevs1 /= baseline1_fevs\n fevs2 = groupby(ds.detEvals(targets), axis=1)\n if baseline2_ds:\n fevs2 /= baseline2_fevs\n\n infsx = np.nonzero(fevs1 == inf)\n infs = infsx[0]\n if np.size(infs) > 0:\n #print infs\n fevs1 = fevs1[:infs[0]-1]\n fevs2 = fevs2[:infs[0]-1]\n\n #print name, fevs1, fevs2\n style['markevery'] = 64\n ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)\n ax.grid()\n ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. 
log(runlengths) + 1\n ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))\n ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby)))", "def ecdf(data, group_by=None, targets=None, ax=None, **kwargs):\n text_color = plt.rcParams.get('ytick.color')\n linewidth = 2\n # Handle keyword arguments\n for k, v in kwargs.items():\n if k not in ['linewidth']:\n raise TypeError('ecdf got an unexpeted keyword argument: {}'.format(k))\n else:\n if k == 'linewidth':\n linewidth = v\n # Deal with input data\n if group_by is not None:\n if type(data) == pd.core.frame.DataFrame:\n print(\"Grouping DataFrame by {}\".format(group_by))\n print(\"Target Features:\", targets)\n if type(targets) == str:\n targets = [targets]\n else:\n try:\n it = iter(targets)\n except:\n targets = [targets]\n cols = targets + [group_by]\n data = data[cols]\n variables = data.columns[:-1]\n data = data.groupby(group_by)\n else:\n return(\"Error: only DataFrame input works with group_by functionality\")\n else: \n if type(data) == pd.core.series.Series:\n variables = [data.name]\n elif type(data) == pd.core.frame.DataFrame:\n if targets is None:\n variables = list(data.columns)\n else:\n if type(targets) == str:\n targets = [targets]\n else: \n try:\n it = iter(targets)\n except:\n targets = [targets]\n print(\"Target Features:\", targets)\n variables = targets\n elif type(data) == pd.core.groupby.generic.DataFrameGroupBy:\n variables = list(data.obj.columns)\n else:\n data = pd.Series(data, name='data')\n variables = [data.name]\n \n \n if type(data) == pd.core.groupby.generic.DataFrameGroupBy:\n for variable in variables:\n if not ax:\n fig, ax = plt.subplots(figsize=(12,8))\n max_x = 0\n for name, group in data:\n x = np.sort(group[variable])\n n = len(group)\n y = np.arange(1, n+1) / n\n ax.plot(x, y, marker='.', label=name, alpha=0.7, linewidth=linewidth)\n if max(x) > max_x:\n max_x = max(x)\n #max_x = 0\n ax.axhline(y=0.5, ls=':', color='gray')\n ax.axhline(y=0.05, ls=':', color='gray')\n ax.axhline(y=0.95, ls=':', color='gray')\n ax.annotate('0.5', xy=(max_x, 0.47))\n ax.annotate('0.95', xy=(max_x, 0.92))\n ax.annotate('0.05', xy=(max_x, 0.02))\n ax.legend()\n plt.title(\"ECDF for feature: {}\".format(variable), color=text_color)\n plt.show()\n \n else:\n n = len(data)\n y = np.arange(1, n+1) / n\n if not ax:\n fig, ax = plt.subplots(figsize=(12,8))\n max_x = 0\n for variable in variables:\n if type(data) == pd.core.series.Series:\n x = np.sort(data)\n string = variable\n else:\n x = np.sort(data[variable])\n string = 'Data'\n ax.plot(x, y, marker='.', label=variable)\n if max(x) > max_x:\n max_x = max(x)\n ax.axhline(y=0.5, ls=':', color='gray')\n ax.axhline(y=0.05, ls=':', color='gray')\n ax.axhline(y=0.95, ls=':', color='gray')\n ax.annotate('0.5', xy=(max_x, 0.47))\n ax.annotate('0.95', xy=(max_x, 0.92))\n ax.annotate('0.05', xy=(max_x, 0.02))\n plt.title(\"ECDF for {}\".format(string), color=text_color)\n plt.legend()\n plt.show()", "def plot_results(outputs, x, e, t, a, folds, groups,\n quantiles, strat='quantile', adj='KM', plot=True):\n if plot:\n mpl.rcParams['hatch.linewidth'] = 2.0\n\n fig, big_axes = plt.subplots(\n figsize=(8 * (len(groups) + 2), 6 * len(quantiles)),\n nrows=len(quantiles),\n ncols=1)\n\n plt.subplots_adjust(hspace=0.4)\n\n i = 0\n for _, big_ax in enumerate(big_axes, start=1):\n big_ax.set_title(\n 'Receiver Operator Characteristic and Calibration at t=' +\n str(quantiles[i]) + '\\n',\n fontsize=16)\n big_ax.tick_params(\n 
labelcolor=(1., 1., 1., 0.0),\n top='off',\n bottom='off',\n left='off',\n right='off')\n i += 1\n \n eces = {}\n metrics = {}\n\n for quant in quantiles:\n eces[quant] = {}\n \n for i in range(len(quantiles)):\n\n scores = outputs[quantiles[i]]\n for j in range(len(groups) + 2):\n\n pt = (i * (len(groups) + 2) + j + 1)\n if plot:\n ax = fig.add_subplot(len(quantiles), len(groups) + 2, pt)\n else:\n ax = None\n \n if (j==1):\n eces[quantiles[i]]['all'] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n None,\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot) \n \n if (j>1):\n eces[quantiles[i]][groups[j - 2]] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups[j - 2],\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot)\n \n if (j==0):\n metrics[quantiles[i]] = plot_roc_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups,\n quantiles[i],\n plot=plot)\n\n for quant in quantiles:\n metrics[quant] = metrics[quant] + (eces[quant], )\n \n if plot: \n plt.show()\n return metrics", "def plot_coupling_grid(baseline_group, fits_groups, metrics, fax=None):\n n_algorithms = len(fits_groups)\n n_metrics = len(metrics)\n\n if fax is None:\n fig, axes = plt.subplots(n_metrics, n_algorithms,\n figsize=(3 * n_algorithms, 3 * n_metrics))\n else:\n fig, axes = fax\n\n # iterate over metrics\n for row_idx, metric in enumerate(metrics):\n if metric == 'selection_ratio':\n baseline_coefs = baseline_group['coupling_coefs'][:]\n baseline_selection_ratio = \\\n calculate_selection_ratio(baseline_coefs).mean(axis=0)\n\n # iterate over algorithms\n for col_idx, algorithm in enumerate(fits_groups):\n if metric == 'selection_ratio':\n # calculate selection ratio for algorithm\n coefs = algorithm['coupling_coefs'][:]\n selection_ratio = calculate_selection_ratio(coefs).mean(axis=0)\n\n # plot direct comparison\n axes[row_idx, col_idx].scatter(\n baseline_selection_ratio,\n selection_ratio,\n alpha=0.5,\n color='k',\n edgecolor='w')\n else:\n axes[row_idx, col_idx].scatter(\n baseline_group[metric][:].mean(axis=0),\n algorithm[metric][:].mean(axis=0),\n alpha=0.5,\n color='k',\n edgecolor='w')\n\n return fig, axes", "def plot_evaluation(values, info, measures = ['Dice','Jaccard', 'TPR', 'TNR', '1-GCE', 'VS', 'RI', 'ARI', 'MI', '1-VOI', 'ICC','1/(1+PBD)', 'KAP', 'AUC', '1/(1+HD)', '1/(1+AVD)', 'MHD' ], colourmap=None, outfile='polar_results.png'):\n _min = info['minimum']\n _max = info['maximum']\n if colourmap is None:\n colourmap = [[86./255.,180./255.,233./255.] 
for ii in range(values.shape[0])]\n else:\n # normalize colourmap values between 0 and 1\n colourmap = (colourmap-_min)/(_max-_min)\n # apply cividis, returns the RBG1 values for cividis, for dots\n colourmap = [[cm.cividis(ii)] for ii in colourmap] \n\n # elements of the circle\n N = len(measures)\n # evenly space measures around circle\n x_as = [n / float(N) * 2 * pi for n in range(N)] \n\n # Set color of axes\n plt.rc('axes', linewidth=0.5, edgecolor=\"#888888\")\n\n # Create polar plot\n fig = plt.figure(figsize = (11,9.5))\n gs = gridspec.GridSpec(1, 3, width_ratios=[17,2,1])\n ax = plt.subplot(gs[0], polar=True)\n \n # Set position of y-labels\n ax.set_rlabel_position(0)\n\n # Set color and linestyle of grid\n ax.xaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n ax.yaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n\n # Set yticks\n plt.yticks([0.2, 0.4, 0.6, 0.8, 1.0], [\"0.2\", \"0.4\", \"0.6\", \"0.8\", \"1.0\"], fontsize=15)\n pos=ax.get_rlabel_position()\n ax.set_rlabel_position(pos+0.4*360./float(len(measures)))\n\n # Plot data\n for ii in np.arange(values.shape[0]):\n xx = np.asarray(x_as) + np.random.randn(len(x_as))*np.diff(x_as)[0]/15.\n data_norm = None\n if info['logplot']:\n data_norm = matplotlib.colors.LogNorm(vmin=_min, vmax=_max)\n sc = ax.scatter(xx, values[ii,:], 23, color=colourmap[ii]*len(xx), norm=data_norm, zorder=3) \n\n # Fill area\n # close the circle\n median = list(np.median(values, axis=0))\n median += median[:1]\n upper = list(np.percentile(values, 75, axis=0))\n upper += upper[:1]\n lower = list(np.percentile(values, 25, axis=0))\n lower += lower[:1]\n x_as += x_as[:1]\n ax.plot(x_as, median, color=[86./255.,180./255.,233./255.], zorder=5)\n ax.fill_between(x_as, upper, lower, zorder=4, color=[86./255.,180./255.,233./255.], alpha=0.3)\n\n # Set number of radial axes and remove labels\n plt.xticks(x_as[:-1], [])\n\n # Set axes limits\n plt.ylim(0, 1)\n\n # Draw ytick labels to make sure they fit properly\n for i in range(N):\n angle_rad = i / float(N) * 2 * pi-0.05\n text_size = 21\n if i in {3,8}:\n ax.text(angle_rad, 1.15, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {0}:\n ax.text(angle_rad, 1.25, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {1,5,7}:\n ax.text(angle_rad, 1.29, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {4}:\n ax.text(angle_rad, 1.32, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"top\")\n elif i in {10}:\n ax.text(angle_rad, 1.26, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {6}:\n ax.text(angle_rad, 1.25, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {9}:\n ax.text(angle_rad, 1.18, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n else:\n ax.text(angle_rad, 1.22, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n\n # colorbar location on figure\n cbaxes = plt.subplot(gs[2])\n\n # log scaling option\n norm = None\n if info['logplot']:\n norm = 
matplotlib.colors.LogNorm(vmin=_min,vmax=_max)\n\n img = plt.imshow(np.array([[_min,_max]]), aspect='auto', cmap=\"cividis\", norm=norm)\n img.set_visible(False)\n\n # initialize colorbar\n cbar = plt.colorbar(cax = cbaxes)\n\n # ticks and label\n c_values = cbar.get_ticks().tolist()\n \n ticklabels = [\"\" for ii in c_values]\n if _min < np.min(c_values):\n c_values = [_min] + c_values\n ticklabels = [\"%0.1f %s\" %(np.min(c_values), info['unit'])] + ticklabels\n else:\n ticklabels[0] = \"%0.1f %s\" %(np.min(c_values), info['unit'])\n\n if _max > np.max(c_values):\n c_values = c_values + [_max]\n ticklabels = ticklabels + [\"%0.1f %s\" %(np.max(c_values), info['unit'])]\n else:\n ticklabels[-1] = \"%0.1f %s\" %(np.max(c_values), info['unit'])\n \n cbar.set_ticks(c_values)\n cbar.set_ticklabels(ticklabels)\n cbaxes.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n cbar.ax.set_ylabel(info[\"label\"], labelpad=-20)\n \n # font sizes for colorbar\n cbar.ax.yaxis.label.set_size(19)\n cbar.ax.tick_params(labelsize=14)\n\n # Save and show polar plot \n plt.savefig(outfile)\n if info['display']:\n plt.show()\n plt.clf()\n plt.close('all')", "def convergence():\n fig, axes = plt.subplots(nrows=2, figsize=figsize(aspect=1.2))\n\n # label names\n label1 = str(league.lambda1)\n label2_list = [str(lambda2) for lambda2 in league.lambda2_list]\n\n # point spread and point total subplots\n subplots = [\n (False, [-0.5, 0.5], league.spreads, 'probability spread > 0.5'),\n (True, [200.5], league.totals, 'probability total > 200.5'),\n ]\n\n for ax, (commutes, lines, values, ylabel) in zip(axes, subplots):\n\n # train margin-dependent Elo model\n melo = Melo(lines=lines, commutes=commutes, k=1e-4)\n melo.fit(league.times, league.labels1, league.labels2, values)\n\n line = lines[-1]\n\n for label2 in label2_list:\n\n # evaluation times and labels\n times = np.arange(league.times.size)[::1000]\n labels1 = times.size * [label1]\n labels2 = times.size * [label2]\n\n # observed win probability\n prob = melo.probability(times, labels1, labels2, lines=line)\n ax.plot(times, prob)\n\n # true (analytic) win probability\n if ax.is_first_row():\n prob = skellam.sf(line, int(label1), int(label2))\n ax.axhline(prob, color='k')\n else:\n prob = poisson.sf(line, int(label1) + int(label2))\n ax.axhline(prob, color='k')\n\n # axes labels\n if ax.is_last_row():\n ax.set_xlabel('Iterations')\n ax.set_ylabel(ylabel)\n\n set_tight(w_pad=.5)", "def target_cov_plot(context):", "def plot_convergence(\n optimizers: list = [\"COBYLA\", \"SLSQP\", \"L-BFGS-B\", \"NELDER-MEAD\"],\n g2N: float = 0.2,\n maxit: int = 10000,\n varform: list = [\"ry\"],\n depth: int = 3,\n nrep: int = 10,\n dataprefix: str = \"data/miniBMN\",\n datasuffix: str = \"h5\",\n figprefix: str = \"figures/miniBMN\",\n ht: float = 0.0,\n up: int = 1000,\n):\n # setup parameters\n params = dict()\n params[\"l\"] = str(g2N).replace(\".\", \"\")\n params[\"d\"] = depth\n params[\"v\"] = \"-\".join(varform)\n params[\"m\"] = maxit\n params[\"n\"] = nrep\n params[\"f\"] = dataprefix\n params[\"s\"] = datasuffix\n assert type(optimizers).__name__ == \"list\"\n # collect data\n result = collect_data(optimizers, params)\n # get best runs\n gs = dict()\n for r in optimizers:\n gs[r] = result.loc[r].groupby(\"rep\").apply(min).energy\n gsdf = pd.DataFrame.from_dict(gs, dtype=float)\n print(gsdf.describe().T[[\"min\", \"max\", \"mean\", \"std\"]])\n # Plot\n # select the best runs for each optimizer\n fig, ax = plt.subplots()\n for o in optimizers:\n 
result.loc[o, gsdf[o].idxmin()].plot(\n x=\"counts\", y=\"energy\", xlim=[0, up], label=o, ax=ax\n )\n ax.axhline(ht, c=\"k\", ls=\"--\", lw=\"2\", label=\"HT\")\n ax.set_xlabel(\"iterations\")\n ax.set_ylabel(\"VQE energy\")\n ax.legend(loc=\"upper right\")\n filename = f\"{figprefix}_l{params['l']}_convergence_{params['v']}_depth{params['d']}_nr{params['n']}_max{params['m']}_xlim{up}\"\n plt.savefig(f\"{filename}.pdf\")\n plt.savefig(f\"{filename}.png\")\n plt.savefig(f\"{filename}.svg\")\n plt.close()", "def plot_cross_validation_metric(\n df_cv, metric, rolling_window=0.1, ax=None, figsize=(10, 6)\n):\n if ax is None:\n fig = plt.figure(facecolor='w', figsize=figsize)\n ax = fig.add_subplot(111)\n else:\n fig = ax.get_figure()\n # Get the metric at the level of individual predictions, and with the rolling window.\n df_none = performance_metrics(df_cv, metrics=[metric], rolling_window=0)\n df_h = performance_metrics(df_cv, metrics=[metric], rolling_window=rolling_window)\n\n # Some work because matplotlib does not handle timedelta\n # Target ~10 ticks.\n tick_w = max(df_none['horizon'].astype('timedelta64[ns]')) / 10.\n # Find the largest time resolution that has <1 unit per bin.\n dts = ['D', 'h', 'm', 's', 'ms', 'us', 'ns']\n dt_names = [\n 'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds',\n 'nanoseconds'\n ]\n dt_conversions = [\n 24 * 60 * 60 * 10 ** 9,\n 60 * 60 * 10 ** 9,\n 60 * 10 ** 9,\n 10 ** 9,\n 10 ** 6,\n 10 ** 3,\n 1.,\n ]\n for i, dt in enumerate(dts):\n if np.timedelta64(1, dt) < np.timedelta64(tick_w, 'ns'):\n break\n\n x_plt = df_none['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])\n x_plt_h = df_h['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])\n\n ax.plot(x_plt, df_none[metric], '.', alpha=0.5, c='gray')\n ax.plot(x_plt_h, df_h[metric], '-', c='b')\n ax.grid(True)\n\n ax.set_xlabel('Horizon ({})'.format(dt_names[i]))\n ax.set_ylabel(metric)\n return fig", "def plot_associative_learning_progress(ax, df):\n\n num_objects_list = sorted(df.curr_num_objects.unique())\n legend_list = []\n for idx in num_objects_list:\n ax.plot(df[df.curr_num_objects == idx].groupby('objects_iter').rewards.mean())\n legend_list.append(f'ns={idx}')\n ax.set_xlabel('Stimulus iteration')\n ax.set_ylabel('P(correct)')\n ax.set_ylim([0.4, 1])\n ax.legend(legend_list)", "def det_plot(data, group_by, plot_title, save_figure_path=None):\n subgroups = data.groupby(group_by)\n li_subgroups = subgroups.groups\n\n fontsize = 12\n fig, ax = plt.subplots(figsize=(8, 8), constrained_layout=True)\n for subgroup in li_subgroups:\n # for each subgroup\n df_subgroup = subgroups.get_group(subgroup)\n labels, scores = (\n df_subgroup[\"label\"].values.astype(int),\n df_subgroup[\"score\"].values,\n )\n fpr, fnr, thresholds = calculate_det_curves(labels, scores)\n ax = draw_det_curve(\n fpr, fnr, ax=ax, label=subgroup, fontsize=fontsize, title=plot_title\n )\n\n ax.xaxis.set_major_formatter(mtick.FormatStrFormatter(\"%.e\"))\n plt.minorticks_off()\n ax.set_ylabel(\"FNR (%)\", fontsize=fontsize)\n ax.set_xlabel(\"FPR\", fontsize=fontsize)\n plt.legend(fontsize=fontsize)\n ax.set_xlim([1e-4, 1])\n ax.set_ylim([0, 30])\n\n ax.tick_params(axis=\"both\", labelsize=fontsize)\n\n # save figure\n if save_figure_path is not None:\n plt.savefig(save_figure_path)", "def plot_metric(df_metrics, name, batch_size=10, epochs=10):\n\n # One groupplot\n fig, axarr = plt.subplots(3, 4, sharey=True, sharex=True)\n plotname = 'apfd'\n 
subplot_labels = ['(a)', '(b)', '(c)']\n\n for column, nr in enumerate(sorted(df_metrics['negative_ratio'].unique())):\n for row, emb_size in enumerate(df_metrics['emb_size'].unique()):\n for agidx, (labeltext, task, linestyle) in enumerate(\n [('Classification', 'True', '-'), ('Regression', 'False', '-.')]):\n rel_df = df_metrics[\n (df_metrics['emb_size'] == str(emb_size)) & (df_metrics['negative_ratio'] == str(nr)) &\n (df_metrics['batch_size'] == str(batch_size)) & (df_metrics['epochs'] == str(epochs))]\n\n # rel_df[rel_df['agent'] == agent].plot(x='step', y='napfd', label=labeltext, ylim=[0, 1], linewidth=0.8,\n # style=linestyle, color=sns.color_palette()[agidx], ax=axarr[row,column])\n\n apfd = rel_df.loc[rel_df['classification'] == task, 'apfd']\n miu = np.round(np.mean(apfd), 2)\n sigma = np.round(np.std(apfd), 2)\n label = labeltext + '\\n $\\mu$ - ' + str(miu) + ' $\\sigma$ - ' + str(sigma)\n\n # sns.displot(data=rel_df, x=\"apfd\", hue='classification', kde=True, ax=axarr[row, column])\n\n sns.distplot(apfd, kde=True,\n bins=int(180 / 5), color=sns.color_palette()[agidx],\n hist_kws={'edgecolor': 'black'},\n kde_kws={'linewidth': 4, 'clip': (0.0, 1.0)}, label=label, ax=axarr[row, column])\n\n axarr[row, column].xaxis.grid(True, which='major')\n\n axarr[row, column].set_title('Emb_size - %s - Neg_Ratio - %s' % (emb_size, nr), fontsize=10)\n\n if row == 2:\n axarr[row, column].set_xlabel('APFD')\n if column == 0:\n axarr[row, column].set_ylabel('Density')\n\n axarr[row, column].legend(frameon=True, prop={'size': 6})\n\n # Tweak spacing to prevent clipping of ylabel\n fig.suptitle('APFD Parameter Tuning - %d Epochs and batch-size - %d' % (epochs, batch_size))\n fig.tight_layout()\n plt.savefig(name, bbox_inches='tight')\n plt.show()", "def rank_by_budget(ax, pds, dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n try: # funcId is array?\n # _pds_plot_iterator[] uses funcId only for things we don't care for\n fakeFuncId = funcId[0]\n\n manyranking = np.array([pds.ranking((dim, i), groupby) for i in funcId])\n rankcount = np.shape(manyranking[0])[1] - 1\n amanyranking = ra.alignArrayData(ra.VArrayMultiReader(manyranking))\n budget = amanyranking[:,0]\n rankings = np.hsplit(amanyranking[:,1:], len(funcId))\n avgranking = np.average(rankings, axis=0)\n ranking = np.vstack([budget, avgranking.T]).T\n\n except TypeError: # funcId is scalar\n fakeFuncId = funcId\n ranking = pds.ranking((dim, funcId), groupby)\n\n i = 0\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, fakeFuncId):\n if kind != 'algorithm' and kind != 'strategy':\n continue\n #print name, ds\n budgets = ranking[:,0]\n ranks = ranking[:,1+i]\n\n style['markevery'] = 64\n ax.plot(budgets, ranks, label=name, **style)\n i += 1\n\n ax.set_xlabel('Budget')\n ax.set_ylabel('Rank by '+str(groupby).title()+' Function Value')\n ax.set_xscale('log', basex=pfsize)\n ax.grid()", "def finalize_plot(self, artifact_name, attacker_x=None, attacker_y=None):\n # Plot the axis ticks.\n plt.ylim((self.min_y - 10.0, self.max_y + 10.0))\n plt.xlim((self.min_x - 10.0, self.max_x + 10.0))\n plt.xticks([self.min_x + 1000, 0.0, self.max_x], size=15)\n plt.yticks([self.min_y + 1000, 0.0, self.max_y], size=15)\n # Add and place the labels.\n ax = plt.gca()\n plt.ylabel(\"Crossrange (ft)\", size=15)\n plt.xlabel(\"Downrange (ft)\", size=15)\n plt.subplots_adjust(bottom=0.25, left=0.25)\n ax.yaxis.set_label_coords(-0.1, 0.5)\n # Place the plane.\n plane = 
plt.imread(\"plane.png\").transpose((1, 0, 2))\n width = (self.max_x - self.min_x) / 10\n height = (496.0 / 499.0) * width\n x_start = -(width / 2.0)\n y_start = -(height / 2.0)\n plt.imshow(plane, extent=[x_start, x_start + width,\n y_start, y_start + height], zorder=100)\n plane = np.flip(plane, 1)\n if attacker_x is None:\n attacker_x = self.max_x - (2 * width)\n if attacker_y is None:\n attacker_y = self.max_y - (2 * height)\n red_plane = self.color_plane_png(plane, [1.0, 0, 0], True)\n plt.imshow(red_plane, zorder=100,\n extent=[attacker_x, attacker_x + width,\n attacker_y, attacker_y + height])\n self.record_artifact(plt, artifact_name, \"matplotlib\")\n plt.clf()", "def plot_calibration_curve(est, name, fig_index):\n # Calibrated with isotonic calibration\n isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')\n\n # Calibrated with sigmoid calibration\n sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')\n\n # Logistic regression with no calibration as baseline\n lr = LogisticRegression(C=1.)\n\n fig = plt.figure(fig_index, figsize=(10, 10))\n ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((3, 1), (2, 0))\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n for clf, name in [(lr, 'Logistic'),(est, name),(isotonic, name + ' + Isotonic'),(sigmoid, name + ' + Sigmoid')]:\n #Para cada modelo, entrenamos y predecimos \n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n if hasattr(clf, \"predict_proba\"):\n prob_pos = clf.predict_proba(X_test)[:, 1]\n else: # use decision function\n prob_pos = clf.decision_function(X_test)\n prob_pos = \\\n (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())\n print(\"%s:\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\\n\" % f1_score(y_test, y_pred))\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,\n histtype=\"step\", lw=2)\n\n ax1.set_ylabel(\"Fraction of positives\")\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)')\n\n ax2.set_xlabel(\"Mean predicted value\")\n ax2.set_ylabel(\"Count\")\n ax2.legend(loc=\"upper center\", ncol=2)\n\n plt.tight_layout()", "def plotDistributionWithLimitsRefine(lXs, llYs, lKClassif,out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\",legend=\"\"):\n\n fig = plt.Figure(figsize=(40,20))\n fig.suptitle(title, fontsize=32)\n nbPlots = len(llYs)\n sqrt = int(math.ceil(math.sqrt(nbPlots)))\n ymax = 0.0\n for i,val in enumerate(llYs):\n if lKClassif[i] != \"refine\":\n ymax = max(max(val[0]),ymax)\n ymaxCurrent = max(max(val[2]),ymax)\n ymax = ymax*1.05\n xmax = 147\n gs = gridspec.GridSpec(1,2) \n ax = fig.add_subplot(gs[0])\n gsLimit = gridspec.GridSpecFromSubplotSpec(sqrt,sqrt, subplot_spec=gs[1])\n for i,val in enumerate(llYs):\n if lKClassif[i] != \"refine\":\n ax.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent = fig.add_subplot(gsLimit[i]) \n axCurrent.fill_between(lXs, val[1], val[2], alpha=0.35, edgecolor='black', facecolor=Graphics.lColors[i%25])\n axCurrent.set_title(\"Cluster K{}, (position: 
{})\".format(i,lKClassif[i]))\n axCurrent.fill_between(lXs, val[3], val[4], alpha=0.85, edgecolor='darkgray', facecolor='lightgray')\n axCurrent.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent.set_ylim(0,ymaxCurrent)\n axCurrent.set_xlim(1,xmax)\n axCurrent.text(10, ymaxCurrent*0.90, \"#nucleosomes: {}\".format(legend[i]), fontsize=12)\n axis_font = {'size':'28'}\n ax.set_ylim(0,ymax)\n ax.set_xlim(1,xmax)\n ax.legend([\"K{}\".format(x) for x in range(0,nbPlots)])\n ax.set_title(\"all nucleosomes\", **axis_font)\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)", "def plot_budget_analyais_results(df, fs=8, fs_title=14, lw=3, fontsize=20, colors=['#AA3377', '#009988', '#EE7733', '#0077BB', '#BBBBBB', '#EE3377', '#DDCC77']):\n df_decomposed = df.loc[df['block'] == 'decomposed']\n df_joint = df.loc[df['block'] == 'joint']\n ticklabels = []\n num_sweeps = df_decomposed['num_sweeps'].to_numpy()\n sample_sizes = df_decomposed['sample_sizes'].to_numpy()\n for i in range(len(num_sweeps)):\n ticklabels.append('K=%d\\nL=%d' % (num_sweeps[i], sample_sizes[i]))\n fig = plt.figure(figsize=(fs*2.5, fs))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(num_sweeps, df_decomposed['density'].to_numpy(), 'o-', c=colors[0], linewidth=lw, label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax1.plot(num_sweeps, df_joint['density'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax1.set_xticks(num_sweeps)\n ax1.set_xticklabels(ticklabels)\n ax1.tick_params(labelsize=fontsize)\n ax1.grid(alpha=0.4)\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(num_sweeps, df_decomposed['ess'].to_numpy(), 'o-', c=colors[0], linewidth=lw,label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax2.plot(num_sweeps, df_joint['ess'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax2.set_xticks(num_sweeps)\n ax2.set_xticklabels(ticklabels)\n ax2.tick_params(labelsize=fontsize)\n ax2.grid(alpha=0.4)\n ax2.legend(fontsize=fontsize)\n ax1.legend(fontsize=fontsize)\n ax1.set_ylabel(r'$\\log \\: p_\\theta(x, \\: z)$', fontsize=35)\n ax2.set_ylabel('ESS / L', fontsize=35)", "def make_accuracy_plot(ax,\n groundtruth_boxes,\n hpu_boxes,\n cpu_boxes,\n hpu_strategy,\n label,\n N=10,\n num_graph_points=20,\n match_mode=\"ellipse\",\n):\n print \"Making plot for\", repr(label)\n print \"TODO: this should graph seconds per image\"\n mix_fractions = np.linspace(0, 1.0, num_graph_points)\n # Plot confidence intervals\n min_ci = []\n max_ci = []\n mean_accs = []\n stderr_accs = []\n for mix_fraction in mix_fractions:\n accuracies = [\n maximum_F_score(\n groundtruth_boxes,\n hpu_strategy(hpu_boxes, cpu_boxes, mix_fraction),\n match_mode=match_mode,\n )\n for _ in xrange(N)\n ]\n mean_accs.append(np.mean(accuracies))\n stderr_accs.append(np.std(accuracies, ddof=1) / np.sqrt(N))\n #print mix_fraction, np.mean(accuracies)\n ax.errorbar(mix_fractions, mean_accs, stderr_accs, label=label)\n ax.set_xlabel(\"Fraction of HPU-labeled images\")\n ax.set_ylabel(\"Maximum F-score\")", "def plot_cdf_compare(self, output_fn_base=\"CDF_compare.png\"):\n self.logger.debug(\"Plot CDF to %s_[train|test].png\", output_fn_base)\n\n timeout = self.scenario.cutoff\n\n data = self.data\n\n def prepare_data(x_data):\n \"\"\" Helper function to keep things easy, generates y_data and\n manages x_data-timeouts \"\"\"\n x_data = sorted(x_data)\n y_data = np.array(range(len(x_data)))/(len(x_data)-1)\n for idx in 
range(len(x_data)):\n if (timeout != None) and (x_data[idx] >= timeout):\n x_data[idx] = timeout\n y_data[idx] = y_data[idx-1]\n return (x_data, y_data)\n\n # Generate y_data\n data = {config_name : {label : prepare_data(x_data) for label, x_data in\n data[config_name].items()}\n for config_name in data}\n\n output_fn = [output_fn_base + \"_\" + inst_set + '.png' for inst_set in\n ['train', 'test']]\n\n for inst_set, out in zip(['train', 'test'], output_fn):\n f = plt.figure(1, dpi=100, figsize=(10,10))\n ax1 = f.add_subplot(1,1,1)\n ax1.step(data['default'][inst_set][0],\n data['default'][inst_set][1], color='red',\n linestyle='-', label='default train')\n ax1.step(data['incumbent'][inst_set][0],\n data['incumbent'][inst_set][1], color='blue',\n linestyle='-', label='incumbent train')\n ax1.legend()\n ax1.grid(True)\n ax1.set_xscale('log')\n ax1.set_ylabel('probability of being solved')\n ax1.set_xlabel('time')\n # Plot 'timeout'\n if timeout:\n ax1.text(timeout,\n ax1.get_ylim()[0] - 0.1 * np.abs(ax1.get_ylim()[0]),\n \"timeout \", horizontalalignment='center',\n verticalalignment=\"top\", rotation=30)\n ax1.axvline(x=timeout, linestyle='--')\n\n f.tight_layout()\n f.savefig(out)\n plt.close(f)\n return output_fn", "def parameter_forecast_plot(model_obj,time_index,start,end,num_samples = 100,cached_samples=None,col_labels = ['P','PET','Lag-1 Q','Lag-1 P','Seasonal','P$^2$','Constant']):\n \n f = plt.figure(figsize = (8,10))\n num_components = len(col_labels)\n gs = gridspec.GridSpec(8+2*num_components,6)\n ax0 = plt.subplot(gs[-8:-6,:])\n ax1 = plt.subplot(gs[-6::,:])\n col_labels = ['P','PET','Lag-1 Q','Lag-1 P','Seasonal','P$^2$','Constant']\n ffbs = model_obj # 120 is French Broad River at Blantyre, NC\n if cached_samples is None:\n samples = ffbs.backward_sample(num_samples=num_samples)\n else: \n samples = cached_samples\n for i in range(7):\n ax_new = plt.subplot(gs[2*i:2*i+2,:])\n\n upper = np.percentile(samples[start:end,i,:],75,axis = 1)\n mid = np.percentile(samples[start:end,i,:],50,axis = 1)\n lower = np.percentile(samples[start:end,i,:],25,axis = 1)\n\n ax_new.plot(time_index[start:end],mid,color='k')\n ax_new.fill_between(time_index[start:end],upper,lower,color='0.8')\n ax_new.tick_params(labelbottom=False,direction='in')\n ax_new.text(0.02, 0.82,col_labels[i],\n horizontalalignment='left',\n verticalalignment='center',transform=ax_new.transAxes)\n\n ax1.plot(time_index[start:end],ffbs.f[start:end],color='k',label='1-step forecast')\n ax1.plot(time_index[start:end],ffbs.Y[start:end],color='k',linestyle='',marker='+',\n markersize = 10,label='Observed streamflow')\n\n ax1.fill_between(time_index[start:end],\n np.squeeze(ffbs.f[start:end] + 2*ffbs.Q[start:end,0]),\n np.squeeze(ffbs.f[start:end] - 2*ffbs.Q[start:end,0]),color='0.8',\n label = 'Forecast $\\pm 2V_t$')\n ax1.tick_params(direction='in')\n ax1.legend(loc='upper right',ncol=1,frameon=True)\n #ax1.set_ylabel('Standardized streamflow')\n ax1.set_xlabel('Date',fontsize=16)\n ax1.get_yaxis().set_label_coords(-0.1,0.5)\n ax1.text(0.02, 0.92,'Standardized streamflow',\n horizontalalignment='left',\n verticalalignment='center',transform=ax1.transAxes,)\n ax0.plot(time_index[start:end],ffbs.s[start:end],color='k')\n ax0.text(0.02, 0.82,'$E[V_t]$',\n horizontalalignment='left',\n verticalalignment='center',transform=ax0.transAxes,)\n ax0.get_yaxis().set_label_coords(-0.1,0.5)\n return f,samples", "def plot_evaluation(parameters_dict, log_df, settings, evaluation_set_kde, plotname):\n\n\n plots = []\n\n\n ### setup the colors 
for each component\n if int(settings['nr_components']) < 3:\n colors = ['rgb(228,26,28)', 'rgb(55,126,184)']\n elif int(settings['nr_components']) < 13:\n colors = np.array(cl.scales[str(settings['nr_components'])]['qual']['Paired'])\n else:\n colors = cl.interp(cl.scales['10']['qual']['Paired'], 20)\n\n\n ### set up ab list\n ab_list = evaluation_set_kde['contact'].keys()\n\n\n\n\n ####################### plotting of settings\n print_to_table = {}\n for key in sorted(settings.keys()):\n if key not in ['fold_id_dir','plot_name', 'fixed_parameters', 'threads_proteins', 'qijab_dir',\n 'debug_mode', 'parameter_file', 'settings_file', 'optimization_log_file', 'braw_dir', 'pdb_dir', 'paramdir',\n 'mask_sse', 'lambda_w_fix', 'lfactor', 'plotdir', 'psicov_dir', 'contact', 'hessian_pseudocount']:\n print_to_table[key] = settings[key]\n\n print(\"Generate settings table...\")\n table_settings_1 = plot_settings_table(print_to_table, 1)\n table_settings_2 = plot_settings_table(print_to_table, 2)\n table_settings_3 = plot_settings_table(print_to_table, 3)\n plots.append(table_settings_1)\n plots.append(table_settings_2)\n plots.append(table_settings_3)\n\n\n ####################### negLL and realted plots\n if 'step' in log_df.columns and 'pass' in log_df.columns:\n\n if 'negLL' in log_df.columns:\n plot_negll = plot_convergence_trace_plotly(log_df,\n name=['negLL', 'negLL_crossval'],\n plot_title='neg LL trace for training and cross-val set')\n plots.append(plot_negll)\n\n plot_expfit_negll = plot_exponentialFit_negLL(log_df, plot_title='exponential Fit neg LL')\n plots.append(plot_expfit_negll)\n\n if 'timestamp' in log_df.columns:\n plot_timestamps = plot_convergence_trace_plotly(log_df,\n name=['timestamp'],\n plot_title='time (s) per iteration')\n plots.append(plot_timestamps)\n\n\n if 'gradient_norm_weights' in log_df.columns:\n plot_grad_norm_weights = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_weights'],\n plot_title='norm of weight gradients')\n plots.append(plot_grad_norm_weights)\n\n if 'gradient_norm_means' in log_df.columns:\n plot_grad_norm_means = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_means'],\n plot_title='norm of mean gradients')\n plots.append(plot_grad_norm_means)\n\n if 'gradient_norm_prec' in log_df.columns:\n plot_grad_norm_prec = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_prec'],\n plot_title='norm of precMat gradients')\n plots.append(plot_grad_norm_prec)\n\n\n ####################### plotting of parameters\n print(\"Generate distribution of parameters...\")\n\n #weights\n weights_dict = {}\n for component in range(settings['nr_components']):\n weights_dict['component ' + str(component)] = {\n 'weights (contact)': parameters_dict[\"weight_contact_\" + str(component)][0],\n 'weights (bg)': parameters_dict[\"weight_bg_\" + str(component)][0]\n }\n plot_weights = plot_barplot(\n weights_dict,\n 'Distribution of weights',\n 'component weights',\n type='group',\n colors=colors\n #,plot_out=\"/home/vorberg/weights.html\"\n )\n\n #mu\n mu_df = pd.DataFrame.from_dict(dict((k, parameters_dict[k]) for k in sorted(parameters_dict.keys()) if 'mu' in k))\n plot_means = plot_boxplot(\n mu_df,\n 'Distribution of Means',\n \"values of mean parameters\",\n colors=colors\n #,plot_out=\"/home/vorberg/mus.html\"\n )\n\n #std deviation\n prec_df = pd.DataFrame.from_dict(dict((k, parameters_dict[k]) for k in sorted(parameters_dict.keys()) if 'prec' in k))\n try:\n std_dev = prec_df.apply(lambda p: np.sqrt(1.0/p))\n if 
settings['prec_wrt_L']:\n std_dev = prec_df.apply(lambda p: np.sqrt(1.0/(p*142))) #in case precision is specified depending on L=142\n except ZeroDivisionError as e:\n print(e)\n std_dev=prec_df\n\n std_dev.columns = [column_name.replace(\"prec\", \"std\") for column_name in std_dev.columns]\n plot_stddev = plot_boxplot(\n std_dev,\n 'Distribution of std deviations',\n \"values of std deviation parameters\",\n colors=colors\n #,plot_out=\"/home/vorberg/std.html\"\n )\n\n\n plots.append(plot_weights)\n plots.append(plot_means)\n plots.append(plot_stddev)\n\n ####################### Scatterplot mu vs std dev\n print(\"Generate scatter plot mu vs std...\")\n scatter_dict = {}\n for component in range(settings['nr_components']):\n scatter_dict['mu_'+str(component)] = [\n mu_df['mu_'+str(component)].tolist(),\n std_dev['std_'+str(component)].tolist(),\n AB.values()\n ]\n plot_mu_vs_stddev = plot_scatter(scatter_dict,\n 'Mean vs std deviation',\n 'mean',\n \"std deviation\",\n False,\n colors\n #,plot_out=\"/home/vorberg/mu_vs_std.html\"\n )\n\n plots.append(plot_mu_vs_stddev)\n\n\n ############################################## plotting of gradient norms\n print(\"Generate gradient norms plot...\")\n\n #gradients for mu\n mu_grad_dict = {}\n annotations_dict = {}\n for component in range(settings['nr_components']):\n key = 'mu_'+str(component)\n mu_grad_dict[key] = log_df[key].tolist()[-1]\n annotations_dict[key] = AB\n\n\n plot_gradient_mu_stats = jitter_plot(mu_grad_dict,\n 'Distribution of gradients for mean in last iteration',\n annotations_dict,\n colors,\n None)\n plots.append(plot_gradient_mu_stats)\n\n\n #gradients for precMat\n precMat_grad_dict = {}\n annotations_dict = {}\n for component in range(settings['nr_components']):\n key = 'prec_'+str(component)\n precMat_grad_dict['diagPrecMat_'+str(component)] = log_df[key].tolist()[-1]\n annotations_dict['diagPrecMat_'+str(component)] = AB\n\n\n plot_gradient_precMat_stats = jitter_plot(\n precMat_grad_dict,\n 'Distribution of gradients for precMat in last iteration',\n annotations_dict,\n colors,\n None\n )\n plots.append(plot_gradient_precMat_stats)\n\n ##################################### plotting of gradient trace of a specific ab pair for all components\n print(\"Generate gradient trace plot...\")\n\n gradient_df = log_df.filter(regex=(\"mu_[0-9]*\"))\n plot_gradient_mu_ab_trace = plot_gradient_ab_trace(gradient_df,\n ab_list,\n colors\n )\n plots.append(plot_gradient_mu_ab_trace)\n\n gradient_df = log_df.filter(regex=(\"prec_[0-9]*\"))\n plot_gradient_prec_ab_trace = plot_gradient_ab_trace(\n gradient_df,\n ab_list,\n colors\n )\n plots.append(plot_gradient_prec_ab_trace)\n\n\n ##################################### plotting of univariate mixtures\n if len(evaluation_set_kde['contact']) == 0 or len(evaluation_set_kde['bg']) == 0:\n print \"Evaluation set is empty. 
Cannot plot Mixture Visualization.\"\n else:\n print(\"Generate parameter visualization 1d plots...\")\n plots.append(plot_parameter_visualisation_1d(parameters_dict, evaluation_set_kde, settings, colors, settings['prec_wrt_L']))\n # plot_parameter_visualisation_1d(parameters_dict, evaluation_set_kde, settings, colors, settings['prec_wrt_L'], plot_out=\"/home/vorberg/1d_vis.html\")\n\n # ------------------------------------------------------------------------------\n ### define merged plot\n # ------------------------------------------------------------------------------\n cols = 3.0\n rows = int(np.ceil((len(plots)-1) / cols)) + 2\n subplot_titles = []\n\n # set up titles\n for plot in range(len(plots)-1):\n subplot_titles.append(plots[plot]['layout']['title'])\n if len(subplot_titles) < (cols * (rows-2)):\n for i in range(int((cols * (rows-2))) - len(subplot_titles) ):\n subplot_titles.append(\" \")\n subplot_titles.append(plots[-1]['layout']['title'])\n\n\n # plot all plots as subplots\n fig = tools.make_subplots(rows=rows,\n cols=3,\n specs = [ [{} for col in range(int(cols))] for row in range(rows-2)] + \\\n [[{'rowspan':2, 'colspan': 3}, None, None], [None, None, None]],\n subplot_titles=tuple(subplot_titles),\n print_grid=False)\n\n\n\n\n for i, plot in enumerate(plots[:-1]):\n col = i % int(cols)\n row = (i - col) / int(cols)\n\n #add traces to subplot\n for trace in plot['data']:\n trace['showlegend']=False\n fig.append_trace(trace, row + 1, col + 1)\n\n # adjust x and y axis for table plotting\n if 'annotations' in plot['layout'].keys():\n for cell in plot['layout']['annotations']:\n cell['yref'] = 'y' + str(i + 1)\n cell['xref'] = 'x' + str(i + 1)\n fig['layout']['annotations'] += plot['layout']['annotations']\n\n # adjust axis for all plots\n fig['layout']['xaxis' + str(i + 1)].update(plot['layout']['xaxis1'])\n fig['layout']['yaxis' + str(i + 1)].update(plot['layout']['yaxis1'])\n\n ## add mixture visualisation plot - spans 3 columns\n for trace in plots[-1]['data']:\n fig.append_trace(trace, int(rows)-1, 1)\n fig['layout']['xaxis' + str(int(cols * (rows-2) + 1))].update(plots[-1]['layout']['xaxis1'])\n fig['layout']['yaxis' + str(int(cols * (rows-2) + 1))].update(plots[-1]['layout']['yaxis1'])\n\n #check which plots are visible/invisible according to menu selection\n trace_visibility_ab = {}\n for ab in range(len(ab_list)):\n trace_visibility_ab[ab] = []\n for i, plot in enumerate(plots):\n if 'updatemenus' not in plot['layout'].keys():\n trace_visibility_ab[ab].extend([True] * len(plot['data']))\n else:\n trace_visibility_ab[ab].extend(plot['layout']['updatemenus'][0]['buttons'][ab]['args'][1])\n\n\n #use menu of last plot (=vis of mixture) as template for multiplot menu\n fig['layout']['updatemenus'] = plots[-1]['layout']['updatemenus']\n for ab in range(len(ab_list)):\n fig['layout']['updatemenus'][0]['buttons'][ab]['args'][1] = trace_visibility_ab[ab]\n\n\n fig['layout']['legend']['yanchor'] = 'bottom'\n fig['layout']['legend']['y'] = 0\n fig['layout']['height'] = rows * 250\n fig['layout']['font'] = {'size': 18} # set global font size\n\n plotly_plot(fig, filename=plotname, auto_open=False)", "def plot(self, **kwargs):\n\n # get colors\n colors = kwargs.get(\"colors\", GW_OBSERVATORY_COLORS)\n\n # get Result samples\n self._samples = {\n label: value.posterior\n for label, value in self.results.items()\n if isinstance(value, Result)\n }\n\n # get Grid posteriors\n self._grids = {\n label: [value, value.ln_evidence] # store grid and log evidence\n for label, value in 
self.results.items()\n if isinstance(value, Grid)\n }\n\n # apply offsets for slightly nicer plots axes\n self.parameter_offsets = {parameter: 0.0 for parameter in self.parameters}\n if len(self._grids) == 0 and len(self._samples) == 1:\n for label in self._samples:\n for parameter in self.parameters:\n srange = [\n np.min(self._samples[label][parameter]),\n np.max(self._samples[label][parameter]),\n ]\n label_suffix = \"\"\n\n # offset values\n median = np.median(self._samples[label][parameter])\n relwidth = np.abs((srange[1] - srange[0]) / median)\n\n if relwidth < 1e-4:\n offsetstr = f\"{median:.4e}\"\n a, b = offsetstr.split(\"e\")\n\n if np.abs(int(b)) < 3:\n offsetstr = f\"{median:.4f}\"\n offset = float(offsetstr)\n else:\n offset = float(offsetstr)\n offsetstr = a + rf\"\\!\\times\\!10^{{{int(b)}}}\"\n\n self.parameter_offsets[parameter] = offset\n\n self._samples[label][parameter] -= offset\n label_suffix = rf\" [${{\\scriptstyle {offsetstr}}}$]\"\n\n self.latex_labels[parameter] += label_suffix\n\n colordicts = []\n for j, res in enumerate([self._samples, self._grids]):\n colordicts.append({})\n for i, key in enumerate(res):\n if key in colors:\n colordicts[-1][key] = colors[key]\n elif key.lower() == \"joint\":\n # if using \"Joint\" as the multi-detector analysis key, set the color to black\n colordicts[-1][key] = \"k\"\n else:\n # use PESummary color cycle\n colordicts[-1][key] = list(colorcycle)[\n (j * 2 + i) % len(colorcycle)\n ]\n\n # store original keywords arguments\n origkwargs = kwargs.copy()\n\n # plot samples\n fig = None\n if len(self._samples) > 0:\n kwargs[\"colors\"] = list(colordicts[0].values())\n if self._num_parameters == 1:\n fig = self._1d_plot_samples(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_samples(**kwargs)\n else:\n fig = self._nd_plot_samples(**kwargs)\n\n # restore keywords\n kwargs = origkwargs\n\n if len(self._grids) > 0:\n kwargs[\"colors\"] = list(colordicts[1].values())\n if fig is not None and \"fig\" not in kwargs:\n kwargs[\"fig\"] = fig\n if self._num_parameters == 1:\n fig = self._1d_plot_grid(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_grid(**kwargs)\n else:\n fig = self._nd_plot_grid(**kwargs)\n\n # add further figure information\n if self._num_parameters == 1:\n ax = fig.gca()\n\n # set figure bounds if outside defaults\n if self.parameters[0] in DEFAULT_BOUNDS:\n _set_axes_limits(ax, self.parameters[0], axis=\"x\")\n\n # add injection values\n if self.injection_parameters is not None:\n if self.injection_parameters[self.parameters[0]] is not None:\n ax.axvline(\n (\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]]\n ),\n color=kwargs.get(\"injection_color\", \"k\"),\n linewidth=1,\n )\n elif self._num_parameters == 2:\n if \"triangle\" in self.plottype:\n a1, a2, a3 = fig[1:]\n order = [\"x\", \"y\"] if self.plottype == \"triangle\" else [\"y\", \"x\"]\n params = (\n self.parameters[:2]\n if self.plottype == \"triangle\"\n else self.parameters[1::-1]\n )\n\n # set figure bounds if outside defaults\n for param, axes, axis in zip(params, [[a1, a2], [a2, a3]], order):\n for ax in axes:\n _set_axes_limits(ax, param, axis=axis)\n\n self.fig = fig\n return self.fig", "def plot_optimization_history(\n study: Study | Sequence[Study],\n *,\n target: Callable[[FrozenTrial], float] | None = None,\n target_name: str = \"Objective Value\",\n error_bar: bool = False,\n) -> \"Axes\":\n\n 
_imports.check()\n\n info_list = _get_optimization_history_info_list(study, target, target_name, error_bar)\n return _get_optimization_history_plot(info_list, target_name)", "def plot(self, plot_cmd=None, tf=lambda y: y):\r\n if not plot_cmd:\r\n plot_cmd = self.plot_cmd\r\n colors = 'bgrcmyk'\r\n pylab.hold(False)\r\n res = self.res\r\n\r\n flatx, flatf = self.flattened()\r\n minf = np.inf\r\n for i in flatf:\r\n minf = min((minf, min(flatf[i])))\r\n addf = 1e-9 - minf if minf <= 0 else 0\r\n for i in sorted(res.keys()): # we plot not all values here\r\n if type(i) is int:\r\n color = colors[i % len(colors)]\r\n arx = sorted(res[i].keys())\r\n plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')\r\n pylab.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)\r\n pylab.hold(True)\r\n plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')\r\n pylab.ylabel('f + ' + str(addf))\r\n pylab.draw()\r\n show()\r\n # raw_input('press return')\r\n return self", "def plot_comparisons(self, exact, blocked, blockederr, axdelta=None):\n if axdelta is None:\n axdelta = plt.gca()\n delta = self.means - exact\n axdelta.errorbar(list(range(1, self.max_dets)), delta[0], yerr=self.stderr[0], label='independent')\n axdelta.errorbar(list(range(1, self.max_dets)), delta[1], yerr=self.stderr[1], label='correlated')\n axdelta.axhline(delta[0, 0], linestyle=':', color='grey', label='reference')\n axdelta.axhline(0, linestyle='-', linewidth=1, color='black')\n if blocked:\n axdelta.axhline(blocked-exact, linestyle='--', color='darkgreen', label='reblocked')\n if blockederr:\n axdelta.fill_between([0, self.max_dets], [blocked-exact-blockederr,blocked-exact-blockederr],\n [blocked-exact+blockederr,blocked-exact+blockederr], color='green', alpha=0.2)\n axdelta.set_xlabel('Number of determinants in estimator')\n axdelta.set_ylabel(r'$E-E_\\mathrm{CCSD}$ / ha')\n axdelta.legend()\n return axdelta", "def plot(\n self,\n group_delay=False,\n slce=None,\n flim=None,\n dblim=None,\n tlim=None,\n grpdlim=None,\n dbref=1,\n show=False,\n use_fig=None,\n label=None,\n unwrap_phase=False,\n logf=True,\n third_oct_f=True,\n plot_kw={},\n **fig_kw,\n ):\n if use_fig is None:\n fig_kw = {**{\"figsize\": (10, 10)}, **fig_kw}\n fig, axes = plt.subplots(nrows=3, constrained_layout=True, **fig_kw)\n else:\n fig = use_fig\n axes = fig.axes\n\n self.plot_magnitude(\n use_ax=axes[0],\n slce=slce,\n dblim=dblim,\n flim=flim,\n dbref=dbref,\n label=label,\n plot_kw=plot_kw,\n logf=logf,\n third_oct_f=third_oct_f,\n )\n if group_delay:\n self.plot_group_delay(\n use_ax=axes[1],\n slce=slce,\n flim=flim,\n ylim=grpdlim,\n plot_kw=plot_kw,\n logf=logf,\n third_oct_f=third_oct_f,\n )\n else:\n self.plot_phase(\n use_ax=axes[1],\n slce=slce,\n flim=flim,\n plot_kw=plot_kw,\n unwrap=unwrap_phase,\n logf=logf,\n third_oct_f=third_oct_f,\n )\n self.plot_time(\n use_ax=axes[2], tlim=tlim, slce=slce, plot_kw=plot_kw\n )\n\n if show:\n plt.show()\n\n return fig", "def _show_learning_rate():\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6.4 * 2, 4.8))\n\n # Visualize c_prime\n c_prime_list = np.linspace(1, 100, num=11)\n x_label = f\"c'\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[0]\n x_list = c_prime_list\n\n # MNIST\n y_list = [161, 16, 14, 15, 20, 21, 24, 27, 30, 30, 35]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [63, 12, 12, 15, 18, 19, 22, 25, 26, 28, 30]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [1297, 724, 221, 80, 52, 
51, 54, 54, 52, 60, 60]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n # Visualize t0\n t0_list = np.linspace(1, 100, num=11)\n x_label = f\"t0\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[1]\n x_list = t0_list\n\n # MNIST\n y_list = [16, 16, 16, 16, 16, 17, 16, 16, 16, 16, 16]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [765, 765, 767, 772, 772, 773, 789, 789, 793, 796, 799]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n plt.show()", "def plot_effective_beta(t, recalled_ctx, ctx, ctx_test_env, ax=None):\n if ax is None:\n ax = plt.gca()\n\n ax.set_prop_cycle('color', sns.color_palette(\"husl\", ctx_test_env.n))\n y = np.sum(recalled_ctx * ctx, axis=1)\n for i in range(1, ctx_test_env.n):\n sel = (t > i) & (t <= i + 1)\n ax.plot(t[sel], y[sel])\n\n ax.axhline(y=ctx_test_env.beta, c='k', ls='--')\n ax.set_xlabel(r\"Time $t/\\mathrm{s}$\")\n ax.set_ylabel(r\"$\\beta'$\")\n ax.set_yticks([0, ctx_test_env.beta, 1])", "def plot_learning_curve(X, y, maxdepth, estimator, plt):\n # create cv training and test scores for various training set sizes\n train_sizes, train_scores, test_scores = learning_curve(estimator,\n X, # feature matrix\n y, # target vector\n cv=10, # number of folds in cross-validation\n scoring='neg_mean_squared_error', # metric\n n_jobs=-1, # use all computer cores,\n train_sizes=np.linspace(0.01, 1.0, 30) # 30 different sizes of the training set\n )\n # create means and standart deviations of training set scores\n train_mean = np.mean(train_scores, axis=1)\n train_std = np.std(train_scores, axis=1)\n\n # create means and standart deviations of test set scores\n test_mean = np.mean(test_scores, axis=1)\n test_std = np.std(test_scores, axis=1)\n\n # draw lines\n plt.plot(train_sizes, train_mean, '--', color='#111111', label=\"Training score\")\n plt.plot(train_sizes, test_mean, color='#111111', label=\"Cross-validation score\")\n\n # draw bands\n plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, color=\"#DDDDDD\")\n plt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, color=\"#f4d0d7\")\n \n # create plot \n plt.title(\"Learning curve\")\n plt.xlabel(\"Training set size\", fontsize=18)\n plt.ylabel(\"mse\", fontsize=18)\n plt.legend(loc=\"best\")\n plt.tight_layout()", "def parallel_group(\n G, group_by, ax=None, y_offset=-0.3, rotation=45, ha=\"right\", va=\"top\"\n):\n if ax is None:\n ax = plt.gca()\n nt = utils.node_table(G)\n # groups = nt.groupby(group_by).apply(lambda df: len(df)).sort_index()\n groups = sorted(nt[group_by].unique())\n\n for i, label in enumerate(groups):\n x = i * 4\n y = y_offset\n ax.annotate(label, xy=(x, y), ha=ha, va=va, rotation=rotation)\n ax.relim()", "def plot_retention_curves(distribution_shift_name,\n dataset_to_model_results,\n plot_dir: str,\n no_oracle=True,\n cutoff_perc=0.99):\n set_matplotlib_constants()\n retention_types = [\n 'retention_accuracy_arr', 'retention_nll_arr', 'retention_auroc_arr',\n 'retention_auprc_arr'\n ]\n\n datasets = list(sorted(list(dataset_to_model_results.keys())))\n\n for dataset in datasets:\n dataset_results = dataset_to_model_results[dataset]\n for 
tuning_domain in ['indomain', 'joint']:\n for retention_type in retention_types:\n fig, ax = plt.subplots()\n plt.subplots_adjust(left=0.20, bottom=0.20)\n\n retention_name = RETENTION_ARR_TO_FULL_NAME[retention_type]\n oracle_str = 'no_oracle' if no_oracle else 'oracle'\n plot_name = (f'retention-{distribution_shift_name}-{dataset}'\n f'-{tuning_domain}-{retention_name}-{oracle_str}')\n\n model_names = []\n for ((mt, k, is_d, key_tuning_domain, n_mc),\n model_dict) in dataset_results.items():\n if tuning_domain != key_tuning_domain:\n continue\n model_name = get_model_name((mt, k, is_d, key_tuning_domain, n_mc))\n model_names.append(model_name)\n\n # Subsample the array to ~500 points\n retention_arr = np.array(model_dict[retention_type])\n\n if no_oracle:\n prop_expert = np.arange(\n retention_arr.shape[1]) / retention_arr.shape[1]\n prop_model = 1 - prop_expert\n retention_arr = (retention_arr - prop_expert) / prop_model\n\n if retention_arr.shape[1] > 500:\n subsample_factor = max(2, int(retention_arr.shape[1] / 500))\n retention_arr = retention_arr[:, ::subsample_factor]\n\n retain_percs = np.arange(\n retention_arr.shape[1]) / retention_arr.shape[1]\n n_seeds = retention_arr.shape[0]\n mean = np.mean(retention_arr, axis=0)\n std_err = np.std(retention_arr, axis=0) / np.sqrt(n_seeds)\n\n if cutoff_perc is not None and 'accuracy' in retention_type:\n retain_percs = retain_percs[:-100]\n mean = mean[:-100]\n std_err = std_err[:-100]\n\n if 'retention_nll_arr' == retention_type:\n cutoff_index = int(retain_percs.shape[0] * 0.95)\n retain_percs = retain_percs[:cutoff_index]\n mean = mean[:cutoff_index]\n std_err = mean[:cutoff_index]\n\n color, linestyle = get_colors_and_linestyle(\n MODEL_TYPE_TO_FULL_NAME[(mt, k > 1)])\n\n # Visualize mean with standard error\n ax.plot(\n retain_percs,\n mean,\n label=model_name,\n color=color,\n linestyle=linestyle)\n ax.fill_between(\n retain_percs,\n mean - std_err,\n mean + std_err,\n color=color,\n alpha=0.25)\n ax.set(\n xlabel='Proportion of Cases Referred to Expert',\n ylabel=retention_name)\n fig.tight_layout()\n\n if isinstance(plot_dir, str):\n os.makedirs(plot_dir, exist_ok=True)\n metric_plot_path = os.path.join(plot_dir, f'{plot_name}.pdf')\n fig.savefig(metric_plot_path, transparent=True, dpi=300, format='pdf')\n logging.info(f'Saved retention plot for distribution shift '\n f'{distribution_shift_name},'\n f'dataset {dataset}, '\n f'tuning domain {tuning_domain}, '\n f'metric {retention_type}, '\n f'models {model_names}, '\n f'oracle setting {oracle_str} to {metric_plot_path}.')\n\n print(plot_name)\n # plt.show()", "def update_plotmon_adaptive_cma(self, force_update=False):\n\n if self._live_plot_enabled():\n try:\n if (time.time() - self.time_last_ad_plot_update >\n self.plotting_interval() or force_update):\n ##########################################\n # Main plotmon\n ##########################################\n i = 0\n nr_sweep_funcs = len(self.sweep_function_names)\n\n # best_idx -1 as we count from 0 and best eval\n # counts from 1.\n best_index = int(self.opt_res_dset[-1, -1] - 1)\n\n for j in range(len(self.detector_function.value_names)):\n y_ind = nr_sweep_funcs + j\n\n ##########################################\n # Main plotmon\n ##########################################\n for x_ind in range(nr_sweep_funcs):\n\n x = self.dset[:, x_ind]\n y = self.dset[:, y_ind]\n\n self.curves[i]['config']['x'] = x\n self.curves[i]['config']['y'] = y\n\n best_x = x[best_index]\n best_y = y[best_index]\n 
self.curves_best_ever[i]['config']['x'] = [best_x]\n self.curves_best_ever[i]['config']['y'] = [best_y]\n mean_x = self.opt_res_dset[:, 2+x_ind]\n # std_x is needed to implement errorbars on X\n # std_x = self.opt_res_dset[:, 2+nr_sweep_funcs+x_ind]\n # to be replaced with an actual mean\n mean_y = self.opt_res_dset[:, 2+2*nr_sweep_funcs]\n mean_y = get_generation_means(\n self.opt_res_dset[:, 1], y)\n # TODO: turn into errorbars\n self.curves_distr_mean[i]['config']['x'] = mean_x\n self.curves_distr_mean[i]['config']['y'] = mean_y\n i += 1\n ##########################################\n # Secondary plotmon\n ##########################################\n # Measured value vs function evaluation\n y = self.dset[:, y_ind]\n x = range(len(y))\n self.iter_traces[j]['config']['x'] = x\n self.iter_traces[j]['config']['y'] = y\n\n # generational means\n gen_idx = self.opt_res_dset[:, 1]\n self.iter_mean_traces[j]['config']['x'] = gen_idx\n self.iter_mean_traces[j]['config']['y'] = mean_y\n\n # This plots the best ever measured value vs iteration\n # number of evals column\n best_evals_idx = (\n self.opt_res_dset[:, -1] - 1).astype(int)\n best_func_val = y[best_evals_idx]\n self.iter_bever_traces[j]['config']['x'] = best_evals_idx\n self.iter_bever_traces[j]['config']['y'] = best_func_val\n\n self.main_QtPlot.update_plot()\n self.secondary_QtPlot.update_plot()\n\n self.time_last_ad_plot_update = time.time()\n\n except Exception as e:\n log.warning(traceback.format_exc())", "def plot_test_objective_multi(df, exp_config, output_dir, show):\n output_file_name = f\"{inspect.stack()[0][3]}.{FILE_EXTENSION}\"\n output_path = os.path.join(output_dir, output_file_name)\n\n plt.figure()\n\n for exp_name, exp_df in df.items():\n\n if \"rep\" in exp_config[\"data\"][exp_name]:\n\n exp_dfs = exp_df\n\n T = np.linspace(0, exp_config[\"t_max\"], 50000)\n\n y_list = []\n for i, df_i in enumerate(exp_dfs):\n\n df_i = process_for_test_objective(\n df_i.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = df_i.loc[df_i[\"max_idx\"]][\"timestamp_end\"].values\n y = df_i.loc[df_i[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n f = interp1d(x, y, kind=\"previous\", fill_value=\"extrapolate\")\n y = exp_config.get(\"best_objective\", 1) - f(T)\n y_list.append(y)\n\n y_list = np.asarray(y_list)\n y_mean = y_list.mean(axis=0)\n y_std = y_list.std(axis=0)\n y_se = y_std / np.sqrt(y_list.shape[0])\n\n plt.plot(\n T,\n y_mean,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n plt.fill_between(\n T,\n y_mean - 1.96 * y_se,\n y_mean + 1.96 * y_se,\n facecolor=exp_config[\"data\"][exp_name][\"color\"],\n alpha=0.3,\n )\n\n else:\n\n exp_df = process_for_test_objective(\n exp_df.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = exp_df.loc[exp_df[\"max_idx\"]][\"timestamp_end\"].values\n y = exp_df.loc[exp_df[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n idx = np.unique(x, return_index=True, axis=0)[1]\n\n x = x[idx]\n y = y[idx]\n\n x = np.clip(np.concatenate([x, [exp_config[\"t_max\"]]]), 0, exp_config[\"t_max\"])\n y = np.clip(exp_config.get(\"best_objective\", 1) - np.concatenate([y, [y[-1]]]), 0, 1)\n \n area = aulc(x, y)\n exp_config[\"data\"][exp_name][\"AULC\"] = area\n \n plt.step(\n x[:],\n y[:],\n where=\"post\",\n label=exp_config[\"data\"][exp_name][\"label\"],\n 
color=exp_config[\"data\"][exp_name][\"color\"],\n marker=exp_config[\"data\"][exp_name].get(\"marker\", None),\n markevery=len(x) // 5,\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n\n ax = plt.gca()\n ticker_freq = exp_config[\"t_max\"] / 5\n ax.xaxis.set_major_locator(ticker.MultipleLocator(ticker_freq))\n ax.xaxis.set_major_formatter(minute_major_formatter)\n\n if exp_config.get(\"title\") and PRINT_TITLE:\n plt.title(exp_config.get(\"title\"))\n\n # if MODE == \"min\":\n # plt.legend(loc=\"upper right\")\n # else:\n # plt.legend(loc=\"lower right\")\n plt.legend(loc=exp_config.get(\"legend\", \"best\"))\n\n plt.ylabel(\"Test Regret\")\n plt.xlabel(\"Search time (min.)\")\n\n if exp_config.get(\"ylim\"):\n plt.ylim(*exp_config.get(\"ylim\"))\n\n if exp_config.get(\"xlim\"):\n plt.xlim(*exp_config.get(\"xlim\"))\n else:\n plt.xlim(0, exp_config[\"t_max\"])\n\n if exp_config.get(\"yscale\"):\n plt.yscale(exp_config.get(\"yscale\"))\n\n plt.grid(which=\"minor\", color=\"gray\", linestyle=\":\")\n plt.grid(which=\"major\", linestyle=\"-\")\n plt.tight_layout()\n plt.savefig(output_path, dpi=360)\n if show:\n plt.show()\n plt.close()", "def plot_cf_target(df, columns, hue = \"default_payment_next_month\"):\n \n loc = 1\n numplots = ((len(columns)*(len(columns)-1))/2)+len(columns)\n fig = plt.figure(figsize = (40,60))\n for i, ycol in enumerate(columns):\n for j, xcol in enumerate(columns):\n if j < i:\n continue\n else:\n ax = fig.add_subplot((numplots/4)+1, 4, loc)\n if xcol == ycol:\n sns.distplot(df[xcol], ax = ax);\n else:\n sns.scatterplot(x=xcol, y=ycol, data=df, palette = \"GnBu_d\", ax = ax, hue = hue, legend = False);\n loc += 1", "def plot_metric_results():\n from run_metric_comparison_experiments import (\n PIVECTOR_TEMPLATE,\n PIVECTOR_DISTANCE_MATRIX_TEMPLATE,\n DISCRIMINATOR_DISTANCE_MATRIX_TEMPLATE,\n GAUSSIAN_DISTANCE_MATRIX_TEMPLATE,\n ENCODER_DISTANCE_MATRIX_TEMPLATE,\n DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE,\n NUM_TRAJECTORIES,\n NUM_COMPONENTS,\n NUM_REPETITIONS,\n REWARD_SCALES,\n ENVS\n )\n\n # Path-templates to each distance matrix to compare\n # BC = Behavioural Characteristication\n BC_DISTANCE_MATRIX_TEMPLATES = [\n PIVECTOR_DISTANCE_MATRIX_TEMPLATE,\n GAUSSIAN_DISTANCE_MATRIX_TEMPLATE,\n DISCRIMINATOR_DISTANCE_MATRIX_TEMPLATE,\n ENCODER_DISTANCE_MATRIX_TEMPLATE,\n DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE\n ]\n\n BC_LEGEND_NAMES = [\n \"Supervector\",\n \"Gaussian\",\n \"Discriminator\",\n \"Encoder\",\n \"Discretization\"\n ]\n\n BC_PLOT_COLORS = [\n \"C0\",\n \"C1\",\n \"C2\",\n \"C3\",\n \"C4\"\n ]\n\n fig, axs = pyplot.subplots(\n figsize=[4.8 * 3 * 0.75, 4.8 * 0.75],\n nrows=1,\n ncols=3,\n )\n\n def get_policy_names(env):\n policy_names = glob(PIVECTOR_TEMPLATE.format(env=env, num_traj=\"*\", num_components=\"*\", policy_name=\"*\", repetition_num=\"*\"))\n policy_names = [\"_\".join(os.path.basename(x).split(\"_\")[-4:-2]) for x in policy_names]\n policy_names = sorted(list(set(policy_names)))\n return policy_names\n\n # For each different distance measurement\n for distance_matrix_template, plot_legend_name, plot_color in zip(BC_DISTANCE_MATRIX_TEMPLATES, BC_LEGEND_NAMES, BC_PLOT_COLORS):\n # These will be NUM_TRAJECTORY length lists\n average_scores = np.ones((len(NUM_TRAJECTORIES),))\n std_scores = np.ones((len(NUM_TRAJECTORIES),))\n for num_traj_idx, num_traj in enumerate(NUM_TRAJECTORIES):\n # Average over environments, policies and repetitions\n scores = []\n for env_i, env in enumerate(ENVS):\n if \"Bipedal\" in env and 
distance_matrix_template == DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE:\n print(\"[Note] Skipping env {} for discretization distances (OOM)\".format(env))\n continue\n min_reward, max_reward = REWARD_SCALES[env]\n policy_names = get_policy_names(env)\n\n for policy_name in policy_names:\n for repetition in range(1, NUM_REPETITIONS + 1):\n # Ugh bit of messing around because I did not think this through...\n if distance_matrix_template == PIVECTOR_DISTANCE_MATRIX_TEMPLATE:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, num_components=NUM_COMPONENTS, policy_name=policy_name, repetition_num=repetition)\n else:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, policy_name=policy_name, repetition_num=repetition)\n\n data = np.load(file_path)\n distance_matrix = data[\"distance_matrix\"]\n rewards = data[\"average_episodic_rewards\"]\n\n raveled_reward_distances = np.abs(rewards - rewards[:, None])\n # Take upper diagonal, skip diagonal\n raveled_reward_distances = raveled_reward_distances[np.triu_indices(raveled_reward_distances.shape[0], 1)]\n raveled_distances = distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)]\n\n # Score is correlation between the two\n correlation = np.corrcoef(raveled_distances, raveled_reward_distances)[0, 1]\n scores.append(correlation)\n\n scores = np.array(scores)\n average_score = np.mean(scores)\n std_score = np.std(scores)\n average_scores[num_traj_idx] = average_score\n std_scores[num_traj_idx] = std_score\n ax = axs[0]\n ax.plot(NUM_TRAJECTORIES, average_scores, c=plot_color, label=plot_legend_name)\n ax.scatter(NUM_TRAJECTORIES, average_scores, c=plot_color)\n #ax.fill_between(\n # NUM_TRAJECTORIES,\n # average_scores - std_scores,\n # average_scores + std_scores,\n # alpha=0.2,\n # color=plot_color,\n # edgecolor=\"none\",\n # linewidth=0.0\n #)\n ax.set_xticks(NUM_TRAJECTORIES)\n ax.tick_params(axis='both', which='both', labelsize=\"x-large\")\n ax.set_ylabel(\"Correlation with return-distances\", fontsize=\"x-large\")\n ax.set_xlabel(\"Number of trajectories\", fontsize=\"x-large\")\n ax.grid(alpha=0.2)\n\n # Amount of error to \"ground truth\" result,\n # where \"ground truth\" is one of the results with 100 trajectories of data.\n # Because of wonkyness of this, store list [#num-traj] of lists,\n # each storing results for that num-traj run\n per_trajectory_relative_errors = [[] for i in NUM_TRAJECTORIES]\n for env in ENVS:\n if \"Bipedal\" in env and distance_matrix_template == DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE:\n print(\"[Note] Skipping env {} for discretization distances (OOM)\".format(env))\n continue\n policy_names = get_policy_names(env)\n for policy_name in policy_names:\n # The \"ground truth\" distances, will be filled with first\n # result with 100 trajectories.\n anchor_distance = None\n for traj_i, num_traj in enumerate(NUM_TRAJECTORIES):\n for repetition in range(1, NUM_REPETITIONS + 1):\n if distance_matrix_template == PIVECTOR_DISTANCE_MATRIX_TEMPLATE:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, num_components=NUM_COMPONENTS, policy_name=policy_name, repetition_num=repetition)\n else:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, policy_name=policy_name, repetition_num=repetition)\n distance_matrix = np.load(file_path)[\"distance_matrix\"]\n # Normalize to [0, 1]\n distance_matrix = (distance_matrix - distance_matrix.min()) / (distance_matrix.max() - distance_matrix.min())\n # Get only upper triangle as distance matrix 
is symmetric. Exlude diagonal\n raveled_distances = distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)]\n # Check if we use this as the zero-point or compute relative error to\n if anchor_distance is None:\n assert num_traj == 100\n anchor_distance = raveled_distances\n else:\n per_trajectory_relative_errors[traj_i].append(\n np.mean(np.abs(raveled_distances - anchor_distance) / np.abs(anchor_distance))\n )\n # Lists are not of equal length, so can not just change into an array\n mean_average_errors = np.array([np.mean(np.array(results) * 100) for results in per_trajectory_relative_errors])\n std_average_errors = np.array([np.std(np.array(results) * 100) for results in per_trajectory_relative_errors])\n ax = axs[1]\n ax.plot(NUM_TRAJECTORIES, mean_average_errors, c=plot_color, label=plot_legend_name)\n ax.scatter(NUM_TRAJECTORIES, mean_average_errors, c=plot_color)\n #ax.fill_between(\n # NUM_TRAJECTORIES,\n # mean_average_errors - std_average_errors,\n # mean_average_errors + std_average_errors,\n # alpha=0.2,\n # color=plot_color,\n # edgecolor=\"none\",\n # linewidth=0.0\n #)\n ax.set_xticks(NUM_TRAJECTORIES)\n ax.tick_params(axis='both', which='both', labelsize=\"x-large\")\n ax.set_ylabel(\"Relative error to ground truth (%)\", fontsize=\"x-large\")\n ax.set_xlabel(\"Number of trajectories\", fontsize=\"x-large\")\n ax.grid(alpha=0.2)\n\n # Variation between results\n cv_means = np.ones((len(NUM_TRAJECTORIES,)))\n cv_stds = np.ones((len(NUM_TRAJECTORIES,)))\n for traj_i, num_traj in enumerate(NUM_TRAJECTORIES):\n traj_averaged_cvs = []\n for env in ENVS:\n if \"Bipedal\" in env and distance_matrix_template == DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE:\n print(\"[Note] Skipping env {} for discretization distances (OOM)\".format(env))\n continue\n policy_names = get_policy_names(env)\n for policy_name in policy_names:\n # Compute std over repetitions\n distances = []\n for repetition in range(1, NUM_REPETITIONS + 1):\n if distance_matrix_template == PIVECTOR_DISTANCE_MATRIX_TEMPLATE:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, num_components=NUM_COMPONENTS, policy_name=policy_name, repetition_num=repetition)\n else:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, policy_name=policy_name, repetition_num=repetition)\n\n distance_matrix = np.load(file_path)[\"distance_matrix\"]\n # Normalize to [0, 1]\n distance_matrix = (distance_matrix - distance_matrix.min()) / (distance_matrix.max() - distance_matrix.min())\n # Get only upper triangle as distance matrix is symmetric. 
Exlude diagonal\n raveled_distances = distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)]\n distances.append(raveled_distances)\n distances = np.stack(distances)\n # Coefficient of variance (std / mean)\n average_cv = np.mean(np.std(distances, axis=0) / np.mean(distances, axis=0))\n traj_averaged_cvs.append(average_cv)\n traj_averaged_cvs = np.array(traj_averaged_cvs)\n cv_means[traj_i] = np.mean(traj_averaged_cvs)\n cv_stds[traj_i] = np.std(traj_averaged_cvs)\n\n ax = axs[2]\n ax.plot(NUM_TRAJECTORIES, cv_means, c=plot_color, label=plot_legend_name)\n ax.scatter(NUM_TRAJECTORIES, cv_means, c=plot_color)\n #ax.fill_between(\n # NUM_TRAJECTORIES,\n # cv_means - cv_stds,\n # cv_means + cv_stds,\n # alpha=0.2,\n # color=plot_color,\n # edgecolor=\"none\",\n # linewidth=0.0\n #)\n ax.set_xticks(NUM_TRAJECTORIES)\n ax.tick_params(axis='both', which='both', labelsize=\"x-large\")\n ax.set_ylabel(\"Coefficient of variance $\\\\sigma/\\\\mu$\", fontsize=\"x-large\")\n ax.set_xlabel(\"Number of trajectories\", fontsize=\"x-large\")\n ax.grid(alpha=0.2)\n\n axs[1].legend(prop={\"size\": \"large\"})\n pyplot.tight_layout()\n pyplot.savefig(\"figures/metric_comparison.pdf\", bbox_inches=\"tight\", pad_inches=0.0)", "def fit_and_plot(self, max_iter):\n from matplotlib import pyplot as plt\n from matplotlib import cm\n\n colours = cm.rainbow(np.linspace(0, 1, self.num_classes)) # FIXME: rainbow list -> array\n\n def plot_data(d):\n for c in range(self.num_classes):\n for n in range(self.num_nuisances):\n plt.scatter(*d[c][n].T, c=colours[c])\n plt.waitforbuttonpress()\n\n def plot_mean(th):\n for c in range(self.num_classes):\n for n in range(self.num_nuisances):\n plt.scatter(*th[c][n].mean.T, c=colours[c], marker=\"x\")\n plt.waitforbuttonpress()\n\n plt.ion()\n plt.scatter(*self.data.T)\n plt.waitforbuttonpress()\n\n split_data = self.initialise_clusters_with_kmeans()\n plot_data(split_data)\n thetas = self.maximization(split_data)\n plot_mean(thetas)\n\n for i in range(max_iter):\n plt.clf()\n split_data = self.expectation(thetas)\n plot_data(split_data)\n thetas = self.maximization(split_data)\n plot_mean(thetas)\n return split_data, thetas", "def plot_results(outputs_table_totals, elec_benefits, gas_benefits):\n summer_months = [6, 7, 8, 9]\n shoulder_months = [3, 4, 5, 10]\n winter_months = [11, 12, 1, 2]\n peak_hours = [16, 17, 18, 19, 20]\n pct_hours_in_summer = 2928 / 8760\n pct_hours_in_shoulder = 2952 / 8760\n pct_hours_in_winter = 2880 / 8760\n\n trc_costs_record = outputs_table_totals[\"TRC Costs ($)\"]\n pac_costs_record = outputs_table_totals[\"PAC Costs ($)\"]\n trc_record = outputs_table_totals[\"TRC\"]\n pac_record = outputs_table_totals[\"PAC\"]\n lifecycle_net_mwh = outputs_table_totals[\"Electricity Lifecycle Net Savings (MWh)\"]\n lifecycle_net_therms = outputs_table_totals[\"Gas Lifecycle Net Savings (Therms)\"]\n lifecycle_net_ghg = outputs_table_totals[\"Total Lifecycle GHG Savings (Tons)\"]\n\n # Getting variables for plots\n elec_benefits_cols = (\n [\"hourly_savings\"] + ACC_COMPONENTS_ELECTRICITY + [\"av_csts_levelized\"]\n )\n\n elec_benefits_hour_month_year = (\n elec_benefits.groupby([\"hour_of_day\", \"year\", \"month\"])\n .agg(\n {\n **{component: \"sum\" for component in ACC_COMPONENTS_ELECTRICITY},\n **{\n \"hourly_savings\": \"sum\",\n \"marginal_ghg\": \"sum\",\n \"av_csts_levelized\": \"mean\",\n },\n }\n )\n .reset_index()\n )\n\n total_benefits = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\"total\"].sum()\n )\n\n summer_benefits 
= list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n summer_peak_benefits = elec_benefits_hour_month_year[\"total\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].sum()\n shoulder_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(shoulder_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n winter_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n total_savings = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\"hourly_savings\"].sum()\n )\n summer_savings = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n shoulder_savings = list(\n elec_benefits_hour_month_year[\n ((elec_benefits_hour_month_year[\"month\"].isin(shoulder_months)))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n summer_peak_savings = elec_benefits_hour_month_year[\"hourly_savings\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].sum()\n winter_savings = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n total_av_csts_avg = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\n \"av_csts_levelized\"\n ].mean()\n )\n summer_av_csts_avg = list(\n pct_hours_in_summer\n * elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n summer_peak_av_csts_avg = elec_benefits_hour_month_year[\"av_csts_levelized\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].mean()\n shoulder_av_csts_avg = list(\n pct_hours_in_shoulder\n * elec_benefits_hour_month_year[\n ((elec_benefits_hour_month_year[\"month\"].isin(shoulder_months)))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n winter_av_csts_avg = list(\n pct_hours_in_winter\n * elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n\n elec_benefits_sum_by_hod = (\n elec_benefits[elec_benefits_cols].groupby(elec_benefits[\"hour_of_day\"]).sum()\n )\n elec_benefits_hoy = (\n elec_benefits[elec_benefits_cols]\n .groupby(elec_benefits[\"hour_of_year\"])\n .sum()\n .cumsum()\n .reset_index()\n )\n sav_avcsts_288 = (\n elec_benefits.groupby([\"hour_of_day\", \"month\"])\n .agg(\n {\n **{component: \"sum\" for component in ACC_COMPONENTS_ELECTRICITY},\n **{\n \"hourly_savings\": \"sum\",\n \"marginal_ghg\": \"sum\",\n \"av_csts_levelized\": \"mean\",\n },\n }\n )\n .reset_index()\n )\n sav_avcsts_288 = sav_avcsts_288[\n [\"hour_of_day\", \"month\", \"hourly_savings\", \"total\", \"marginal_ghg\"]\n ]\n ghgsav = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"marginal_ghg\")\n sav = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"hourly_savings\")\n avcsts = 
sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"total\")\n\n # savings load shape plot\n fig0, (ax1, ax2, ax3) = plt.subplots(\n 1, 3, figsize=(18, 5), sharex=True, sharey=True\n )\n plt.subplots_adjust(wspace=0, hspace=0)\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels1 = [\"Summer\"]\n legend_labels2 = [\"Shoulder\"]\n legend_labels3 = [\"Winter\"]\n\n ax1.plot(\n hod,\n summer_savings,\n c=\"firebrick\",\n linewidth=5,\n marker=\"$\\u25EF$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax2.plot(\n hod,\n shoulder_savings,\n c=\"royalblue\",\n linewidth=5,\n marker=\"$\\u2206$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax3.plot(\n hod,\n winter_savings,\n c=\"green\",\n linewidth=5,\n marker=\"$\\u25A1$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax1.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax2.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax3.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n leg1 = ax1.legend(legend_labels1, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels2, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels3, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax1.set_ylabel(\"Savings (MWh/hr)\", size=16)\n ax2.set_xlabel(\"Hour of Day\", size=16)\n\n if max(summer_savings + shoulder_savings + winter_savings) < 0:\n ymax = 0\n else:\n ymax = max(summer_savings + shoulder_savings + winter_savings)\n if min(summer_savings + shoulder_savings + winter_savings) > 0:\n ymin = 0\n else:\n ymin = min(summer_savings + shoulder_savings + winter_savings)\n\n # Tick and lebel parameters\n ax1.set_ylim(ymin * 1.08, ymax * 1.08)\n ax1.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax1.set_xticks(np.arange(0, 24, step=4))\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax1.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax2.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax2.xaxis.set_minor_locator(AutoMinorLocator())\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n 
ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n # Set plot title, size, and position\n ax1.set_title(\"Seasonal Savings Load Shapes\", size=18, loc=\"left\").set_position(\n [0, 1.03]\n )\n\n # benefits_seasonal_shape_plot\n fig1, (ax1, ax2, ax3) = plt.subplots(\n 1, 3, figsize=(18, 5), sharex=True, sharey=True\n )\n plt.subplots_adjust(wspace=0, hspace=0)\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels1 = [\"Summer\"]\n legend_labels2 = [\"Shoulder\"]\n legend_labels3 = [\"Winter\"]\n\n ax1.plot(\n hod,\n summer_benefits,\n c=\"firebrick\",\n linewidth=5,\n marker=\"$\\u2B24$\",\n markersize=13,\n linestyle=\":\",\n )\n ax2.plot(\n hod,\n shoulder_benefits,\n c=\"royalblue\",\n linewidth=5,\n marker=\"$\\u25B2$\",\n markersize=13,\n linestyle=\":\",\n )\n ax3.plot(\n hod,\n winter_benefits,\n c=\"green\",\n linewidth=5,\n marker=\"$\\u25A0$\",\n markersize=13,\n linestyle=\":\",\n )\n ax1.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax2.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax3.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n leg1 = ax1.legend(legend_labels1, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels2, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels3, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax1.set_ylabel(\"TRC Benefits ($/hr)\", size=16)\n ax2.set_xlabel(\"Hour of Day\", size=16)\n\n if max(summer_benefits + shoulder_benefits + winter_benefits) < 0:\n ymax = 0\n else:\n ymax = max(summer_benefits + shoulder_benefits + winter_benefits)\n if min(summer_benefits + shoulder_benefits + winter_benefits) > 0:\n ymin = 0\n else:\n ymin = min(summer_benefits + shoulder_benefits + winter_benefits)\n\n # Tick and label parameters\n ax1.set_ylim(ymin * 1.08, ymax * 1.08)\n ax1.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax1.set_xticks(np.arange(0, 24, step=4))\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax1.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax2.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax2.xaxis.set_minor_locator(AutoMinorLocator())\n ax3.tick_params(\n which=\"major\", axis=\"x\", 
direction=\"out\", length=7, width=2, labelsize=14\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Seasonal TRC Benefits by Hour ($)\", size=18, loc=\"left\"\n ).set_position([0, 1.03])\n\n # sum_hourly_plot\n fig2 = plt.figure(figsize=(12, 7), dpi=250)\n ax = fig2.gca()\n colors = [\n \"royalblue\",\n \"black\",\n \"pink\",\n \"firebrick\",\n \"gray\",\n \"darkviolet\",\n \"darkorange\",\n \"green\",\n \"saddlebrown\",\n ]\n legend_labels = []\n x = 1\n while x <= len(ACC_COMPONENTS_ELECTRICITY[1:]):\n if x == 1:\n ax.bar(\n hod,\n elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]],\n color=colors[x - 1],\n )\n legend_labels.append(\n re.findall(\n \".*Name: (.*),\",\n str(elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]]),\n )[0]\n )\n x += 1\n else:\n ax.bar(\n hod,\n elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]],\n bottom=elec_benefits_sum_by_hod.iloc[:, 2 : x + 1].sum(axis=1),\n color=colors[x - 1],\n )\n legend_labels.append(\n re.findall(\n \".*Name: (.*),\",\n str(elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]]),\n )[0]\n )\n x += 1\n\n # Set x and y limits based on min and max values\n ymax = elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).max()\n if elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).min() > 0:\n ymin = 0\n else:\n ymin = elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).min()\n\n ax.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax.set_ylim(ymin * 1.1, ymax * 1.08)\n\n # Set x and y axis labels\n ax.set_xlabel(\"Hour of Day\", size=17, labelpad=5)\n ax.set_ylabel(\"$ Avoided Costs\", size=17)\n\n # Set plot title, size, and position\n ax.set_title(\n \"Sum of Electric Avoided Costs by Component and Hour of Day\",\n size=17,\n loc=\"left\",\n )\n\n # Tick and lebel parameters\n ax.tick_params(bottom=True, top=False, left=True, right=False)\n ax.set_xticks(np.arange(0, 24, step=4))\n ax.set_yticks(\n np.arange(\n int(round(ymin * 1.1, 0)),\n ymax * 1.08,\n step=max(round(ymax - ymin, 2) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n plt.legend(\n legend_labels,\n bbox_to_anchor=(1, 1),\n fontsize=12,\n loc=\"upper left\",\n frameon=False,\n )\n\n # avoided_cost_summary_plot\n fig3, (ax1, ax2, ax3) = plt.subplots(\n 3, 1, figsize=(6, 10), sharex=True, sharey=False\n )\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels = [\"Total\", \"Summer\", \"Shoulder\", \"Winter\"]\n\n ax1.plot(\n hod,\n total_benefits,\n c=\"royalblue\",\n marker=\"$\\u25EF$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax1.plot(hod, summer_benefits, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax1.plot(hod, shoulder_benefits, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax1.plot(hod, winter_benefits, c=\"teal\", linewidth=1, linestyle=\"-\")\n ax2.plot(\n hod,\n total_savings,\n c=\"firebrick\",\n marker=\"$\\u2206$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax2.plot(hod, summer_savings, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax2.plot(hod, shoulder_savings, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n 
ax2.plot(hod, winter_savings, c=\"teal\", linewidth=1, linestyle=\"-\")\n ax3.plot(\n hod,\n total_av_csts_avg,\n c=\"green\",\n marker=\"$\\u25A0$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax3.plot(hod, summer_av_csts_avg, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax3.plot(hod, shoulder_av_csts_avg, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax3.plot(hod, winter_av_csts_avg, c=\"teal\", linewidth=1, linestyle=\"-\")\n\n leg1 = ax1.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax3.set_xticks(np.arange(0, 24, step=4))\n ax3.set_xlabel(\"Hour of Day\", size=14, labelpad=5)\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=12\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n ax1.set_ylabel(\"TRC Benefits ($)\", size=14)\n ax2.set_ylabel(\"Savings (MWh)\", size=14)\n ax3.set_ylabel(\"Av. Cost ($/MWh)\", size=14)\n\n if max(total_benefits + summer_benefits + shoulder_benefits + winter_benefits) < 0:\n ymax1 = 0\n else:\n ymax1 = max(\n total_benefits + summer_benefits + shoulder_benefits + winter_benefits\n )\n if min(total_benefits + summer_benefits + shoulder_benefits + winter_benefits) > 0:\n ymin1 = 0\n else:\n ymin1 = min(\n total_benefits + summer_benefits + shoulder_benefits + winter_benefits\n )\n if max(total_savings + summer_savings + shoulder_savings + winter_savings) < 0:\n ymax2 = 0\n else:\n ymax2 = max(total_savings + summer_savings + shoulder_savings + winter_savings)\n if min(total_savings + summer_savings + shoulder_savings + winter_savings) > 0:\n ymin2 = 0\n else:\n ymin2 = min(total_savings + summer_savings + shoulder_savings + winter_savings)\n if (\n max(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n < 0\n ):\n ymax3 = 0\n else:\n ymax3 = max(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n if (\n min(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n > 0\n ):\n ymin3 = 0\n else:\n ymin3 = min(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n\n # Tick and lebel parameters\n ax1.set_ylim(ymin1 * 1.08, ymax1 * 1.08)\n ax2.set_ylim(ymin2 * 1.08, ymax2 * 1.08)\n ax3.set_ylim(ymin3 * 1.08, ymax3 * 1.08)\n\n ax1.set_yticks(\n np.arange(\n ymin1 * 1.08,\n ymax1 * 1.08,\n step=max(round(ymax1 - ymin1, 3) / 5, int((round(ymax1 - ymin1, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin2 * 1.08,\n ymax2 * 1.08,\n step=max(round(ymax2 - ymin2, 3) / 5, int((round(ymax2 - ymin2, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin3 * 1.08,\n ymax3 * 1.08,\n step=max(round(ymax3 - ymin3, 3) / 5, int((round(ymax3 - ymin3, 0)) / 4)),\n )\n )\n\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n 
ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n ax2.axvspan(16, 21, alpha=0.2, color=\"grey\")\n ax3.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n # Print key information\n plt.annotate(\n \"Electric Benefits = $\" + str(round(elec_benefits[\"total\"].sum(), 2)),\n xy=(350, 530),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Gas Benefits = $\" + str(round(gas_benefits, 2)),\n xy=(350, 505),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Total Benefits = $\"\n + str(round(elec_benefits[\"total\"].sum() + gas_benefits, 2)),\n xy=(350, 480),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"TRC Costs = $\" + str(trc_costs_record),\n xy=(350, 455),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"PAC Costs = $\" + str(pac_costs_record),\n xy=(350, 430),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"TRC = \" + str(trc_record),\n xy=(350, 405),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"PAC = \" + str(pac_record),\n xy=(350, 380),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle Electric Savings = \" + str(lifecycle_net_mwh) + \" MWh\",\n xy=(350, 335),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle Gas Savings = \" + str(lifecycle_net_therms) + \" Therms\",\n xy=(350, 310),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle GHG Savings = \" + str(lifecycle_net_ghg) + \" Tons\",\n xy=(350, 285),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n str(round(100 * ((summer_peak_savings) / sum(total_savings)), 1))\n + \"% MWh savings during summer peak period\",\n xy=(350, 260),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n str(round(100 * ((summer_peak_benefits) / sum(total_benefits)), 1))\n + \"% Electric TRC benefits from summer peak period\",\n xy=(350, 235),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Electric Benefits per MWh = $\"\n + str(round(elec_benefits[\"total\"].sum() / lifecycle_net_mwh, 2)),\n xy=(350, 210),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Typical Avoided Cost per MWh = $\"\n + str(round(elec_benefits[\"av_csts_levelized\"].mean(), 2)),\n xy=(350, 145),\n xycoords=\"axes points\",\n fontsize=18,\n )\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Savings and Avoided Cost Profiles\", size=16, loc=\"left\"\n ).set_position([0, 1.03])\n\n # marginal_ghg_savings_plot\n cmp = sns.diverging_palette(16, 260, l=35, n=25, as_cmap=True)\n\n fig4 = plt.figure(figsize=(8, 6), dpi=100)\n ax1 = fig4.gca()\n y_ticks = [\n 0,\n \"\",\n 2,\n \"\",\n 4,\n \"\",\n 6,\n \"\",\n 8,\n \"\",\n 10,\n \"\",\n 12,\n \"\",\n 14,\n \"\",\n 16,\n \"\",\n 18,\n \"\",\n 20,\n \"\",\n 22,\n ]\n hmp = sns.heatmap(ghgsav, cmap=cmp, ax=ax1, yticklabels=y_ticks, center=0.00)\n ax1.set_xlabel(\"Month\", size=15)\n ax1.set_ylabel(\"Hour of Day\", size=15)\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=13\n )\n ax1.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=13,\n rotation=0,\n )\n ax1.set_title(\"Electric GHG Savings by Month and Hour\", size=15, loc=\"left\", pad=8)\n cbar1 = hmp.collections[0].colorbar\n cbar1.ax.tick_params(labelsize=14)\n plt.annotate(\"Sum GHG\", 
xy=(370, 352), xycoords=\"axes points\", fontsize=12)\n plt.annotate(\"Savings (Tons)\", xy=(370, 336), xycoords=\"axes points\", fontsize=12)\n\n # month_hour_savings_benefits_plot\n fig5, (ax1, ax2) = plt.subplots(1, 2, figsize=(21, 10), dpi=200)\n y_ticks = [\n 0,\n \"\",\n 2,\n \"\",\n 4,\n \"\",\n 6,\n \"\",\n 8,\n \"\",\n 10,\n \"\",\n 12,\n \"\",\n 14,\n \"\",\n 16,\n \"\",\n 18,\n \"\",\n 20,\n \"\",\n 22,\n ]\n fleft = sns.heatmap(sav, cmap=cmp, ax=ax1, yticklabels=y_ticks, center=0.00)\n fright = sns.heatmap(avcsts, cmap=cmp, ax=ax2, yticklabels=y_ticks, center=0.00)\n ax1.set_xlabel(\"Month\", size=22)\n ax1.set_ylabel(\"Hour of Day\", size=22)\n ax2.set_xlabel(\"Month\", size=22)\n ax2.set_ylabel(\"Hour of Day\", size=22)\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=18\n )\n ax1.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=18,\n rotation=0,\n )\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=18\n )\n ax2.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=18,\n rotation=0,\n )\n ax1.set_title(\n \"MWh Savings by Month and Hour\", size=24, loc=\"left\", pad=15\n ).set_position([0, 1.1])\n ax2.set_title(\"$ Benefits by Month and Hour\", size=24, loc=\"left\", pad=15)\n fig4.tight_layout(pad=2.0)\n cbar1 = fleft.collections[0].colorbar\n cbar1.ax.tick_params(labelsize=18)\n cbar2 = fright.collections[0].colorbar\n cbar2.ax.tick_params(labelsize=18)\n plt.annotate(\"Sum MWh\", xy=(-200, 585), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Savings\", xy=(-193, 560), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Sum TRC\", xy=(435, 585), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Benefits\", xy=(442, 560), xycoords=\"axes points\", fontsize=20)\n\n # savings_benefits_cumulative_sum_plot\n fig6 = plt.figure(figsize=(12, 7), dpi=250)\n ax1 = fig6.gca()\n ax1.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[\"hourly_savings\"],\n color=\"royalblue\",\n linewidth=3,\n )\n ax2 = ax1.twinx()\n ax2.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[\"total\"],\n color=\"firebrick\",\n linewidth=3,\n linestyle=\"--\",\n )\n ax2.axhline(y=0, color=\"gray\", linewidth=0.7, linestyle=\"--\")\n\n # Set x and y limits based on min and max values\n\n if (\n elec_benefits_hoy[\"hourly_savings\"].max() >= 0\n and elec_benefits_hoy[\"total\"].max() >= 0\n ):\n ymax1 = elec_benefits_hoy[\"hourly_savings\"].max()\n ymax2 = elec_benefits_hoy[\"total\"].max()\n elif (\n elec_benefits_hoy[\"hourly_savings\"].max() < 0\n and elec_benefits_hoy[\"total\"].max() < 0\n ):\n ymax1 = 0\n ymax2 = 0\n elif (\n elec_benefits_hoy[\"hourly_savings\"].max() < 0\n and elec_benefits_hoy[\"total\"].max() > 0\n ):\n ymax1 = (\n -1\n * elec_benefits_hoy[\"hourly_savings\"].min()\n * (\n elec_benefits_hoy[\"total\"].max()\n / (elec_benefits_hoy[\"total\"].max() - elec_benefits_hoy[\"total\"].min())\n )\n / (\n 1\n - elec_benefits_hoy[\"total\"].max()\n / (elec_benefits_hoy[\"total\"].max() - elec_benefits_hoy[\"total\"].min())\n )\n )\n ymax2 = elec_benefits_hoy[\"total\"].max()\n else:\n ymax1 = 0\n ymax2 = (\n -1\n * elec_benefits_hoy[\"total\"].min()\n * (\n elec_benefits_hoy[\"hourly_savings\"].max()\n / (\n elec_benefits_hoy[\"hourly_savings\"].max()\n - elec_benefits_hoy[\"hourly_savings\"].min()\n )\n )\n )\n\n if (\n 
elec_benefits_hoy[\"hourly_savings\"].min() <= 0\n and elec_benefits_hoy[\"total\"].min() <= 0\n ):\n ymin1 = elec_benefits_hoy[\"hourly_savings\"].min()\n ymin2 = elec_benefits_hoy[\"total\"].min()\n elif (\n elec_benefits_hoy[\"hourly_savings\"].min() > 0\n and elec_benefits_hoy[\"total\"].min() > 0\n ):\n ymin1 = 0\n ymin2 = 0\n elif (\n elec_benefits_hoy[\"hourly_savings\"].min() > 0\n and elec_benefits_hoy[\"total\"].min() < 0\n ):\n ymin1 = (\n -1\n * elec_benefits_hoy[\"hourly_savings\"].max()\n * (\n elec_benefits_hoy[\"total\"].min()\n / (elec_benefits_hoy[\"total\"].min() - elec_benefits_hoy[\"total\"].max())\n )\n / (\n 1\n - elec_benefits_hoy[\"total\"].min()\n / (elec_benefits_hoy[\"total\"].min() - elec_benefits_hoy[\"total\"].max())\n )\n )\n ymin2 = elec_benefits_hoy[\"total\"].min()\n else:\n ymin1 = 0\n ymin2 = (\n -1\n * elec_benefits_hoy[\"total\"].min()\n * (\n elec_benefits_hoy[\"hourly_savings\"].min()\n / (\n elec_benefits_hoy[\"hourly_savings\"].min()\n - elec_benefits_hoy[\"hourly_savings\"].min()\n )\n )\n )\n\n # Set x and y axis limits\n ax1.set_xlim(-340, 9000)\n ax1.set_ylim(ymin1 * 1.08, ymax1 * 1.08)\n ax2.set_ylim(ymin2 * 1.08, ymax2 * 1.08)\n\n # Set x and y axis labels\n ax1.set_xlabel(\"Hour of Year\", size=17, labelpad=5)\n ax1.set_ylabel(\"Net Lifecycle Savings (MWh)\", size=17)\n ax2.set_ylabel(\"$ TRC Benefits\", size=17, rotation=-90, labelpad=20)\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Cumulative Savings and TRC Benefits by Hour of Year\",\n size=17,\n loc=\"left\",\n pad=8,\n )\n\n # Tick and lebel parameters\n ax1.set_xticks(np.arange(0, 8760, step=1000))\n ax1.set_yticks(\n np.arange(\n int(round(ymin1 * 1.1, 0)),\n ymax1 * 1.08,\n step=max(round(ymax1 - ymin1, 2) / 5, int((round(ymax1 - ymin1, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n ax2.set_xticks(np.arange(0, 8760, step=1000))\n ax2.set_yticks(\n np.arange(\n int(round(ymin2 * 1.1, 0)),\n ymax2 * 1.08,\n step=max(round(ymax2 - ymin2, 2) / 5, int((round(ymax2 - ymin2, 0)) / 4)),\n )\n )\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax2.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n ax1.legend(\n [\"Savings\"],\n fontsize=12,\n bbox_to_anchor=(0.02, 1),\n loc=\"upper left\",\n frameon=False,\n )\n ax2.legend(\n [\"TRC Beneftis\"],\n fontsize=12,\n bbox_to_anchor=(0.02, 0.95),\n loc=\"upper left\",\n frameon=False,\n )\n\n fig7 = plt.figure(figsize=(12, 7), dpi=250)\n ax = fig7.gca()\n colors1 = [\n \"black\",\n \"royalblue\",\n \"black\",\n \"pink\",\n \"firebrick\",\n \"gray\",\n \"darkviolet\",\n \"darkorange\",\n \"green\",\n \"saddlebrown\",\n ]\n legend_labels2 = []\n\n ax.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[ACC_COMPONENTS_ELECTRICITY[0]],\n color=colors1[0],\n linewidth=3,\n )\n legend_labels2.append(ACC_COMPONENTS_ELECTRICITY[0])\n x = 1\n while x <= len(ACC_COMPONENTS_ELECTRICITY) - 2:\n ax.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[ACC_COMPONENTS_ELECTRICITY[x]],\n color=colors1[x],\n )\n 
legend_labels2.append(ACC_COMPONENTS_ELECTRICITY[x])\n x += 1\n\n # Set x and y limits based on min and max values\n if max(elec_benefits_hoy.iloc[:, 2:x].max()) < 0:\n ymax = 0\n else:\n ymax = max(elec_benefits_hoy.iloc[:, 2:x].max())\n if min(elec_benefits_hoy.iloc[:, 2:x].min()) > 0:\n ymin = 0\n else:\n ymin = min(elec_benefits_hoy.iloc[:, 2:x].min())\n\n ax.set_xlim(-340, 9000)\n ax.set_ylim(ymin * 1.1, ymax * 1.08)\n\n # Set x and y axis labels\n ax.set_xlabel(\"Hour of Year\", size=17, labelpad=5)\n ax.set_ylabel(\"$ TRC Benefits\", size=17)\n\n # Set plot title, size, and position\n ax.set_title(\n \"Sum of Avoided Costs by Component and Hour of Day\", size=17, loc=\"left\"\n )\n\n # Tick and lebel parameters\n ax.set_xticks(np.arange(0, 8760, step=1000))\n ax.set_yticks(\n np.arange(\n int(round(ymin * 1.1, 0)),\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n plt.legend(\n legend_labels2,\n bbox_to_anchor=(1, 1),\n fontsize=12,\n loc=\"upper left\",\n frameon=False,\n )", "def plot_calibration_curve(est, name, fig_index, data):\n\n X_train = data[0]\n X_test = data[1]\n y_train = data[2]\n y_test = data[3]\n\n y = np.concatenate([y_train, y_test], axis=0)\n\n # Calibrated with isotonic calibration\n isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')\n\n # Calibrated with sigmoid calibration\n sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')\n\n # Logistic regression with no calibration as baseline\n lr = LogisticRegression(C=1., solver='lbfgs')\n\n fig = plt.figure(1, figsize=(15, 10))\n ax1 = plt.subplot2grid((4, 6), (0, 0), colspan=2, rowspan=2)\n ax2 = plt.subplot2grid((4, 6), (0, 2), colspan=2, rowspan=2)\n ax3 = plt.subplot2grid((4, 6), (0, 4), colspan=2, rowspan=2)\n ax4 = plt.subplot2grid((4, 6), (2, 0), colspan=6, rowspan=2)\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n for clf, name in [(lr, 'Logistic'),\n (est, name),\n (isotonic, name + ' + Isotonic'),\n (sigmoid, name + ' + Sigmoid')]:\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n if hasattr(clf, \"predict_proba\"):\n prob_pos = clf.predict_proba(X_test)[:, 1]\n y_proba = prob_pos.copy()\n else: # use decision function\n prob_pos = clf.decision_function(X_test)\n y_proba = prob_pos.copy()\n prob_pos = \\\n (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())\n print(\"%s:\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\" % f1_score(y_test, y_pred))\n print(\"\\tAve. 
Precision Score: %1.3f\\n\" % \\\n average_precision_score(y_test, y_proba))\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n fpr, tpr, thresholds = roc_curve(y_test, y_proba, drop_intermediate=False)\n roc_auc = roc_auc_score(y_test, y_proba)\n ax2.plot(fpr, tpr, ls='-', label=\"%s (%1.3f)\" % (name, roc_auc))\n\n precision, recall, _ = precision_recall_curve(y_test, y_proba)\n ax3.plot(recall, precision)\n\n ax4.hist(prob_pos, range=(0, 1), bins=10,\n label='%s' % name, histtype=\"step\", lw=2)\n\n ax1.set_xlabel(\"Score\", fontsize=14)\n ax1.set_ylabel(\"Fraction of positives\", fontsize=14)\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)', fontsize=16)\n\n ax2.set_xlabel(\"False Positive Rate\", fontsize=14)\n ax2.set_ylabel(\"True Positive Rate\", fontsize=14)\n ax2.set_ylim([-0.05, 1.05])\n ax2.legend(loc=\"lower right\")\n ax2.set_title('ROC Curve', fontsize=16)\n\n ax3.set_xlabel(\"Recall\", fontsize=14)\n ax3.set_ylabel(\"Precision\", fontsize=14)\n ax3.set_ylim([-0.05, 1.05])\n ax3.legend(loc=\"lower center\")\n ax3.set_title('Precision-Recall Curve', fontsize=16)\n\n ax4.set_xlabel(\"Mean predicted value\", fontsize=14)\n ax4.set_ylabel(\"Count\", fontsize=14)\n ax4.legend(loc=\"upper center\")\n ax4.set_title('Classification Result', fontsize=16)\n\n plt.tight_layout()\n\n plt.show()\n\n return", "def plot_results(sgd_train_acc, sgd_train_std, sgd_heldout_acc, sgd_heldout_std, sgd_test_acc,\n dt_train_acc, dt_train_std, dt_heldout_acc, dt_heldout_std, dt_test_acc,\n dt4_train_acc, dt4_train_std, dt4_heldout_acc, dt4_heldout_std, dt4_test_acc,\n stumps_train_acc, stumps_train_std, stumps_heldout_acc, stumps_heldout_std, stumps_test_acc):\n train_x_pos = [0, 4, 8, 12]\n cv_x_pos = [1, 5, 9, 13]\n test_x_pos = [2, 6, 10, 14]\n ticks = cv_x_pos\n\n labels = ['sgd', 'dt', 'dt4', 'stumps (4 x 50)']\n\n train_accs = [sgd_train_acc, dt_train_acc, dt4_train_acc, stumps_train_acc]\n train_errors = [sgd_train_std, dt_train_std, dt4_train_std, stumps_train_std]\n\n cv_accs = [sgd_heldout_acc, dt_heldout_acc, dt4_heldout_acc, stumps_heldout_acc]\n cv_errors = [sgd_heldout_std, dt_heldout_std, dt4_heldout_std, stumps_heldout_std]\n\n test_accs = [sgd_test_acc, dt_test_acc, dt4_test_acc, stumps_test_acc]\n\n fig, ax = plt.subplots()\n ax.bar(train_x_pos, train_accs, yerr=train_errors, align='center', alpha=0.5, ecolor='black', capsize=10, label='train')\n ax.bar(cv_x_pos, cv_accs, yerr=cv_errors, align='center', alpha=0.5, ecolor='black', capsize=10, label='held-out')\n ax.bar(test_x_pos, test_accs, align='center', alpha=0.5, capsize=10, label='test')\n ax.set_ylabel('Accuracy')\n ax.set_xticks(ticks)\n ax.set_xticklabels(labels)\n ax.set_title('Models')\n ax.yaxis.grid(True)\n ax.legend()\n plt.tight_layout()", "def plot_percentage_difference_graph(results, datasets, name_suffix=\"\", parameter=\"BACC\", x_label=\"Feature selection approach\", difference_from=\"no feature selection\", figsize=(16, 5), legend_y=None,\n label_rotation=0, y_label_pos=None, y_ticks=None, x_label_replacement_dict=None, feature_selection_specific=False):\n if x_label_replacement_dict is None:\n x_label_replacement_dict = {}\n\n if (len(results) == 1 or len(results) == 2) or legend_y is None:\n legend_y = -0.31\n\n if len(results) == 1 or len(results) == 2:\n y_label_pos = 0.5\n if 
len(results) < 4 and y_label_pos is None:\n y_label_pos = 0\n elif y_label_pos is None:\n y_label_pos = -0.4\n # Output a raw dump of results to file so that it can be used to tweak visualisation without re-executing experiment\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n file_name = \"raw_dump_{0}.txt\".format(current_time)\n with open(os.path.dirname(os.path.realpath(__file__)) + \"/../results/\" + file_name, \"wb\") as output_file:\n output_file.write(str(results))\n patterns = (None, \"////\")\n\n colors = [\"#64B3DE\", \"#1f78b4\", \"#FBAC44\", \"#B9B914\", \"#bc1659\", \"#33a02c\", \"#6ABF20\", \"#ff7f00\", \"#6a3d9a\", \"#5a2add\", \"#b15928\", \"#e31a1c\", \"grey\"]\n classifier_arr = []\n for i in range(len(results)):\n classifier_arr.append(list())\n index = 0\n\n # Calculate difference in BACC from first entry as well as mean difference across classifiers\n for results_per_classifier in results:\n no_feature_selection = results[index][0][1]\n for i in range(len(no_feature_selection) + 1):\n classifier_arr[index].append(list())\n for i in range(1, len(results_per_classifier)):\n data_balancer_results = results_per_classifier[i][1]\n x = 0\n mean_classification = 0\n for result_tuple in data_balancer_results:\n value = result_tuple[0][2] - no_feature_selection[x][0][2]\n classifier_arr[index][x].append(value)\n mean_classification += value\n x += 1\n mean_classification /= float(len(data_balancer_results))\n classifier_arr[index][x].append(mean_classification)\n index += 1\n\n fig = plt.figure(figsize=figsize)\n\n classifiers = np.arange(len(classifier_arr[0]))\n\n bar_width = 0.2\n opacity = 0.9\n num_columns = 1 if len(results) == 1 else 2\n subplt_val = (100 * round(len(results) / 2.0)) + (10 * num_columns) + 1\n plt.subplots_adjust(hspace=0.42, wspace=0.1)\n ax1 = plt.subplot(subplt_val)\n\n for i in range(len(classifier_arr[0])):\n if i + 1 != len(classifier_arr[0]):\n label = results[0][0][1][i][1]\n else:\n label = \"Mean classification\"\n data_balancers = np.arange(len(classifier_arr[0][i])) * 3\n plt.bar(data_balancers + (i * bar_width), classifier_arr[0][i], bar_width,\n alpha=opacity,\n color=colors[i],\n hatch=patterns[i % len(patterns)],\n label=label)\n\n feature_selection_labels = [results[0][i][0] if results[0][i][0] not in x_label_replacement_dict else x_label_replacement_dict[results[0][i][0]] for i in range(1, len(results[0]))]\n if feature_selection_specific:\n feature_selection_labels = [feature_selection_labels[i - 1] + \"\\n{0}-{1:.1f}-{2}\".format(results[0][i][1][0][4][0], results[0][i][1][0][4][1], results[0][i][1][0][4][2]) for i in\n range(1, len(results[0]))]\n\n plt.xticks(data_balancers + (bar_width / 2) * len(classifiers), feature_selection_labels, rotation=label_rotation)\n bonus = \"\"\n if feature_selection_specific:\n bonus = \" ({0})\".format(results[0][0][1][0][4][3])\n plt.title(datasets[0].replace(\"_\", \" \") + bonus)\n plt.ylabel(\"Change in {0} from {1}\".format(parameter, difference_from), y=y_label_pos)\n\n vertical_plt = 0\n for z in range(1, len(results)):\n ax2 = plt.subplot(subplt_val + z, sharey=ax1)\n color = iter(cm.Set1(np.linspace(0, 1, len(no_feature_selection) + 1)))\n for i in range(len(classifier_arr[z])):\n if i + 1 != len(classifier_arr[z]):\n label = results[z][0][1][i][1]\n else:\n label = \"Mean classification\"\n data_balancers = np.arange(len(classifier_arr[z][i])) * 3\n plt.bar(data_balancers + (i * bar_width), classifier_arr[z][i], bar_width,\n alpha=opacity,\n color=colors[i],\n 
hatch=patterns[i % len(patterns)],\n label=label)\n\n feature_selection_labels = [results[0][i][0] if results[0][i][0] not in x_label_replacement_dict else x_label_replacement_dict[results[0][i][0]] for i in range(1, len(results[0]))]\n if feature_selection_specific:\n feature_selection_labels = [feature_selection_labels[i - 1] + \"\\n{0}-{1:.1f}-{2}\".format(results[z][i][1][0][4][0], results[z][i][1][0][4][1], results[z][i][1][0][4][2]) for i in\n range(1, len(results[0]))]\n\n plt.xticks(data_balancers + (bar_width / 2) * len(classifiers), feature_selection_labels, rotation=label_rotation)\n bonus = \"\"\n if feature_selection_specific:\n bonus = \" ({0})\".format(results[z][0][1][0][4][3])\n plt.title(datasets[z].replace(\"_\", \" \") + bonus)\n\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n\n if len(results) >= 4:\n legend_x = -0.08\n elif len(results) == 1:\n legend_x = 0.5\n elif len(results) == 2:\n legend_x = 0\n else:\n legend_x = 1\n\n legend = plt.legend(loc='lower center', bbox_to_anchor=(legend_x, legend_y), fancybox=True, frameon=True, ncol=7)\n legend.get_frame().set_facecolor('#ffffff')\n\n if y_ticks is not None:\n plt.yticks(y_ticks)\n plt.ylim(ymin=y_ticks[0])\n plt.ylim(ymax=y_ticks[-1])\n\n x_label_x_pos = 0\n if len(results) == 1:\n x_label_x_pos = 0.5\n elif len(results) == 3:\n x_label_x_pos = 1\n plt.xlabel(x_label, x=x_label_x_pos, y=-2)\n feature_selection_labels = [results[0][i][0] for i in range(1, len(results[0]))]\n\n plt.locator_params(axis='y', nbins=15)\n name = \"{3}_results_per_classifier_plot{0}_{4}_{1}_{2}\".format(name_suffix, parameter, current_time, x_label, datasets)\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/{0}\".format(name.replace(\" \", \"_\")), bbox_extra_artists=(legend,), bbox_inches='tight')\n plt.close(fig)", "def boxplot_from_data_frame(df,\n group_by=\"Method\",\n metric=\"Precision\",\n hue=None,\n y_min=0.0,\n y_max=1.0,\n plotf=violinplot,\n color='grey',\n color_palette=None,\n label_rotation=45):\n\n sns.set_style(\"whitegrid\")\n ax = violinplot(x=group_by, y=metric, hue=hue, data=df, color=color,\n palette=color_palette, order=sorted(df[group_by].unique()))\n ax.set_ylim(bottom=y_min, top=y_max)\n ax.set_ylabel(metric)\n ax.set_xlabel(group_by)\n for lab in ax.get_xticklabels():\n lab.set_rotation(label_rotation)\n\n plt.show()\n\n return ax", "def visualization(obj_value):\n for n in range(3):\n plt.loglog(obj_value[n],\".\");\n\n plt.ylabel('objective values');\n plt.xlabel('iteration counter');\n plt.title('objective values for each pair against iterations');\n plt.legend();\n plt.show();", "def plot_similarity_decay(\n t, ctx, ctx_test_env, max_diff=0.2, ax=None):\n if ax is None:\n ax = plt.gca()\n\n with sns.color_palette(\"GnBu_d\", ctx_test_env.n):\n out_normed = ctx / np.linalg.norm(ctx, axis=1)[:, None]\n for i in range(1, ctx_test_env.n):\n start = int((i + .7) / ctx_test_env.dt)\n end = int((i + 1.) / ctx_test_env.dt)\n target = np.mean(out_normed[start:end], axis=0)\n y = np.dot(out_normed, target)\n ax.plot(t - i, y, c=sns.color_palette()[-i])\n\n decay = lambda x: np.sqrt(1. 
- x**2)**np.floor(t)\n ax.plot(t, decay(ctx_test_env.beta), color='gray')\n\n ax.set_xlim(left=0.)\n ax.set_ylim(0., 1.)\n ax.set_xlabel(r\"Time $t/\\mathrm{s}$\")\n ax.set_ylabel(r\"$\\mathbf{c}_i \\cdot \\mathbf{c}(t)$\")\n\n return ax", "def plot_test(y_test, y_pred, title = None, xlabel = 'Measured $Y = \\log_2(MIC)$', ylabel = 'Predicted $Y = \\log_2(MIC)$', legend = ['Ideal', 'Result'], groups = None):\n \n fig, ax = plt.subplots(1,1)\n fig.set_figheight(5)\n fig.set_figwidth(5)\n if groups is not None:\n groups_obj = pd.concat([y_test,y_pred], axis=1).groupby(np.array(groups))\n cmap=plt.get_cmap('tab10')\n for name, group in groups_obj:\n # Works only for groups with numeric names that are max cmap length:\n ax.plot(group.iloc[:,0], group.iloc[:,1], marker=\"o\", linestyle=\"\", label=int(name), color = cmap.colors[int(name)])\n ax.legend()\n else:\n ax.scatter(y_test,y_pred, color = 'red')\n ax_max = 10\n if np.max(y_test.values)>ax_max:\n ax_max = np.max(y_test).values\n ax_min = 0\n if np.min(y_test.values)<ax_min:\n ax_min = np.min(y_test.values)\n ax.plot([ax_min, ax_max], [ax_min, ax_max], '--', color='black')\n ax.set_aspect('equal', 'box')\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n #plt.savefig(title+'.pdf')\n plt.savefig(title+'.svg')\n #plt.savefig(title+'.png')#, dpi=600)\n #plt.show()", "def decision_plot(self, X, y):\n\n y = self._slice_target_index(y=y)\n\n for index in range(_n_targets(y)):\n if sklearn.utils.multiclass.type_of_target(y) == 'continuous-multioutput':\n self.fit(X, y.iloc[:, index].values.ravel(order='K'))\n else:\n self.fit(X, y)\n explainer, shap_values = self.explainer(X=X)\n shap.decision_plot(base_value=explainer.expected_value, shap_values=shap_values,\n feature_names=list(X.columns), show=self.show)", "def plot_perf(ax, best_per_lr, learning_rate_updates_epoch, mode=\"loss\"):\n colors = [ \"b\", \"r\", \"g\", \"c\", \"m\", \"y\", \"k\", \"w\"]\n ind = 2*np.arange(len(best_per_lr))\n ybars = [elem[1] for elem in best_per_lr]\n width = 1\n rect = plt.bar(ind, ybars, width, color=colors[0:len(ybars)], alpha=0.5)\n ax.set_ylim([min(ybars)*0.8,max(ybars)*1.2])\n ax.set_ylabel(\"Best models %s\"%mode)\n ax.set_xticks(ind+width*0.5)\n tlabels = [\"Epoch %d\"%best_per_lr[0][0]]\n if len(best_per_lr) > 1:\n for i, elem in enumerate(best_per_lr[1:]):\n tlabels.append(\"Epoch %d\"%(elem[0]+learning_rate_updates_epoch[i]))\n ax.set_xticklabels(tlabels)\n ax.set_yticks([])\n autolabel(ax, rect)", "def plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n group,\n quant,\n strat='quantile',\n adj='IPCW', \n plot=True):\n\n allscores = np.ones_like(t).astype('float')\n\n for fold in set(folds):\n allscores[folds == fold] = scores[fold]\n\n scores = allscores\n\n b_fc = (0, 0, 1, .4)\n r_fc = (1, 0, 0, .2)\n\n b_ec = (0, 0, 1, .8)\n r_ec = (1, 0, 0, .8)\n\n n_bins = 20\n\n hatch = '//'\n\n fs = 16\n\n prob_true_n, _, outbins, ece = calibration_curve(\n scores,\n e,\n t,\n a,\n group,\n quant,\n typ=adj,\n ret_bins=True,\n strat=strat,\n n_bins=n_bins)\n \n for d in range(len(prob_true_n)):\n\n binsize = outbins[d + 1] - outbins[d]\n binloc = (outbins[d + 1] + outbins[d]) / 2\n\n gap = (prob_true_n[d] - binloc)\n\n if gap < 0:\n bottom = prob_true_n[d]\n else:\n bottom = prob_true_n[d] - abs(gap)\n\n if d == len(prob_true_n) - 1:\n lbl1 = 'Score'\n lbl2 = 'Gap'\n else:\n lbl1 = None\n lbl2 = None\n \n if plot:\n ax.bar(\n binloc,\n prob_true_n[d],\n width=binsize,\n facecolor=b_fc,\n edgecolor=b_ec,\n linewidth=2.5,\n label=lbl1)\n 
ax.bar(\n binloc,\n abs(gap),\n bottom=bottom,\n width=binsize,\n facecolor=r_fc,\n edgecolor=r_ec,\n linewidth=2.5,\n hatch=hatch,\n label=lbl2)\n\n d += 1\n \n if plot:\n \n ax.plot([0, 1], [0, 1], c='k', ls='--', lw=2, zorder=100)\n\n ax.set_xlabel('Predicted Score', fontsize=fs)\n ax.set_ylabel('True Score', fontsize=fs)\n\n ax.legend(fontsize=fs)\n ax.set_title(str(group), fontsize=fs)\n ax.set_xlim(0, 1)\n ax.set_ylim(0, 1)\n\n ax.grid(ls=':', lw=2, zorder=-100, color='grey')\n ax.set_axisbelow(True)\n\n ax.text(\n x=0.030,\n y=.7,\n s='ECE=' + str(round(ece, 3)),\n size=fs,\n bbox=dict(boxstyle='round', fc='white', ec='grey', pad=0.2))\n\n return ece", "def ridge_cross_validation_visualization(lambdas, accuracies):\n colors = ['r', 'b', 'y', 'g']\n labels = ['group_0', 'group_1', 'group_2', 'group_3']\n for i in range(len(accuracies)):\n plt.semilogx(lambdas, accuracies[i], marker=\".\", color=colors[i], label=labels[i])\n plt.xlabel(\"lambda\")\n plt.ylabel(\"accuracy\")\n plt.xlim(1e-4, 1)\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"./img/ridge_cross_validation\")", "def plot_strategy(ticker, start, end, df):\r\n\r\n fig = plt.figure(figsize=(20, 10))\r\n ax1 = plt.plot(df)\r\n ax1 = plt.title(\"Comparing simple investment strategies for \" +\r\n ticker + \" between \" + start + \" and \" + end, fontsize=22)\r\n ax1 = plt.xlabel(\"Date\", fontsize=18)\r\n ax1 = plt.ylabel(\"Price\", fontsize=18)\r\n ax1 = plt.legend(list(df_return_of_strategy.columns), prop={\"size\": 22}, loc=\"upper left\")\r\n plt.grid(True)\r\n plt.show()", "def plot_strategy(ticker, start, end, df):\r\n\r\n fig = plt.figure(figsize=(20, 10))\r\n ax1 = plt.plot(df)\r\n ax1 = plt.title(\"Comparing simple investment strategies for \" +\r\n ticker + \" between \" + start + \" and \" + end, fontsize=22)\r\n ax1 = plt.xlabel(\"Date\", fontsize=18)\r\n ax1 = plt.ylabel(\"Price\", fontsize=18)\r\n ax1 = plt.legend(list(df_return_of_strategy.columns), prop={\"size\": 22}, loc=\"upper left\")\r\n plt.grid(True)\r\n plt.show()", "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5),color1 = \"g\",color2 = \"r\",\n plotmedian=True, includebasetext=False, baselinepos=[180,0.9]):\n fig,ax = plt.subplots(1,1,figsize=(10,7))\n plt.title(title, color=\"dimgray\", loc=\"left\", size=20)\n plt.xlabel(\"Training sample size\", color=\"dimgrey\",size=15)\n plt.ylabel(\"Accuracy score\", color=\"dimgrey\",size=15)\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n print (\"number of element in train sample = \", train_sizes)\n\n \n \n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_median = np.median(train_scores, axis=1)\n\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n if plotmedian:\n test_scores_median = np.median(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n #plt.grid()\n plt.plot(train_sizes[:], np.zeros(len(train_sizes))+0.889, ls=\"--\", color = \"dimgrey\", lw=2)\n if includebasetext:\n plt.text(baselinepos[0], baselinepos[1], \"baseline prediction\",color=\"dimgrey\",size=15)\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.3,\n color=color2)\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.3, color=color1)\n 
plt.plot(train_sizes, train_scores_mean, 'o-', color=color2, lw=2,\n label=\"Training\")\n if plotmedian:\n plt.plot(train_sizes, train_scores_median, 'x:', color=color2,alpha=0.3,\n label=\"Training\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=color1,\n label=\"Cross-validation\", lw=3)\n if plotmedian:\n plt.plot(train_sizes, test_scores_median, 'x:', color=color1, alpha=0.3,\n label=\"Cross-validation\")\n plt.ylim([0.8,1.05])\n\n legend=plt.legend(loc=\"best\")\n plt.setp(legend.get_texts(), color='dimgray', size=15)\n\n ax.spines['bottom'].set_color('dimgray')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_color('dimgray')\n ax.tick_params(axis='x', colors='dimgray')\n ax.tick_params(axis='y', colors='dimgray', size=3)\n ax.get_yaxis().set_ticks([0.8,0.9,1.0])\n\n ax.tick_params(labelsize=14)\n\n return plt", "def parameter_compare(regressions,colors=['m','c'],upper_q=75,lower_q=25,ci_alpha = 0.2, bound_alpha = 0.0,\n labels = None,vertical_bbox_position = 1.4,width = 6,height = 5,draw_samples=True,num_samples =500):\n\n assert type(regressions) is dict\n \n # If no labels are provided, we take them from the first DynamicRegression object\n if labels is None:\n labels = regressions[regressions.keys()[0]].predictor_columns\n \n # this is the number of subplots in this figure\n n_predictors = regressions[regressions.keys()[0]].design.shape[1]\n figure, axes = plt.subplots(n_predictors,figsize = (width,height),sharex=True)\n \n for i,key in enumerate(regressions.keys()):\n \n if draw_samples:\n samples = regressions[key].ffbs.backward_sample(num_samples = num_samples)\n else:\n samples = regressions[key].ffbs.theta\n x = regressions[key].design.index\n \n for j in range(n_predictors):\n \n # Calculate and plot the confidence interval plus median\n lower = np.percentile(samples[:,j,:],lower_q,axis=1)\n upper = np.percentile(samples[:,j,:],upper_q,axis=1)\n median = np.percentile(samples[:,j,:],50,axis=1)\n axes[j].fill_between(x,upper,lower,color=colors[i],alpha = ci_alpha,\n label = '{0}%-{1}% range for {2}'.format(lower_q,upper_q,key))\n axes[j].plot(x,lower,color=colors[i],linestyle='--',alpha = bound_alpha)\n axes[j].plot(x,upper,color=colors[i],linestyle='--',alpha = bound_alpha)\n axes[j].plot(x,median,color=colors[i])\n axes[j].tick_params(direction = 'in')\n\n # a twin axis is made so we can label it easily on the right hand side of the plot\n twin = plt.twinx(axes[j])\n twin.set_ylabel(labels[j])\n \n # hide the tick labels and ticks because we only want the axis label\n twin.set_yticks([])\n \n axes[0].legend(ncol=len(list(regressions.keys())),bbox_to_anchor=(1.00, vertical_bbox_position), borderaxespad=0.,frameon=True,edgecolor='k',fancybox=False)\n return figure", "def PlotComparison(result_values, descrete, continuous, jitter=100):\n df = result_values.copy()\n np.random.seed(0)\n df[continuous] = df[continuous] + np.random.randint(low=-jitter, high=jitter, size=len(df))\n base = alt.Chart(df).transform_calculate(\n ymin=\"datum.mean-2*datum.std\",\n ymax=\"datum.mean+2*datum.std\",\n ).properties(\n title = '[Interactive] Accuracy by Params'\n )\n \n points = base.mark_point(\n filled=True,\n size=10\n ).encode(\n x=continuous,\n y=alt.Y('mean:Q'),#, scale=alt.Scale(domain=(0.55, 0.7))),\n color=descrete,\n tooltip=['mean','std']\n )\n\n errorbars = base.mark_errorbar().encode(\n x=continuous,\n y=alt.Y(\"ymin:Q\",title='Accuracy'),\n y2=\"ymax:Q\",\n color=descrete,\n )\n\n return(points + errorbars)", "def 
plot_exp1():\n legend = ['unweighted', 'weighted']\n labels = ['Degree','Closeness','Current-flow closeness','Betweenness','Current-flow betweenness','Load','Eigenvector','PageRank','HITS authorities','HITS hubs']\n\n # classification\n d = [[0.52500000000000002,0.49444444444444446], # Degree\n [0.57499999999999996,0.57499999999999996], # Closeness\n [0.56944444444444442,0.58333333333333337], # Current-flow closeness\n [0.36388888888888887,0.36944444444444446], # Betweenness\n [0.23333333333333334,0.20833333333333334], # Current-flow betweenness\n [0.35555555555555557,0.36666666666666664], # Load\n [0.49722222222222223,0.45555555555555555], # Eigenvector\n [0.52777777777777779,0.51111111111111107], # PageRank\n [0.49722222222222223,0.45555555555555555], # HITS authorities\n [0.49722222222222223,0.45555555555555555]] # HITS hubs\n ys = {0:'0.0',.1:'0.1',.2:'0.2', .3:'0.3',.4:'0.4',.5:'0.5',.6:'0.6'}\n fig = plotter.tikz_barchart(d, labels, scale = 3.5, yscale=2.8, color='black', legend=legend, legend_sep=1.0, tick=False, y_tics=ys)\n data.write_to_file(fig,'../../masteroppgave/report/imgs/tikz/dependency_eval_class.tex',mode='w')\n\n # retrieval\n d = [[0.18149811054435275,0.18821229318222113], # Degree\n [0.17184314735361236,0.18216618328598347], # Closeness\n [0.14606637651984622,0.13586098100141117], # Betweenness\n [0.17399729543537901,0.17613717518129621], # Current-flow closeness\n [0.042019078720146409,0.042019078720146409], # Current-flow betweenness\n [0.14700372822743263,0.15104493506838745], # Load\n [0.19854658693196564,0.17540014008712554], # Eigenvector\n [0.17725358882165362,0.17252331100724849], # PageRank\n [0.19854658693196564,0.17540014008712554], # HITS authorities\n [0.19854658693196564,0.17540014008712554]] # HITS hubs\n ys = {0:'0.0',.05:'0.05', .1:'0.1',.15:'0.15', .2:'0.2'}\n fig = plotter.tikz_barchart(d, labels, scale = 3.5, yscale=8, color='black', legend=legend, legend_sep=1.0, tick=False, grid_step=0.05, y_tics=ys)\n data.write_to_file(fig,'../../masteroppgave/report/imgs/tikz/dependency_eval_retr.tex',mode='w')", "def display_group_density_plot(df, groupby, on, palette, figsize):\n\n if not isinstance(df, pd.core.frame.DataFrame):\n raise ValueError('df must be a pandas DataFrame')\n\n if not groupby:\n raise ValueError('groupby parameter must be provided')\n\n elif not groupby in df.keys():\n raise ValueError(groupby + ' column does not exist in the given DataFrame')\n\n if not on:\n raise ValueError('on parameter must be provided')\n\n elif not on in df.keys():\n raise ValueError(on + ' column does not exist in the given DataFrame')\n\n if len(set(df[groupby])) > 10:\n groups = df[groupby].value_counts().index[:10]\n\n else:\n groups = set(df[groupby])\n\n # Get relevant palette\n if palette:\n palette = palette[:len(groups)]\n else:\n palette = sns.color_palette()[:len(groups)]\n\n # Plot\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')\n\n for value, color in zip(groups, palette):\n sns.kdeplot(df.loc[df[groupby] == value][on], shade=True, color=color, label=value)\n\n ax.set_title(str(\"Distribution of \" + on + \" per \" + groupby + \" group\"), fontsize=30)\n \n ax.set_xlabel(on, fontsize=20)\n return ax", "def plot_fishing_mortality(df):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_position(default_timeseries_position) \n\n Fn = df['Fn'].groupby([df.Year, df.Reg, df.Sreg]).mean()\n\n all_fishing_mortality = Fn.loc[:, 'All', 'All']\n ma_fishing_mortality = Fn.loc[:, 
'1', 'All']\n gb_fishing_mortality = Fn.loc[:, '2', 'All']\n\n # Don't plot the first year. Also, the data is shifted by one year.\n # For some reason, restricting the year range above results in a series\n # that still have a multi-index. This seems like the cleanest way to do\n # that.\n all_fishing_mortality = all_fishing_mortality[2:]\n ma_fishing_mortality = ma_fishing_mortality[2:]\n gb_fishing_mortality = gb_fishing_mortality[2:]\n\n all_fishing_mortality.index = all_fishing_mortality.index - 1\n ma_fishing_mortality.index = ma_fishing_mortality.index - 1\n gb_fishing_mortality.index = gb_fishing_mortality.index - 1\n\n all_fishing_mortality.plot(ax=ax, label='All') \n ma_fishing_mortality.plot(ax=ax, label='Mid Atlantic')\n gb_fishing_mortality.plot(ax=ax, label='Georges Bank')\n\n ax.legend(loc='best')\n\n content = io.BytesIO()\n plt.savefig(content, format='png')\n content.seek(0)\n image_cache['fishing_mortality']['fishing_mortality'] = content\n\n plt.close()", "def plot_series(groups, series):\n fig, ax = plt.subplots()\n ax.set_xlabel(\"Iterations\")\n ax.set_ylabel(series)\n\n for gkey, gval in groups.items():\n args = dict(gkey)\n\n series_values = get_series(gval, series)\n interval_size = args['test_interval']\n interval_count = series_values.shape[1] - 1\n\n x = np.arange(0, interval_size * interval_count + 1, step=interval_size)\n mean = np.mean(series_values, axis=0)\n std = np.std(series_values, axis=0)\n\n ax.plot(x, mean, label=format_group_key(gkey))\n ax.fill_between(x, mean + std, mean - std, alpha=0.2)\n\n ax.legend()\n return fig, ax", "def plot_model_curves(class_name, model, range_metrics, ax):\n def plot_axis(ax, data, color):\n \"\"\"\n Plot data on axis in certain color\n \"\"\"\n x_indices = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n ax.scatter(x_indices, data, color=color, s=4)\n ax.plot(x_indices, data, color=color, linewidth=2)\n ax.set_yticks([]) # same for y ticks\n ax.set_ylim([0, 1])\n # Get balanced purities\n preds = np.concatenate(model.results)\n if model.name == \"Binary Classifiers\":\n purities = get_binary_balanced_purity_ranges(\n preds, model.class_labels, 0.1, model.class_counts)[class_name]\n else:\n purities = get_balanced_purity_ranges(\n preds, model.class_labels, 0.1, model.class_counts)[class_name]\n\n # Get completenesses\n comps = get_completeness_ranges(model.class_counts, range_metrics, class_name)\n\n print(\"\\n\\n Model: \" + str(model.name) + \", class: \" + class_name)\n print(\"Completeness\")\n print(comps)\n print(\"Purity\")\n print(purities)\n\n plot_axis(ax, comps, C_BAR_COLOR)\n ax2 = ax.twinx() # instantiate a second axes that shares the same x-axis\n ax2.set_ylim([0, 1])\n plot_axis(ax2, purities, P_BAR_COLOR)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(1.5)\n return ax2", "def plot_k_clust(model, cparam, num_clusters_set, clust_thresh_size, col_palette, max_x=8, x_step=2.0, y_label=[0,24,48], y_minmax=(-5, 53), xlabel='Time from Onset (Years)', ylabel='ALSFRS-R Total'):\n fig = plt.figure(figsize=(20,7), constrained_layout=True)\n wrs = [2, 0.2]+[1]*(math.ceil(num_clusters_set/2))\n gs = fig.add_gridspec(2, len(wrs), width_ratios=wrs)\n f_ax1 = fig.add_subplot(gs[:, 0])\n\n axs = []\n for i in range(0,2):\n for j in range(2,math.ceil(num_clusters_set/2)+2):\n axs.append(fig.add_subplot(gs[i,j]))\n\n # Plot kcluster\n kmeans = KMeans(n_clusters=num_clusters_set, random_state=0).fit(cparam[['neg_linmap.A', 'rbf.lengthscale']])\n cparam['k_label']=kmeans.labels_\n 
cparam_freq = cparam.groupby('k_label')['clust_size'].sum()/cparam['clust_size'].sum()\n collist = [col_palette[x] for x in kmeans.labels_]\n f_ax1.scatter(cparam['neg_linmap.A'],cparam['rbf.lengthscale'], s=cparam['clust_size']*2, color=collist, alpha=0.9)\n _ = f_ax1.set_xlabel('Negative Mean Slope')\n _ = f_ax1.set_ylabel('Lengthscale')\n\n\n # plot clusters on subplots below\n nkclust = len(np.unique(kmeans.labels_))\n# klist = np.unique(kmeans.labels_)\n # sort by progression rate\n klist = cparam.groupby('k_label')['neg_linmap.A'].mean().sort_values(ascending=False).index\n kalph = [ascii_lowercase[a] for a, j in enumerate(klist)]\n cparam['kalph']=cparam['k_label'].map(dict(zip(klist,kalph)))\n # klist = [2,0,3,1]\n\n for j, kclust in enumerate(klist):\n allclust = cparam.index[kmeans.labels_==kclust]\n cax = axs[j]\n for i, k in enumerate(allclust):\n if model.allocmodel.Nk[int(k)]>=clust_thresh_size:\n _, num_pat_k = plot_mogp_by_clust(cax, model, None, int(k), data_flag=False, data_col=col_palette[kclust],\n model_flag=True, model_col=col_palette[kclust])\n _ = format_mogp_axs(cax, max_x=max_x, x_step=x_step, y_label=y_label, y_minmax=y_minmax)\n _ = cax.text(0.9, 0.9, '{}) {:.2f}%'.format(ascii_lowercase[j], cparam_freq.loc[kclust]*100), va='top', ha='right', transform = cax.transAxes)\n _ = cax.get_legend().remove()\n _ = cax.set_xlabel(xlabel)\n _ = cax.set_ylabel(ylabel)\n return cparam, cparam_freq, fig, f_ax1, axs", "def plot(self, data_grouped, *args, **kw):\n sdata = _scale_data(data_grouped, self.ranges)\n self.ax.plot(self.angle, np.r_[sdata, sdata[0]], *args, **kw)", "def plot_convergence(self, x, y, **kwargs):\n self.plot(x, y, **kwargs)", "def plot_degree(degree_object, output_file):\n\n D = dict(degree_object)\n degrees = {k: v for k, v in D.items()}\n degree_values=np.array(list(degrees.values()))\n\n fig, axes = plt.subplots(1, figsize=(10, 10))\n g1 = sns.distplot(degree_values, hist=True, ax=axes)\n\n key_max = max(degrees.keys(), key=(lambda k: degrees[k]))\n g1 = sns.distplot([degrees[key_max]], hist=False, kde=False, rug=True, color='r', ax=axes)\n axes.annotate('%s: %d' %(key_max, degrees[key_max]), xy=(degrees[key_max], 0),\n xytext=(degrees[key_max], axes.dataLim.y1/2),\n arrowprops=dict(arrowstyle=\"->\")\n )\n\n g1 = sns.distplot([np.median(degree_values)], hist=False, kde=False, rug=True, color='r', ax=axes)\n axes.annotate('median %f' %np.median(degree_values), xy=(np.median(degree_values), 0),\n xytext=(np.median(degree_values), axes.dataLim.y1/2),\n arrowprops=dict(arrowstyle=\"->\")\n )\n\n sns.despine(ax=axes, top=True, bottom=False, right=True, left=True)\n g1.set_ylabel(\"Density\")\n g1.set_xlabel(\"Node Degree\")\n\n if output_file.endswith('.pdf'):\n plt.savefig(output_file, format=\"pdf\")\n elif output_file.endswith('.png'):\n plt.savefig(output_file, format=\"png\")\n else:\n logging.warning('The null distribution figure can only be saved in pdf or png, forced to png')\n fig.savefig(output_file+'.png', format=\"png\")", "def plot_partial_dependence(estimator, X, features, *, feature_names=..., target=..., response_method=..., n_cols=..., grid_resolution=..., percentiles=..., method=..., n_jobs=..., verbose=..., line_kw=..., contour_kw=..., ax=..., kind=..., subsample=..., random_state=...):\n ...", "def make_final_graph(base_dir=DEFAULT_BASE_DIR,\n start_run=0, end_run=100):\n plt.style.use('default')\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif') # sans-\n plt.rcParams.update({'font.size': 16,\n 'font.serif' : ['Computer 
Modern Roman']})\n plt.figure(1, figsize=(8, 7))\n pos = {4: 221, 2: 222, 1: 223, 0:224}\n for i, _ in [(4, 10000), (2, 25), (1, 5), (0, 1)]:\n out_folder_list = [\"{}/exp_{}/run_{:02d}\".format(base_dir, i, j)\n for j in range(start_run, end_run)]\n res_dict = dict()\n\n for out_folder in out_folder_list:\n p_learn = json.load(open(\n \"{}/dynamics.json\".format(out_folder), \"rt\"))\n\n # Convert to array to make everything plottable.\n for k in p_learn:\n if k.endswith(\"AUC\"):\n p_learn[k] = np.array(p_learn[k])\n if k in res_dict:\n res_dict[k].append(p_learn[k])\n else:\n res_dict[k] = [p_learn[k]]\n\n out_folder_plot = \"/\".join(out_folder_list[0].split(\"/\")[:-1])\n plt.subplot(pos[i])\n me.plot_quantiles(res_dict, out_folder_plot, \"quantile\",\n pos=pos[i]%10, saveit=False)\n plt.savefig(\"cumul_shuttle_exp.pdf\")", "def plot_balancer_results_per_classifier(data_balancer_results_per_classifier, parameter=(2, \"Balanced Accuracy\")):\n classifier_arr = []\n color = iter(cm.Set1(np.linspace(0, 1, len(data_balancer_results_per_classifier) + 1)))\n mean_classifier_arr = [0] * len(data_balancer_results_per_classifier[0][1])\n for (classifier_name, data_balancer_results) in data_balancer_results_per_classifier:\n individual_data_balance_plot = []\n x = 0\n for (data_balancer_name, result_arr) in data_balancer_results:\n individual_data_balance_plot.append(result_arr[parameter[0]]) # Average True rate\n mean_classifier_arr[x] += result_arr[parameter[0]]\n x += 1\n classifier_arr.append(individual_data_balance_plot)\n\n classifier_arr.append([value / float(len(data_balancer_results_per_classifier)) for value in mean_classifier_arr])\n\n fig = plt.figure(figsize=(12, 10))\n\n classifiers = np.arange(len(classifier_arr))\n data_balancers = np.arange(len(classifier_arr[0])) * 3\n bar_width = 0.2\n opacity = 0.9\n\n for i in range(len(classifier_arr)):\n if i + 1 != len(classifier_arr):\n label = data_balancer_results_per_classifier[i][0]\n else:\n label = \"Mean classification\"\n\n plt.bar(data_balancers + (i * bar_width), classifier_arr[i], bar_width,\n alpha=opacity,\n color=color.next(),\n label=label)\n\n plt.locator_params(axis='y', nbins=10)\n plt.xlabel(\"Data balance algorithm\")\n plt.ylabel(parameter[1])\n plt.legend(loc=\"lower right\", fancybox=True, frameon=True)\n plt.title(\"{0} per data balance algorithm\".format(parameter[1]))\n plt.ylim([0.0, 1.00])\n data_balance_labels = [filter(str.isupper, data_balance_name) if data_balance_name != \"None\" and len(filter(str.isupper, data_balance_name)) < 6 else data_balance_name for\n (data_balance_name, _) in data_balancer_results_per_classifier[0][1]]\n plt.xticks(data_balancers + (bar_width / 2) * len(classifiers), data_balance_labels)\n\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/data_balancer_results_per_classifier_plot_{0}_{1}.png\".format(parameter[1], current_time))\n plt.close(fig)", "def plot_sorted_accuracies(results):\n acc = []\n for comb in results:\n acc.append(comb[\"accuracy\"])\n sorted_list = sorted(acc)\n plt.plot(range(42),sorted_list,'bo-')\n plt.ylabel(\"Accuracy\")\n plt.xlabel(\"Setting\")\n plt.savefig(\"accuracies.png\")", "def plot_slice_wise_measures(labels, preds, args):\n\n cal_roc = [[], []]\n cal_prrcf1 = [[], [], []] # save PR, RC, F1 respectively\n noncal_prrcf1 = [[], [], []]\n thres_all = []\n noncal_roc = [[], []]\n n_slices = len(labels)\n for thres in range(500, -1, -5):\n print(\"[Threshold # of 
pixels: {}]\".format(thres))\n thres_all.append(thres)\n cal_pgt, cal_pp, cal_tp, noncal_pgt, noncal_pp, noncal_tp = \\\n plaque_detection_rate(labels, preds, thres=thres)\n\n\n cal_prrcf1[0].append(float(cal_tp) / cal_pp if cal_pp != 0 else 0.0)\n cal_prrcf1[1].append(float(cal_tp) / cal_pgt)\n cal_prrcf1[2].append(2.0 * cal_tp / (cal_pgt + cal_pp))\n noncal_prrcf1[0].append(float(noncal_tp) / noncal_pp if noncal_pp != 0 else 0.0)\n noncal_prrcf1[1].append(float(noncal_tp) / noncal_pgt)\n noncal_prrcf1[2].append(2.0 * noncal_tp / (noncal_pgt + noncal_pp))\n\n cal_roc[0].append((cal_pp - cal_tp) / (n_slices - cal_pgt)) # false negative ratio\n cal_roc[1].append(cal_tp / cal_pgt) # true positive ratio\n noncal_roc[0].append((noncal_pp - noncal_tp) / (n_slices - noncal_pgt)) # false negative ratio\n noncal_roc[1].append(noncal_tp / noncal_pgt) # true positive ratio\n\n print('Cal: PR - {:.4f} RC - {:.4f} F1 - {:.4f} Noncal: PR - {:.4f} RC - {:.4f} F1 - {:.4f}'.format(\n cal_prrcf1[0][-1], cal_prrcf1[1][-1], cal_prrcf1[2][-1],\n noncal_prrcf1[0][-1], noncal_prrcf1[1][-1], noncal_prrcf1[2][-1]))\n print('Cal: fpr - {:.4f} tpr - {:.4f} Noncal: fpr - {:.4f} tpr - {:.4f}'.format(\n cal_roc[0][-1], cal_roc[1][-1], noncal_roc[0][-1], noncal_roc[1][-1]))\n\n # plot the roc curve and calculate AUC\n fig_names = ['calcified', 'non-calcified']\n for plq_metrics, fig_name in zip([cal_roc, noncal_roc], fig_names):\n plt.figure()\n lw = 2\n auc_metric = auc(plq_metrics[0], plq_metrics[1])\n print(\"{} : {}\".format(fig_name, auc_metric))\n plt.plot(plq_metrics[0], plq_metrics[1], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % auc_metric)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('slice-wise ROC curve of {} plaques'.format(fig_name))\n plt.legend(loc=\"lower right\")\n plt.savefig(\"./{}/{}_roc.png\".format(args.fig_dir, fig_name))\n\n for plq_metrics, fig_name in zip([cal_prrcf1, noncal_prrcf1], fig_names):\n plt.figure()\n lw = 2\n plt.plot(thres_all, plq_metrics[0], color='r', lw=lw, label='precision')\n plt.plot(thres_all, plq_metrics[1], color='g', lw=lw, label='recall')\n plt.plot(thres_all, plq_metrics[2], color='b', lw=lw, label='f1')\n\n plt.xlim([min(thres_all), max(thres_all)])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Threshold Number of Pixels')\n plt.title('{} measures under different thresholds'.format(fig_name))\n plt.legend(bbox_to_anchor=(1, 0.95), loc=\"upper right\")\n plt.savefig(\"./{}/{}_prrcf1.png\".format(args.fig_dir, fig_name))", "def plot(self):\r\n \r\n\r\n print(\"Printing decision surfaces of decision trees\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n for _ in range (self.n_estimators):\r\n plt.subplot(2, 3, _ + 1)\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = self.clfs[_].predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], 
self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface of a decision tree using paired features\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig1 = plt\r\n\r\n # Figure 2\r\n print(\"Printing decision surface by combining the individual estimators\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = config.Classifier_AB.predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface by combining individual estimators\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig2 = plt\r\n\r\n return [fig1,fig2]", "def ranking(self, dimfun, groupby, ftarget=10**-8):\n nameds = list(itertools.chain(self.algds_dimfunc(dimfun), self.stratds_dimfunc(dimfun)))\n count = len(nameds)\n\n # Produce \"fv\" items, one per dataset, containing single function value\n # for each budget\n fvset = []\n for (name, ds) in nameds:\n budgets = ds.funvals[:,0]\n f1vals = np.maximum(groupby(ds.funvals[:, 1:], axis=1), ftarget)\n fv = np.transpose(np.vstack([budgets, f1vals]))\n fvset.append(fv)\n\n # Align the \"fv\" items by budget and merge them\n fva = ra.alignArrayData(ra.VArrayMultiReader(fvset))\n budgets = fva[:,0]\n\n # Assign function values and rank them\n # However, we want to resolve eventual ties by ranking first\n # converging function first. 
So we do a trick and rewrite ftarget\n # values in increasing convergence sort order.\n values = fva[:,1:].copy()\n firstconv = np.ones(count) * (np.size(budgets)+1) # runlength+1 is default\n for i in range(count): # XXX: drop the loop\n try:\n firstconv[i] = np.nonzero(values[:,i] == ftarget)[0][0]\n except IndexError:\n continue # no rewriting needed\n firstconvranks = ss.mstats.rankdata(firstconv)\n for i in range(count):\n r = firstconvranks[i]\n values[firstconv[i]:, i] = ftarget - (1-r/count)*ftarget\n\n ranks = ss.mstats.rankdata(values, axis=1)\n\n return np.transpose(np.vstack([budgets, ranks.T]))", "def display_group_density_plot(df, groupby, on, palette, figsize):\n\n if not isinstance(df, pd.core.frame.DataFrame):\n raise ValueError('df must be a pandas DataFrame')\n\n if not groupby:\n raise ValueError('groupby parameter must be provided')\n\n elif not groupby in df.keys():\n raise ValueError(groupby + ' column does not exist in the given DataFrame')\n\n if not on:\n raise ValueError('on parameter must be provided')\n\n elif not on in df.keys():\n raise ValueError(on + ' column does not exist in the given DataFrame')\n\n if len(set(df[groupby])) > 10:\n groups = df[groupby].value_counts().index[:10]\n\n else:\n groups = set(df[groupby])\n\n # Get relevant palette\n if palette:\n palette = palette[:len(groups)]\n else:\n palette = sns.color_palette()[:len(groups)]\n\n # Plot\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')\n\n for value, color in zip(groups, palette):\n sns.kdeplot(df.loc[df[groupby] == value][on], shade=True, color=color, label=value)\n\n ax.set_title(str(\"Distribution of \" + on + \" per \" + groupby + \" group\"), fontsize=10)\n return ax", "def oop(poster=False):\n fig = plt.figure(constrained_layout=True, figsize=(7.8, 5.5))\n gs = gridspec.GridSpec(nrows=4, ncols=10, hspace=0.5, wspace=0.0,\n left=0.075, bottom=0.07, top=0.91, right=0.95,\n width_ratios=(1, 0.25, 0.15, 0.05, 0.03, 0.17, 0.35, 0.05, 0.03, 0.12),\n height_ratios=(0.5, 0.5, 0.9, 0.2))\n ax1 = fig.add_subplot(gs[0:2, 0:3])\n ax2 = fig.add_subplot(gs[2, 0])\n\n axb1 = fig.add_subplot(gs[0, 6], projection=fabricplotlib.PRJ)\n axb2 = fig.add_subplot(gs[1, 6], projection=fabricplotlib.PRJ)\n axb_cb = fig.add_subplot(gs[0:2, 8])\n\n ax8 = fig.add_subplot(gs[2, 2:10], sharex=ax2)\n ax_leg = fig.add_subplot(gs[3, :])\n ax_leg.axis('off')\n dist = 75.0\n x, y = np.ones(3) * 1000.0 * dist, np.array([1000., 1500.])\n\n ftws = ['ftw', 'uc', 'hiacc']\n oops = ['1.0e-3', '1.0e-2', '1.0e-1']\n bms = ['0.0', '1.0e-2', '2.0e-2']\n rcs = ['2.0e-3', '2.0e-4', '2.0e-2']\n\n oop_names = [name_fmt.format(ftws[0], rcs[0], oop, bms[0]) for oop in oops]\n bck_names = [ob_name_fmt.format(ftws[0], rcs[0], oop, bms[0]) for oop in oops]\n\n fmts = oop_names + bck_names\n pretty_fmts = {}\n pretty_fmts_oop = {}\n for fmt in fmts:\n fm_spl = fmt.split('_')\n pretty_fmts[fmt] = r'%s $\\lambda$=%1.4g $\\dot b$=%1.4g' % (short_name_dict[fm_spl[1]], float(fm_spl[2][2:]), float(fm_spl[4][2:]))\n if fmt[0] == 's':\n pretty_fmts_oop[fmt] = r'$\\dot\\varepsilon_{xy}^{(max)}$=%1.4g Fwd' % (float(fm_spl[3][3:]))\n else:\n pretty_fmts_oop[fmt] = r'$\\dot\\varepsilon_{xy}^{(max)}$=%1.4g Bck' % (float(fm_spl[3][3:]))\n\n\n if not debug:\n files = [glob('../stream/stream_ftw/' + fmt + '_????.vtu') for fmt in fmts]\n inds = [np.argsort(np.array([float(fn[-8:-4]) for fn in filel])) for filel in files]\n fns = [[file_list[ind] for ind in inds_i] for inds_i, file_list 
in zip(inds, files)]\n\n times = np.hstack(([0.0, 0.1], np.arange(10.0, 100010.0, 10.0)))\n\n a_s = {}\n for i, fmt in enumerate(fmts):\n a_s[fmt] = get_vars_or_cache(x, y, fmt, fns[i], folder='../stream/stream_ftw')\n timess = {fmt: times[:min(a_s[fmt]['fabric 1'].shape[1], len(times))] for fmt, fn in zip(fmts, fns)}\n taus = {name: val / (dist * 1000.0 / 50.0) for name, val in timess.items()}\n\n else:\n taus = {name: np.linspace(0, 3, 100) for name in fmts}\n vtu = {name: np.ones((3, 100)) for name in ['fabric 1', 'fabric 2', 'fabric 3', 'eigenv 1', 'eigenv 2', 'eigenv 3', 'eigenv 4']}\n a_s = {name: vtu for name in fmts}\n\n def do_plot(fmt, color, lw, ls):\n if fmt[0] == 's':\n label = pretty_fmts_oop[fmt]\n else:\n label = None\n a_s[fmt]['eigenv 3'][1, :][a_s[fmt]['eigenv 3'][1, :] > 1.0] = 1.0\n ax2.plot(timess[fmt] / 1000.0, a_s[fmt]['eigenv 3'][1, :], color=color, linewidth=lw, linestyle=ls, label=label)\n\n if fmt[0] == 'a':\n label = pretty_fmts_oop[fmt]\n else:\n label = None\n\n ax8.plot(timess[fmt][1:] / 1000.0, fabricplotlib.fabric_to_hor_rot(a_s[fmt]['fabric 1'][1, 1:],\n a_s[fmt]['fabric 2'][1, 1:],\n a_s[fmt]['fabric 5'][1, 1:]),\n color=color, linewidth=lw, linestyle=ls, label=pretty_fmts_oop[fmt])\n\n for fmt, color in zip(oop_names, oop_colors):\n do_plot(fmt, color, 1, 'solid')\n\n for fmt, color in zip(bck_names, oop_colors):\n do_plot(fmt, color, 1, 'dashed')\n\n ax1.set_ylabel('Depth (m)', fontsize=fs)\n ax1.set_xlabel('Distance (km)', fontsize=fs)\n ax2.set_ylabel(r'$a^{(2)}_{1}$', fontsize=fs)\n ax8.set_ylabel(r'$\\theta$', fontsize=fs)\n ax2.set_xlabel(r'Time (kyr)', fontsize=fs)\n ax8.set_xlabel(r'Time (kyr)', fontsize=fs)\n ax2.set_ylim(0.66666, 1.0)\n ax2.set_yticks([0.66666, 5. / 6., 1.])\n ax2.set_yticklabels([r'$\\frac{2}{3}$', r'$\\frac{5}{6}$', '1'])\n ax2.set_xlim(0, 3)\n ax8.set_ylim(0, 45)\n h, l = ax8.get_legend_handles_labels()\n ax_leg.legend([h[0], h[3], h[1], h[4], h[2], h[5]], [l[0], l[3], l[1], l[4], l[2], l[5]], loc='upper left', frameon=False, ncol=3, fontsize=fs)\n # ax2.legend(loc='upper right', frameon=False, fontsize=fs)\n # ax5.legend(loc='upper right', frameon=False, fontsize=fs)\n\n vtus = [fastvtulib.get_structured_vtu(fns[1][-1])]\n tris = [Triangulation(np.array([rc[0] + 100000. for rc in vtu.raw_coords[:, 0]]) / 1000., np.array([rc[1] for rc in vtu.raw_coords[:, 0]]), vtu.simptt) for vtu in vtus]\n\n a12_axes = [ax1]\n ax_c_a12 = fig.add_subplot(gs[0:2, 4])\n\n for axa, tri, vtu in zip(a12_axes, tris, vtus):\n axa.set_xlim(0, 175)\n axa.set_xticks([0., 50., 100., 150.])\n axa.set_xticklabels(['' for tick in axa.get_xticklabels()])\n\n cm3 = axa.tricontourf(tri, vtu.rawdata_dict['eigenv 3'], cmap='summer', levels=np.linspace(0.3333, 1, 101), extend='neither')\n for c in cm3.collections:\n c.set_edgecolor(\"face\")\n axa.set_xticklabels(['0', '50', '100', '150'])\n fabricplotlib.quiver(axa, vtu, scale=25, width=0.003)\n\n a12_axes[0].scatter(-1000, -1000, marker=r'$\\uparrow$', label='Single max. in x-z', color='k')\n a12_axes[0].legend(loc='lower left', bbox_to_anchor=(0.1, 1.0), ncol=2, fontsize=fs, framealpha=1.0)\n a12_axes[0].set_xlim(0, 175)\n a12_axes[0].set_ylim(0, 2200)\n\n cbr = plt.colorbar(cm3, cax=ax_c_a12, orientation='vertical', ticks=(1. / 3., 2. 
/ 3., 1.))\n cbr.set_label(label=r'$a^{(2)}_{1}$', size=fs)\n cbr.ax.set_yticklabels([r'$\\frac{1}{3}$', r'$\\frac{2}{3}$', '1'])\n cbr.ax.tick_params(axis='both', which='major', labelsize=fs)\n\n # Cartoons\n x, y = np.array([40000, 40000]), np.array([1100, 150])\n ax1.text(x[0] / 1000. + 100, y[0], 'b', fontsize=fs, ha='center', va='center', bbox=dict(boxstyle='square,pad=0.1', facecolor='white', alpha=0.75))\n ax1.text(x[1] / 1000. + 100, y[1], 'c', fontsize=fs, ha='center', va='center', bbox=dict(boxstyle='square,pad=0.1', facecolor='white', alpha=0.75))\n fab_at_pts = vtu.get_pts_2d(anisolib.fabs, x, y)\n a2 = anisolib.fabric_dict_to_a2(fab_at_pts)\n\n for letter, ax in zip('bcde', [axb1, axb2]):\n ax.text(0.00, 0.9, letter, transform=ax.transAxes, fontsize=bfs)\n\n fabricplotlib.a2plot(a2[:, :, 0], ax=axb1, cbr=False, show=False, levels=13)\n cm = fabricplotlib.a2plot(a2[:, :, 1], ax=axb2, cbr=False, show=False, levels=13)\n cbr = plt.colorbar(cm, cax=axb_cb, orientation='vertical')\n cbr.set_label(label=r'ODF($\\theta,\\phi$)', size=fs)\n # cbr.ax.set_xticklabels(['0', '1', '2'])\n cbr.ax.tick_params(axis='both', which='major', labelsize=fs)\n\n for letter, ax in zip('adefghijklmnop', (ax1, ax2, ax8)):\n ax.text(0.05, 0.85, letter, transform=ax.transAxes, fontsize=bfs)\n ax.tick_params(axis='both', which='major', labelsize=fs)\n\n fig.savefig('../plots/idealized_core_oop_td.png', dpi=300)\n fig.savefig('../plots/poster_idealized_core_oop_td.png', dpi=300, transparent=True)", "def dependence_plot(self, X, y, interaction_index='auto', alpha=None,\n dot_size=None):\n\n y = self._slice_target_index(y=y)\n\n for index in range(_n_targets(y)):\n if sklearn.utils.multiclass.type_of_target(y) == 'continuous-multioutput':\n self.fit(X, y.iloc[:, index].values.ravel(order='K'))\n else:\n self.fit(X, y)\n _, shap_values = self.explainer(X=X)\n shap.dependence_plot(ind='rank(0)', shap_values=shap_values,\n features=X, feature_names=list(X.columns),\n cmap=plt.get_cmap('hot'),\n interaction_index=interaction_index,\n alpha=alpha, dot_size=dot_size, show=self.show)", "def plot_density(data: pd.DataFrame, target: str, feature: str):\n\n plt.figure(figsize=(16, 4))\n\n sns.kdeplot(\n data[feature][data[target] == 1],\n shade=True, label='{}=1'.format(target), linewidth=3)\n sns.kdeplot(\n data[feature][data[target] == 0],\n shade=True, label='{}=0'.format(target), linewidth=3)\n\n min_v = data[feature].min()\n max_v = data[feature].max()\n plt.xlim(min_v, max_v)\n\n plt.title('Distribution of {} by {} value'.format(\n feature.upper(), target.upper()))\n plt.xlabel('{}'.format(feature))\n plt.ylabel('Density')", "def _stats_plot(self, element, y, data=None):\n data, x, y = self._process_args(data, None, y)\n\n opts = {'plot': dict(self._plot_opts), 'norm': self._norm_opts,\n 'style': self._style_opts}\n\n ylim = self._plot_opts.get('ylim', (None, None))\n if not isinstance(y, (list, tuple)):\n ranges = {y: ylim}\n return (element(data, self.by, y).redim.range(**ranges).relabel(**self._relabel).opts(**opts))\n\n labelled = ['y' if self.invert else 'x'] if self.group_label != 'Group' else []\n if self.value_label != 'value':\n labelled.append('x' if self.invert else 'y')\n\n if 'xlabel' in self._plot_opts and 'x' not in labelled:\n labelled.append('x')\n if 'ylabel' in self._plot_opts and 'y' not in labelled:\n labelled.append('y')\n\n opts['plot']['labelled'] = labelled\n\n kdims = [self.group_label]\n data = data[list(y)]\n if check_library(data, 'dask'):\n from dask.dataframe import melt\n else:\n 
melt = pd.melt\n df = melt(data, var_name=self.group_label, value_name=self.value_label)\n ranges = {self.value_label: ylim}\n return (element(df, kdims, self.value_label).redim(**self._redim)\n .redim.range(**ranges).relabel(**self._relabel).opts(**opts))", "def plot_lcga(betas, time, data, degree, clusters_pred, title=None, varname=None):\n # we'll take advantage of the similarity between our problem and to plot a GCM estimation with classes\n # and have a very similar function, differing on the data format\n N,T = data.shape\n assert T == len(time)\n assert clusters_pred.shape in [(N,),(N,1)]\n if clusters_pred.shape == (N,1):\n clusters_pred = clusters_pred.flatten() \n if not np.issubdtype(clusters_pred.dtype, np.integer):\n clusterspred_int = clusters_pred.astype(int)\n assert np.all(clusterspred_int == clusters_pred), 'clusters_pred entries in categorical form should belong to some np.integer dtype'\n clusters_pred = clusterspred_int\n n_clusters = max(clusters_pred)+1\n assert len(betas) == n_clusters\n colors = {0:'tab:blue', 1:'tab:orange', 2:'tab:green', 3:'tab:red', 4:'tab:purple',\n 5:'tab:brown', 6:'tab:pink', 7:'tab:gray', 8:'tab:olive', 9:'tab:cyan'}\n plt.figure()\n # plot individual, observed curves\n for i in range(N):\n # do not plot people that belong to groups that are not present in groups2plot:\n # if tuple(groups[i]) not in groups2plot:\n # continue\n color = colors[clusters_pred[i]]\n plt.plot(time, data[i], color=color, linestyle='dotted', linewidth=1)\n # plot population-level curves\n interval = np.linspace(time[0],time[-1], 100)\n for counter in range(n_clusters):\n curve = np.zeros(100)\n coeffs = np.copy(betas[counter])\n for i in range(degree+1):\n curve += coeffs[i] * interval**i\n plt.plot(interval, curve, color=colors[counter], linewidth=5)\n # legends\n legend = ['group '+str(x) for x in range(n_clusters)]\n handles = [Line2D([0],[0],color=colors[i]) for i in range(n_clusters)]\n plt.legend(handles, legend)\n plt.xlabel(\"time steps\")\n varname = 'y' if varname is None else varname\n plt.ylabel(varname)\n if title:\n plt.title(title)\n plt.show()", "def plot_age_curve(self, period=6., showfig=True):\n\t\tgroup = self['%g_sec'%( period )]\n\t\ttomo_data = group['tomo_data'].value\n\t\ttomo_data_msk = group['tomo_data_msk'].value\n\t\tage_Arr = group['age_Arr'].value\n\t\tage_Arr_msk = group['age_Arr_msk'].value\n\t\tmask = np.logical_or(tomo_data_msk, age_Arr_msk)\n\t\tvel_vec = tomo_data[~mask]\n\t\tage_vec = age_Arr[~mask]\n\t\tplt.plot(age_vec, vel_vec, 'r.')\n\t\tages = np.linspace(0,age_vec.max(),100)\n\t\tvels = group.attrs['c0']+group.attrs['c1']*np.sqrt(ages)+group.attrs['c2']*ages\n\t\tplt.plot(ages, vels, 'b-')\n\t\tplt.xlim(xmin=0)\n\t\tplt.xlabel('Age (Ma)', fontsize=14)\n\t\tplt.ylabel('vel (km/s)', fontsize=14)\n\t\tfig = plt.gcf()\n\t\tfig.suptitle(str(period)+' sec', fontsize=14)\n\t\tif showfig:\n\t\t\tplt.show()\n\t\tpass", "def plot_cost(self):\n steps = np.arange(len(self.cost_values))\n plt.plot(steps, self.cost_values, '-o')\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Cost value\")\n plt.title(\"Cost value per step using Gradient Descent\")\n plt.show()", "def plot_decision_regions(X, y, classifier, resolution=.02, test_idx=None):\n # setup marker generator & color map\n plt.figure()\n markers = ('x', 'o')\n colors = ('red', 'blue')\n\n # calculate and plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, 
x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=.35, cmap=ListedColormap(colors=colors[:len(np.unique(y))]))\n plt.xlim(xx1.min(), xx2.max())\n plt.ylim(xx2.min(), xx2.max())\n\n # scatter plot all values of the data sets\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0],\n y=X[y == cl, 1],\n c=colors[idx],\n marker=markers[idx],\n label=cl,\n edgecolors='black')\n if test_idx:\n # circle test data\n X_test, y_test = X[test_idx, :], y[test_idx]\n plt.scatter(X_test[:, 0],\n X_test[:, 1],\n c='',\n edgecolors='black',\n alpha=1.0,\n linewidths=1,\n marker='o',\n s=100,\n label='test set')", "def plot_bias(clf_list = ['test_small','rt_small','test2_small'],return_df = False,XKCD = False):\n if XKCD = True:\n plt.xkcd()\n print('damn')\n df = load_all_dfs(clf_list)\n df = df.swaplevel(0,1)\n del df['std']\n df.hist()\n plt.figure()\n\n for clf in clf_list:\n df.ix[clf].mean().plot(label = clf,figsize=(16, 4))\n plt.legend(loc='upper right')\n plt.title('mean')\n plt.figure()\n \n # c = df.columns\n for clf in clf_list:\n #df[c[1:]].ix[clf].max().plot(label = clf,figsize=(16, 4))\n df.ix[clf].max().plot(label = clf,figsize=(16, 4))\n plt.legend(loc='upper right')\n plt.title('max')\n \n plt.figure()\n for clf in clf_list:\n df.ix[clf].std().plot(label = clf,figsize=(16, 4))\n\n \n plt.legend(loc='upper right')\n plt.title('std')\n plt.figure()\n used_list = []\n for clf in clf_list:\n for clf2 in clf_list:\n if (clf != clf2) and ({clf,clf2} not in used_list):\n diff = ((df.ix[clf] - df.ix[clf2])**2)**(1/2)\n diff.mean().plot(label = clf+' - ' +clf2,figsize=(16, 4))\n used_list.append({clf,clf2})\n \n \n \n \n \n plt.legend(loc='upper right')\n plt.title('difference')\n print('damnover')\n if return_df == True:\n return df", "def _get_safety_totals_plot(self, ax, safety_stats):\n meta = self.meta\n violations_labels = meta['safety_constraints']\n total_violations = safety_stats['total_violations'].T\n\n for idx, violations in enumerate(total_violations):\n label = violations_labels[idx]\n ax.plot(np.arange(violations.shape[0]), violations, label=label)\n\n ax.set_title('# violations / episode')\n ax.legend()\n ax.set_ylabel('# violations')\n ax.set_xlabel('Episode')\n ax.plot()", "def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, 
label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n ax_list[1].plot(x, y_age_0, label='Under 30', color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')", "def cost_profile_plot(cost_values):\n \n ax = plt.figure(figsize = (7.5,4.5)).gca()\n cost_values = np.array(cost_values)\n span = np.arange(1,len(cost_values)+1)\n ax.plot(span,cost_values, color = 'k', alpha = 0.7)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Cost (MSE) value')\n plt.show()\n plt.close('all')", "def plot_profile(outdir, xval='x', xscale=1, yscale=1, comp2los=False, adjustRadial=False,\n fig=True):\n #Load data\n path = os.path.join(outdir,'points.h5')\n x,y,z,ux,uy,uz = pu.extract_points(path)\n\n Y = uz / yscale\n if xval == 'x':\n X = x / xscale\n Y1 = ux / yscale\n elif xval == 'r':\n X = np.hypot(x,y) / xscale\n ur = np.hypot(ux,uy)\n Y1 = ur / yscale\n if adjustRadial: #fix sign from hypot square root\n ur = pu.radial2negative(Y1)\n\n if fig:\n plt.figure()\n # otherwise profile added to active plot\n\n #plt.plot(X,uy/yscale,'r.-',label='Uy') #should be zero along EW axis\n de = 90e3 / xscale #eastern data extent\n if comp2los != False:\n data_extents = (X<=de)\n if comp2los == 'west': #switch sign of radial profile\n #ux = -ux #move to comp2los function\n X = -X\n Y1 = -Y1\n de = -de\n data_extents = (X>=de)\n\n los = pu.comp2los(x,ux,uy,uz,track=comp2los)\n plt.plot(X, los/yscale, 'k-', lw=2, label='Ulos_' + comp2los)\n plt.fill_between(X,los/yscale, where=data_extents, color='gray',alpha=0.5)\n\n plt.plot(X, Y, 'b-', lw=2, label='Uz')\n plt.plot(X, Y1, 'b--',lw=2, mfc='None',label='U{0}'.format(xval))\n\n # Annotate\n plt.title(outdir)\n plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n plt.ylabel('Uz [{}]'.format(get_unit(yscale)))\n plt.axhline(color='k')\n plt.axvline(de,color='k', linestyle='dashed', label='EW data extent') #EW extent of InSAR coverage\n plt.legend(loc='best')\n plt.grid(True)\n plt.show()", "def _get_safety_per_step_plots(self, ax, safety_stats):\n meta = self.meta\n violations_labels = meta['safety_constraints']\n per_step_violations = safety_stats['per_step_violations']\n\n for idx, violations in enumerate(per_step_violations.T):\n label = violations_labels[idx]\n ax.plot(\n np.arange(violations.shape[0]), violations, label=label, alpha=0.75)\n\n ax.set_title('Mean violations / timestep')\n ax.legend(loc='upper right')\n ax.set_ylabel('Mean # violations')\n ax.set_xlabel('Timestep')\n ax.plot()", "def plot_groups(\n self,\n lim=4,\n center=(0, 0),\n x1='',\n y1='',\n x2='',\n y2='',\n linecolor='k',\n alpha_group=1,\n legend=False,\n pause=False):\n ax = plt.gca()\n ax.clear()\n\n shape = (\n np.sqrt(len(self.contours)).astype(int),\n np.sqrt(len(self.contours)).astype(int))\n\n plt.contour(\n -self.contours.x.values.reshape(shape),\n self.contours.y.values.reshape(shape),\n self.contours.f.values.reshape(shape),\n colors='grey',\n levels=np.arange(2, int(np.max(self.contours.f) + 1), 1),\n linewidths=1,\n zorder=1)\n\n plt.contour(\n -self.contours.x.values.reshape(shape),\n self.contours.y.values.reshape(shape),\n self.contours.f.values.reshape(shape),\n colors='k',\n levels=self.levels,\n linewidths=2,\n zorder=1)\n\n if self.target:\n plt.plot([-lim * 0.05, -lim * 
0.025], [0, 0], color='k')\n plt.plot([lim * 0.05, lim * 0.025], [0, 0], color='k')\n plt.plot([0, 0], [-lim * 0.05, -lim * 0.025], color='k')\n plt.plot([0, 0], [lim * 0.05, lim * 0.025], color='k')\n\n plt.scatter(\n -self.df_gxys.loc[self.gxys.group_peak == 1, 'x'],\n self.df_gxys.loc[self.gxys.group_peak == 1, 'y'],\n edgecolor='k',\n facecolor='none',\n linewidth=2,\n s=32,\n zorder=5)\n\n inds = np.argsort(\n np.sqrt(\n self.df_gxys.loc[self.gxys.group_peak == 1, 'x'] ** 2 +\n self.df_gxys.loc[self.gxys.group_peak == 1, 'y'] ** 2))\n\n marker = 'o'\n\n alpha = np.ones_like(self.df_gxys['x'])\n alpha[\n (self.df_gxys['group_no'] > 1) &\n (self.df_gxys['group_no'] < alpha_group)] = 0.25\n\n for group_no in [0, 1]:\n plt.scatter(\n -self.df_gxys.loc[lambda x: x['group_no'] == group_no, 'x'],\n self.df_gxys.loc[lambda x: x['group_no'] == group_no, 'y'],\n c=f'C{group_no}',\n s=30,\n zorder=2,\n marker=marker,\n alpha=alpha[self.df_gxys['group_no'] == group_no][0])\n\n marker_ = np.tile(np.array(['o', 's', 'D', '^', 'x']), 2000)\n\n for i, group_no in enumerate(\n self.df_gxys.loc[lambda x: x['group_no'] > 1, 'group_no']):\n group = self.df_gxys['group_no'] == group_no\n\n color = f'C{(i % 7) + 2}'\n marker = marker_[np.floor((i + 2) / 10).astype(int)]\n\n plt.scatter(\n -self.df_gxys.loc[group, 'x'],\n self.df_gxys.loc[group, 'y'],\n c=color,\n s=30,\n zorder=2,\n marker=marker,\n label=f'Group {group_no}: {group.sum()}',\n alpha=alpha[group][0])\n\n if (x1 != '') & (y1 != '') & (x2 != '') & (y2 != ''):\n plt.plot(\n [-x1, -x2],\n [y1, y2],\n linestyle='--',\n color=linecolor,\n zorder=3)\n if (x1 != '') & (y1 != ''):\n plt.scatter(\n -x1,\n y1,\n marker='o',\n edgecolor='r',\n facecolor='none',\n zorder=4,\n s=80)\n if (x2 != '') & (y2 != ''):\n plt.scatter(-x2, y2, marker='x', color='r', zorder=4, s=80)\n\n plt.title(self.title, zorder=6)\n\n median = np.argsort(self.df_gxys['x'])[len(self.df_gxys['x']) // 2]\n\n if center == (0, 0):\n if not self.target:\n plt.xlim(\n self.df_gxys['x'][median] - lim,\n self.df_gxys['x'][median] + lim)\n plt.ylim(\n self.df_gxys['y'][median] - lim,\n self.df_gxys['y'][median] + lim)\n\n else:\n plt.xlim(-lim, lim)\n plt.ylim(-lim, lim)\n\n else:\n plt.xlim(center[0] - lim, center[0] + lim)\n plt.ylim(center[1] - lim, center[1] + lim)\n\n plt.gca().set_aspect('equal', 'box')\n\n plt.xlabel('x (Mpc)')\n\n if legend:\n plt.legend(loc='lower right', ncol=4)\n\n if self.pause:\n plt.pause(0.001)", "def plot_dep_curve(self, period=6., showfig=True):\n\t\tgroup = self['%g_sec'%( period )]\n\t\ttomo_data = group['tomo_data'].value\n\t\tdep_Arr = group['dep_Arr'].value\n\t\tmask = np.logical_or(group['tomo_data_msk'].value, group['dep_Arr_msk'].value)\n\t\tvel_vec = tomo_data[~mask]\n\t\tdep_vec = dep_Arr[~mask]\n\t\tplt.plot(dep_vec, vel_vec, 'r.')\n\t\tplt.xlim(xmax=0)\n\t\tplt.xlabel('Water depth (m)', fontsize=14)\n\t\tplt.ylabel('vel (km/s)', fontsize=14)\n\t\tfig = plt.gcf()\n\t\tfig.suptitle(str(period)+' sec', fontsize=14)\n\t\tif showfig:\n\t\t\tplt.show()\n\t\tpass", "def get_convergence_plot(self):\n fig, ax = plt.subplots()\n first_episode = self.get_convergence_episode()\n\n values = self.stats['return_stats']['episode_totals']\n ax.plot(np.arange(len(values)), values, color='steelblue', lw=2, alpha=.9,\n label='Return')\n ax.axvline(first_episode, color='seagreen', lw=2, label='Converged')\n ax.set_xlim(left=0, right=first_episode * 2)\n\n ax.set_title('Normalized regret = {:.3f}'.format(\n self.get_normalized_regret()))\n ax.legend()\n 
ax.set_ylabel('Return')\n ax.set_xlabel('Episode')\n return fig", "def display_group_density_plot(df, groupby, on, palette = None, figsize = None, title=\"\", ax=None):\n if palette is None:\n palette = sns.color_palette('Set2')\n if figsize is None:\n figsize = (10, 5)\n if not isinstance(df, pd.core.frame.DataFrame):\n raise ValueError('df must be a pandas DataFrame')\n\n if not groupby:\n raise ValueError('groupby parameter must be provided')\n\n elif not groupby in df.keys():\n raise ValueError(groupby + ' column does not exist in the given DataFrame')\n\n if not on:\n raise ValueError('on parameter must be provided')\n\n elif not on in df.keys():\n raise ValueError(on + ' column does not exist in the given DataFrame')\n\n if len(set(df[groupby])) > 10:\n groups = df[groupby].value_counts().index[:10]\n\n else:\n groups = set(df[groupby])\n\n # Get relevant palette\n if palette:\n palette = palette[:len(groups)]\n else:\n palette = sns.color_palette()[:len(groups)]\n\n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n \n ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')\n for value, color in zip(groups, palette):\n sns.kdeplot(df.loc[df[groupby] == value][on], shade=True, color=color, label=value, ax=ax)\n if not title:\n title = str(\"Distribution of \" + on + \" per \" + groupby + \" group\")\n \n ax.set_title(title,fontsize=10)\n ax.set_xlabel(on, fontsize=10)\n return ax", "def graph_vals_logliks(optimal_vals_logliks_mus, within_run_vals_logliks):\n # Plot single graph for optimal mu-beta pairs.\n plt.style.use('ggplot')\n fig = plt.figure()\n fig.suptitle('Value vs. Log Likelihood for Generated Data Against True '\n 'Data Distribution: Optimal Pairs', size=14)\n ax = fig.add_subplot(111)\n vals = [v for (v, l, m) in optimal_vals_logliks_mus]\n logliks = [l for (v, l, m) in optimal_vals_logliks_mus]\n mus = [m for (v, l, m) in optimal_vals_logliks_mus]\n t = np.arange(len(vals))\n sc = ax.scatter(vals, logliks, c=t, cmap='cool', s=50)\n for i, txt in enumerate(mus):\n ax.annotate(round(txt, 2), (vals[i], logliks[i]))\n cb = plt.colorbar(sc)\n cb.set_label('Iter', labelpad=-31, y=1.05, rotation=0)\n plt.xlabel('Value')\n plt.ylabel('Log Likelihood')\n\n # Plot within-run, gridded, mu-beta graphs for each iteration.\n fig = plt.figure()\n fig.suptitle('Value vs. 
Log Likelihood: Gridded Pairs, Per Iteration',\n size=14)\n num_runs = len(optimal_vals_logliks_mus)\n dims = np.ceil(np.sqrt(num_runs))\n for run_index in range(num_runs):\n optimal_val = optimal_vals_logliks_mus[run_index][0]\n optimal_loglik = optimal_vals_logliks_mus[run_index][1]\n optimal_mu = optimal_vals_logliks_mus[run_index][2]\n\n ax = plt.subplot(dims, dims, run_index + 1)\n plt.title(r'Iter: {}'.format(run_index), size=10)\n vals_logliks = within_run_vals_logliks[run_index]\n vals = [v for (v, l) in vals_logliks]\n logliks = [l for (v, l) in vals_logliks]\n ax.scatter(vals, logliks, c=COLORS['lightgreen'], s=30)\n ax.scatter(optimal_val, optimal_loglik, c='red', s=50)\n ax.annotate(round(optimal_mu, 2), (optimal_val, optimal_loglik))\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n if run_index == 0:\n plt.xlabel('Value')\n plt.ylabel('Log Likelihood')\n plt.setp(ax.get_xticklabels(), visible=True)\n plt.setp(ax.get_yticklabels(), visible=False)", "def plot_slice(\n study: Study,\n params: Optional[List[str]] = None,\n *,\n target: Optional[Callable[[FrozenTrial], float]] = None,\n target_name: str = \"Objective Value\",\n) -> \"go.Figure\":\n\n _imports.check()\n _check_plot_args(study, target, target_name)\n return _get_slice_plot(study, params, target, target_name)", "def plot_groups(sb, **kw):\n\n #check kws\n B_flag = True\n if('B' in kw):\n B_flag = bool(kw['B'])\n E_flag = True\n if('E' in kw):\n E_flag = bool(kw['E'])\n ugroups = sb.unique_group_names\n if('groups' in kw):\n ugroups = set(kw['groups'])\n if('return_figs' in kw):\n if(kw['return_figs']):\n return_figs = True\n figs = {'E': {}, 'B': {}}\n else:\n return_figs = False\n else:\n if((not B_flag) or (not E_flag)):\n group_lim = 8\n else:\n group_lim = 4\n if(len(ugroups) <= group_lim):\n return_figs = True\n figs = {'E': {}, 'B': {}}\n else:\n return_figs = False\n\n flags = [B_flag, E_flag]\n fields = ['Bmax', 'Emax']\n ylabels = ['Maximum Magnetic Field (mG)', 'Maximum Electric Field (kV/m)']\n title_pre = ['Maximum Magnetic Field - ',\n 'Maximum Electric Field - ']\n keys = ['B', 'E']\n it = zip(flags, fields, ylabels, title_pre, keys)\n\n #iterate over groups with more than 1 CrossSection\n for xss in sb.groups:\n if(xss[0].group in ugroups):\n for (fl, fi, yl, ti, k) in it:\n if(fl):\n #get plotting objects\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n #init handles and labels lists for legend\n kw['H'], kw['L'] = [], []\n #plot the Bmax results for each xs in the group\n _plot_group_fields(ax, xss, fi, **kw)\n #plot wires\n max_field = max([xs.fields[fi].max() for xs in xss])\n _plot_group_wires(ax, xss, max_field, **kw)\n #draw ground surface if necessary\n if(len(xss) <= 2):\n _check_und_conds(xss, [ax], **kw)\n #plot ROW lines\n _plot_group_ROW_edges(ax, xss, **kw)\n #set axis text and legend\n ax.set_xlabel('Distance (ft)')\n ax.set_ylabel(yl)\n ax.set_title(textwrap.fill(ti + str(xss[0].group)))\n ax.legend(kw['H'], kw['L'], **_leg_kw)\n _format_line_axes_legends(ax)\n #save the figure if keyword 'save' == True, and append fig\n _save_fig('group_%s-%s' % (str(xss[0].group), fi), fig, **kw)\n #store the fig or close it\n if(return_figs):\n figs[k][xss[0].group] = fig\n else:\n plt.close(fig)\n\n if(return_figs):\n return(figs)", "def plot_mogp_panel(model, data, disp_clust=12, k_alph_flag=True, mogp_color='b', slope_color='r', add_full_slope=False):\n fig, axs = plt.subplots(math.ceil(disp_clust/4), 4, figsize=(20, 3*(math.ceil(disp_clust/4))), 
sharex=True, sharey=True)\n\n nc = len(np.where(model.allocmodel.Nk > 0)[0])\n idx = np.argsort(-model.allocmodel.Nk)[0:nc]\n \n df_disp_clust = pd.DataFrame(columns=['k', 'k_alph', 'estim_diff'])\n for i, k in enumerate(idx[:disp_clust]):\n if k_alph_flag:\n k_alph = ascii_lowercase[i]\n else:\n k_alph = None\n \n axs.flat[i], num_pat = plot_mogp_by_clust(axs.flat[i], model, data, k, data_col='k', model_col=mogp_color) \n estim_diff = plot_slope_by_clust(axs.flat[i], model, k, slope_col=slope_color) \n if add_full_slope:\n \tmax_x = model.obsmodel[k].X.max()\n \t_ = plot_slope_by_clust(axs.flat[i], model, k, slope_col='g', upper_bound=max_x) \n axs.flat[i] = format_panel_axs(axs.flat[i], k_alph, num_pat, k_alph_flag)\n \n df_disp_clust = df_disp_clust.append({'k': k, 'k_alph': k_alph, 'estim_diff': estim_diff}, ignore_index=True)\n \n return fig, axs, df_disp_clust", "def plot_iteration(ax, x_n, y_n, f, \n max_labels=6, resfct = 100, include_chords=True,\n left_extra=0.01, right_extra=0.01):\n if include_chords:\n # create a list including chord points:\n x_c = sum([[x, x] for x in x_n[1:]], [x_n[0]])\n y_c = sum([[0, y] for y in y_n[1:]], [y_n[0]])\n else:\n x_c = x_n\n y_c = y_n\n # the iteration results\n ax.scatter(x_c, y_c, marker='x', color='red', s=30)\n\n # the convergence pattern\n ax.plot(x_c, y_c, color='green', ls='--')\n\n # add some labels\n # figure out a reasonable offset for labels\n dxt = (np.max(x_n)-np.min(x_n))/50.\n dyt = (np.max(y_n)-np.min(y_n))/50.\n # only plot a maximum of max_labels labels, so plot doesn't get too messy\n for i,(x,y) in enumerate(zip(x_n, y_n)):\n ax.text(x_n[i]+dxt, y_n[i]+dyt, '$x_{}$'.format(i), fontsize=16)\n if i == max_labels:\n break\n\n # the function\n x = np.linspace(np.min(x_n) - left_extra, np.max(x_n) + right_extra, resfct)\n ax.plot(x, f(x), 'b', label='$F(x)$')\n\n # zero line\n xlim = ax.get_xlim()\n ax.plot([xlim[0], xlim[1]], [0., 0.], 'k--')\n # add ticks for the x_n\n for x in x_n:\n ax.plot([x, x], [-dyt, dyt], 'k')\n ax.set_xlim(xlim)\n ax.set_xlabel('$x$', fontsize=16)\n ax.set_ylabel('$y=F(x)$', fontsize=16)", "def plot_log_convergence(\n self, acquisition_risks, errors='std',\n fig=None, ax=None, alpha=0.3, i=0, names=None, labels=None,\n rolling=False, zorder=100, colors=None, with_errors=False,\n swapaxes=False, print_it=False, lw=None, error_type='quant',\n scale='default', lwfill=None\n):\n if errors == 'percentiles':\n upper_base = self.quant_errors\n elif errors == 'std':\n upper_base = self.errors\n elif errors == 'log mean':\n upper_base = self.log_sq_diff\n if scale != 'manual log':\n raise ValueError('Log target!')\n else:\n raise ValueError\n\n if fig is None or ax is None:\n fig, ax = plt.subplots(dpi=200)\n\n linestyles = itertools.cycle(['--', '-.', ':'])\n for acquisition, risk in acquisition_risks:\n acq_risk = f'{acquisition}_{risk}'\n if print_it:\n print(acq_risk)\n\n if colors is None:\n color = acquisition_risks_to_color[acq_risk]\n else:\n color = colors[i]\n\n y = upper_base.loc[acquisition][risk].values\n\n\n if (R := rolling) is not False:\n y = np.convolve(\n y, np.ones(R)/R, mode='valid')\n\n if scale == 'manual log':\n plot = ax.plot\n if errors != 'log mean':\n y = np.log10(y)\n else:\n plot = ax.loglog\n\n x = np.arange(1, y.size+1)\n if swapaxes:\n plot(\n y, x, '-', color=color, label=labels[i],\n zorder=zorder, lw=lw)\n else:\n plot(\n x, y, '-', color=color, label=labels[i],\n zorder=zorder, lw=lw)\n\n if with_errors and error_type == 'quant':\n\n low, up = self.extra_quant_errors\n low 
= low.loc[acquisition][risk].values\n up = up.loc[acquisition][risk].values\n\n if (R := rolling) is not False:\n up = np.convolve(\n up, np.ones(R)/R, mode='valid')\n low = np.convolve(\n low, np.ones(R)/R, mode='valid')\n x = np.arange(0, len(s_u))\n\n elif with_errors and error_type == 'std_log_error':\n middle = y\n std = self.log_sq_diff_std.loc[acquisition][risk].values\n std = std/np.sqrt(self.n_runs)\n if scale != 'manual log':\n std = np.power(std, 10)\n low = middle - std\n up = middle + std\n\n # elif with_errors and error_type == 'log_std_error':\n # middle = y\n # std = self.stds.loc[acquisition][risk].values\n # std = std**2 / self.n_runs\n # if add_sqrt:\n # std = np.sqrt(std)\n # if scale == 'manual log':\n # std = np.log10(std)\n # low = middle - std\n # up = middle + std\n\n # else:\n # raise\n\n if with_errors and swapaxes:\n raise\n\n if with_errors:\n# ax.fill_between(x, low, up, color='white', alpha=1, zorder=-100)\n ax.fill_between(\n x, low, up, color=color, alpha=0.3, zorder=-100, lw=lwfill)\n # plot(x, std, color=color, alpha=0.3, zorder=-100, lw=1)\n\n i += 1\n\n return fig, ax", "def show_learning_curve(self):\n\n # Loop output classes\n for c in range(1,self.n_output_classes):\n # Get data\n x_values = np.array(self.n_class_samples_list[c])\n accuracy = np.array(self.accuracy_list[c])\n precision = np.array(self.precision_list[c])\n recall = np.array(self.recall_list[c])\n F1 = np.array(self.F1_list[c])\n\n # Make plot\n with sns.axes_style(\"ticks\"):\n fig,ax = plt.subplots()\n plt.plot([np.min(x_values),np.max(x_values)],[0.5,0.5],\n color='#777777',linestyle='--')\n plt.plot([np.min(x_values),np.max(x_values)],[0.66,0.66],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.8,0.8],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.9,0.9],\n color='#777777',linestyle=':')\n\n plt.plot( x_values, accuracy, color='#000000',\n linewidth=1, label='Accuracy' )\n plt.plot( x_values, precision, color='#0000aa',\n linewidth=1, label='Precision' )\n plt.plot( x_values, recall, color='#00aa00',\n linewidth=1, label='Recall' )\n plt.plot( x_values, F1, color='#aa0000',\n linewidth=2, label='F1' )\n\n plt.yticks( [0, 0.5, 0.66, 0.8, 0.9, 1.0],\n ['0','0.5','0.66','0.8','0.9','1.0'], ha='right' )\n plt.xlim(np.max(x_values)*-0.02,np.max(x_values)*1.02)\n plt.ylim(-0.02,1.02)\n plt.xlabel('Number of training samples')\n plt.ylabel('Performance')\n plt.title('Learning curve, class {}'.format(c))\n sns.despine(ax=ax, offset=0, trim=True)\n lgnd = plt.legend(loc=4, ncol=1, frameon=True, fontsize=9)\n lgnd.get_frame().set_facecolor('#ffffff')\n ax.spines['left'].set_bounds(0,1)\n ax.spines['bottom'].set_bounds(np.min(x_values),np.max(x_values))", "def plot_svc_decision_function(clf, ax=None):\n plot_decision_function(clf.decision_function, [-1, 0, 1], ax)", "def plot(\n grad_stats,\n loss_scale,\n log,\n iterations=[0, 100, 1000, 3000],\n grad_name=\"ReLUGrad2\",\n grad_index=0,\n title=None,\n out=None,\n):\n fig, axes = plt.subplots(ncols=3, figsize=(16, 4))\n\n # grad stats\n for it in iterations:\n gdf = grad_stats[\n (grad_stats[\"iter\"] == it)\n & (grad_stats[\"label\"] == grad_name)\n & (grad_stats[\"index\"] == grad_index)\n ]\n axes[0].plot(\n np.arange(1, len(gdf) + 1)[::-1],\n gdf[\"nonzero\"] / gdf[\"size\"] * 100,\n label=\"iter={}\".format(it),\n )\n axes[0].set_xlabel(\"Layer ID\")\n axes[0].set_ylabel(\"Nonzero (%)\")\n axes[0].set_title(\"Percentage of nonzero activation gradients\")\n 
axes[0].legend()\n\n # loss scale\n for it in iterations:\n for key in [\"unbound\", \"final\"]:\n loss_scale_ = loss_scale[\n (loss_scale[\"iter\"] == it) & (loss_scale[\"key\"] == key)\n ]\n axes[1].plot(\n np.arange(1, len(loss_scale_) + 1)[::-1],\n loss_scale_[\"val\"],\n label=\"iter={} key={}\".format(it, key),\n )\n axes[1].set_ylabel(\"Loss scale per layer\")\n axes[1].set_xlabel(\"Layer ID\")\n axes[1].set_title(\"Loss scale per layer\")\n axes[1].legend()\n\n # train log\n axes[2].plot(log[\"validation/main/accuracy\"], label=\"validation\")\n axes[2].plot(log[\"main/accuracy\"], label=\"train\")\n axes[2].set_ylabel(\"Accuracy\")\n axes[2].set_xlabel(\"Epoch\")\n axes[2].set_title(\"Training log\")\n axes[2].legend()\n\n if title is not None:\n plt.suptitle(title)\n\n return fig", "def deciles_chart(\n df,\n filename,\n period_column=None,\n column=None,\n title=\"\",\n ylabel=\"\",\n time_window=\"\",\n):\n\n max_practices = df.groupby(period_column).agg(\n {\"practice\": \"nunique\"}\n ).max().values[0]\n\n\n # remove any practices with value of 0 each month\n df = df.loc[df[\"value\"]>0, :]\n\n # monthly number of practices with column > 0\n practice_numbers = df.groupby(period_column).agg(\n {\"practice\": \"nunique\"}\n )\n \n practice_numbers = practice_numbers.apply(lambda x: round(x / 5) * 5)\n df = compute_deciles(df, period_column, column, has_outer_percentiles=False)\n \n # calculate monthyl proportion of practices with non zero values\n\n sns.set_style(\"whitegrid\", {\"grid.color\": \".9\"})\n\n fig, ax = plt.subplots(1, 1, figsize=(15, 8))\n\n linestyles = {\n \"decile\": {\n \"line\": \"b--\",\n \"linewidth\": 1,\n \"label\": \"Decile\",\n },\n \"median\": {\n \"line\": \"b-\",\n \"linewidth\": 1.5,\n \"label\": \"Median\",\n },\n \"percentile\": {\n \"line\": \"b:\",\n \"linewidth\": 0.8,\n \"label\": \"1st-9th, 91st-99th percentile\",\n },\n }\n label_seen = []\n for percentile in range(1, 100): # plot each decile line\n data = df[df[\"percentile\"] == percentile]\n add_label = False\n\n if percentile == 50:\n style = linestyles[\"median\"]\n add_label = True\n\n else:\n style = linestyles[\"decile\"]\n if \"decile\" not in label_seen:\n label_seen.append(\"decile\")\n add_label = True\n if add_label:\n label = style[\"label\"]\n else:\n label = \"_nolegend_\"\n\n ax.plot(\n data[period_column],\n data[column],\n style[\"line\"],\n linewidth=style[\"linewidth\"],\n label=label,\n )\n ax.set_ylabel(ylabel, size=15, alpha=0.6)\n if title:\n ax.set_title(title, size=14, wrap=True)\n # set ymax across all subplots as largest value across dataset\n\n ax.set_ylim(\n [0, 100 if df[column].isnull().values.all() else df[column].max() * 1.05]\n )\n ax.tick_params(labelsize=12)\n ax.set_xlim(\n [df[period_column].min(), df[period_column].max()]\n ) # set x axis range as full date range\n\n plt.setp(ax.xaxis.get_majorticklabels(), rotation=90)\n ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(\"%B %Y\"))\n\n plt.xticks(sorted(df[period_column].unique()), rotation=90)\n\n plt.vlines(\n x=[pd.to_datetime(\"2020-03-01\")],\n ymin=0,\n ymax=100,\n colors=\"orange\",\n ls=\"--\",\n label=\"National Lockdown\",\n )\n\n if not time_window == \"\":\n plt.vlines(\n x=[pd.to_datetime(time_window)],\n ymin=0,\n ymax=100,\n colors=\"green\",\n ls=\"--\",\n label=\"Date of expected impact\",\n )\n\n ax.legend(\n bbox_to_anchor=(1.1, 0.8), # arbitrary location in axes\n # specified as (x0, y0, w, h)\n loc=CENTER_LEFT, # which part of the bounding box should\n # be placed at 
bbox_to_anchor\n ncol=1, # number of columns in the legend\n fontsize=20,\n borderaxespad=0.0,\n ) # padding between the axes and legend\n # specified in font-size units\n\n\n # plot proportions opn second y axis\n ax2 = ax.twinx()\n ax2.bar(practice_numbers.index, practice_numbers.practice, color=\"gray\", label=\"Proportion of practices with non-zero values\", width=20, alpha=0.3)\n # st y lim to 0- (proportions max rounded up to nearest 10)\n ax2.set_ylim(0, max_practices + 10 - (max_practices % 10))\n\n ax2.set_ylabel(\"Number of practices included\", size=15, alpha=0.6)\n\n plt.tight_layout()\n plt.savefig(filename)\n plt.clf()" ]
[ "0.57839125", "0.5279949", "0.50594544", "0.50512856", "0.50476915", "0.4997148", "0.4956228", "0.48934639", "0.4885822", "0.4879313", "0.48789495", "0.48726746", "0.48418292", "0.48268393", "0.48255506", "0.4809495", "0.47626424", "0.4736917", "0.47095123", "0.46958274", "0.4682187", "0.46796298", "0.46700087", "0.4663738", "0.464751", "0.46452853", "0.46386296", "0.4635964", "0.46338877", "0.46318033", "0.46274742", "0.46243897", "0.46160036", "0.46031418", "0.4589304", "0.45755672", "0.45705038", "0.45462695", "0.4540683", "0.45397264", "0.45392677", "0.45373666", "0.45317507", "0.4528655", "0.45260683", "0.45208454", "0.4519813", "0.45152447", "0.45106798", "0.45099145", "0.45015383", "0.45015383", "0.45015076", "0.44946426", "0.44880792", "0.44824466", "0.4479937", "0.4473086", "0.44701877", "0.44697624", "0.44631642", "0.4460359", "0.4456663", "0.44505504", "0.4445442", "0.4442711", "0.4433534", "0.44312963", "0.44305864", "0.44265375", "0.44204235", "0.44190985", "0.44181204", "0.44150412", "0.44135898", "0.44119623", "0.44115862", "0.4410751", "0.4404935", "0.439571", "0.43941247", "0.4386917", "0.43864053", "0.43861747", "0.43851265", "0.43829167", "0.43789044", "0.4378749", "0.43777755", "0.4374497", "0.43734065", "0.4371279", "0.43709606", "0.43686745", "0.436633", "0.43633455", "0.43628487", "0.4361519", "0.4358869", "0.4347986" ]
0.5902508
0
Plot the evolution of relative evaluations for a target based on increasing absolute evaluations. In other words, for each absolute number of evaluations, determine the target reached and show how much faster the baseline reached it. groupby is the method of aggregating results of multiple instances: a callable, stringable object, GroupByMedian by default. It's not clear whether this will eventually be useful at all, but it offers another perspective that might aid some analysis.
def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label="", baseline2_ds=None, baseline2_label="", dim=None, funcId=None, groupby=None):
    if groupby is None: groupby = GroupByMedian()
    pfsize = len(pds.algds.keys())

    runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
    target_values = pp.RunlengthBasedTargetValues(runlengths,
            reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
    targets = target_values((funcId, dim))

    if baseline1_ds:
        baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))
    if baseline2_ds:
        baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))

    for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
        #print name, ds
        fevs1 = groupby(ds.detEvals(targets), axis=1)
        if baseline1_ds:
            fevs1 /= baseline1_fevs
        fevs2 = groupby(ds.detEvals(targets), axis=1)
        if baseline2_ds:
            fevs2 /= baseline2_fevs

        infsx = np.nonzero(fevs1 == inf)
        infs = infsx[0]
        if np.size(infs) > 0:
            #print infs
            fevs1 = fevs1[:infs[0]-1]
            fevs2 = fevs2[:infs[0]-1]
        #print name, fevs1, fevs2

        style['markevery'] = 64
        ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)

    ax.grid()
    ax.set_xlim(0, runlengths[-1] * pfsize)  # i.e. log(runlengths) + 1
    ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))
    ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evals_by_target(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline_ds:\n baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs = groupby(ds.detEvals(targets), axis=1)\n if baseline_ds:\n fevs /= baseline_fevs\n style['markevery'] = 64\n ax.loglog(targets, fevs, label=name, basey=pfsize, **style)\n ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))\n if baseline_ds:\n ax.set_yticks([2, 3.5], minor=True)\n ax.set_xlabel('Function Value Targets')\n ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')", "def plot_results(self):\n experiment_utils.plot_exp_metric_comparison(self.experiments(reverse_sort=False))", "def plot_associative_learning_progress(ax, df):\n\n num_objects_list = sorted(df.curr_num_objects.unique())\n legend_list = []\n for idx in num_objects_list:\n ax.plot(df[df.curr_num_objects == idx].groupby('objects_iter').rewards.mean())\n legend_list.append(f'ns={idx}')\n ax.set_xlabel('Stimulus iteration')\n ax.set_ylabel('P(correct)')\n ax.set_ylim([0.4, 1])\n ax.legend(legend_list)", "def plot_optimization_history(\n study: Study | Sequence[Study],\n *,\n target: Callable[[FrozenTrial], float] | None = None,\n target_name: str = \"Objective Value\",\n error_bar: bool = False,\n) -> \"Axes\":\n\n _imports.check()\n\n info_list = _get_optimization_history_info_list(study, target, target_name, error_bar)\n return _get_optimization_history_plot(info_list, target_name)", "def plot_test_objective_multi(df, exp_config, output_dir, show):\n output_file_name = f\"{inspect.stack()[0][3]}.{FILE_EXTENSION}\"\n output_path = os.path.join(output_dir, output_file_name)\n\n plt.figure()\n\n for exp_name, exp_df in df.items():\n\n if \"rep\" in exp_config[\"data\"][exp_name]:\n\n exp_dfs = exp_df\n\n T = np.linspace(0, exp_config[\"t_max\"], 50000)\n\n y_list = []\n for i, df_i in enumerate(exp_dfs):\n\n df_i = process_for_test_objective(\n df_i.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = df_i.loc[df_i[\"max_idx\"]][\"timestamp_end\"].values\n y = df_i.loc[df_i[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n f = interp1d(x, y, kind=\"previous\", fill_value=\"extrapolate\")\n y = exp_config.get(\"best_objective\", 1) - f(T)\n y_list.append(y)\n\n y_list = np.asarray(y_list)\n y_mean = y_list.mean(axis=0)\n y_std = y_list.std(axis=0)\n y_se = y_std / np.sqrt(y_list.shape[0])\n\n plt.plot(\n T,\n y_mean,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n plt.fill_between(\n T,\n y_mean - 1.96 * y_se,\n y_mean + 1.96 * y_se,\n facecolor=exp_config[\"data\"][exp_name][\"color\"],\n alpha=0.3,\n )\n\n else:\n\n exp_df = process_for_test_objective(\n exp_df.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = exp_df.loc[exp_df[\"max_idx\"]][\"timestamp_end\"].values\n y = 
exp_df.loc[exp_df[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n idx = np.unique(x, return_index=True, axis=0)[1]\n\n x = x[idx]\n y = y[idx]\n\n x = np.clip(np.concatenate([x, [exp_config[\"t_max\"]]]), 0, exp_config[\"t_max\"])\n y = np.clip(exp_config.get(\"best_objective\", 1) - np.concatenate([y, [y[-1]]]), 0, 1)\n \n area = aulc(x, y)\n exp_config[\"data\"][exp_name][\"AULC\"] = area\n \n plt.step(\n x[:],\n y[:],\n where=\"post\",\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n marker=exp_config[\"data\"][exp_name].get(\"marker\", None),\n markevery=len(x) // 5,\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n\n ax = plt.gca()\n ticker_freq = exp_config[\"t_max\"] / 5\n ax.xaxis.set_major_locator(ticker.MultipleLocator(ticker_freq))\n ax.xaxis.set_major_formatter(minute_major_formatter)\n\n if exp_config.get(\"title\") and PRINT_TITLE:\n plt.title(exp_config.get(\"title\"))\n\n # if MODE == \"min\":\n # plt.legend(loc=\"upper right\")\n # else:\n # plt.legend(loc=\"lower right\")\n plt.legend(loc=exp_config.get(\"legend\", \"best\"))\n\n plt.ylabel(\"Test Regret\")\n plt.xlabel(\"Search time (min.)\")\n\n if exp_config.get(\"ylim\"):\n plt.ylim(*exp_config.get(\"ylim\"))\n\n if exp_config.get(\"xlim\"):\n plt.xlim(*exp_config.get(\"xlim\"))\n else:\n plt.xlim(0, exp_config[\"t_max\"])\n\n if exp_config.get(\"yscale\"):\n plt.yscale(exp_config.get(\"yscale\"))\n\n plt.grid(which=\"minor\", color=\"gray\", linestyle=\":\")\n plt.grid(which=\"major\", linestyle=\"-\")\n plt.tight_layout()\n plt.savefig(output_path, dpi=360)\n if show:\n plt.show()\n plt.close()", "def fval_by_budget(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n if baseline_ds:\n baseline_budgets = baseline_ds.funvals[:, 0]\n baseline_funvals = groupby(baseline_ds.funvals[:, 1:], axis=1)\n baseline_safefunvals = np.maximum(baseline_funvals, 10**-8) # eschew zeros\n # fvb is matrix with each row being [budget,funval]\n baseline_fvb = np.transpose(np.vstack([baseline_budgets, baseline_safefunvals]))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n budgets = ds.funvals[:, 0]\n funvals = groupby(ds.funvals[:, 1:], axis=1)\n\n # Throw away funvals after ftarget reached\n try:\n limit = np.nonzero(funvals < 10**-8)[0][0] + 1\n except IndexError:\n limit = np.size(budgets)+1\n budgets = budgets[:limit]\n funvals = funvals[:limit]\n\n fvb = np.transpose(np.vstack([budgets[:limit], funvals[:limit]]))\n\n if baseline_ds:\n # Relativize by baseline\n fvba = ra.alignArrayData(ra.VArrayMultiReader([fvb, baseline_fvb]))\n budgets = fvba[:, 0]\n funvals = fvba[:, 1] / fvba[:, 2]\n\n style['markevery'] = 16\n ax.loglog(budgets, funvals, label=name, basex=pfsize, **style)\n if baseline_ds:\n ax.set_yticks([1], minor=True)\n ax.set_xlabel('Budget')\n ax.set_ylabel(_fval_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')", "def plot_results(outputs, x, e, t, a, folds, groups,\n quantiles, strat='quantile', adj='KM', plot=True):\n if plot:\n mpl.rcParams['hatch.linewidth'] = 2.0\n\n fig, big_axes = plt.subplots(\n figsize=(8 * (len(groups) + 2), 6 * len(quantiles)),\n nrows=len(quantiles),\n ncols=1)\n\n plt.subplots_adjust(hspace=0.4)\n\n i = 0\n for _, big_ax in enumerate(big_axes, start=1):\n big_ax.set_title(\n 
'Receiver Operator Characteristic and Calibration at t=' +\n str(quantiles[i]) + '\\n',\n fontsize=16)\n big_ax.tick_params(\n labelcolor=(1., 1., 1., 0.0),\n top='off',\n bottom='off',\n left='off',\n right='off')\n i += 1\n \n eces = {}\n metrics = {}\n\n for quant in quantiles:\n eces[quant] = {}\n \n for i in range(len(quantiles)):\n\n scores = outputs[quantiles[i]]\n for j in range(len(groups) + 2):\n\n pt = (i * (len(groups) + 2) + j + 1)\n if plot:\n ax = fig.add_subplot(len(quantiles), len(groups) + 2, pt)\n else:\n ax = None\n \n if (j==1):\n eces[quantiles[i]]['all'] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n None,\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot) \n \n if (j>1):\n eces[quantiles[i]][groups[j - 2]] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups[j - 2],\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot)\n \n if (j==0):\n metrics[quantiles[i]] = plot_roc_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups,\n quantiles[i],\n plot=plot)\n\n for quant in quantiles:\n metrics[quant] = metrics[quant] + (eces[quant], )\n \n if plot: \n plt.show()\n return metrics", "def plot_ratios(path='/Volumes/OptiHDD/data/pylith/3d/agu2014/output',\n\t\t\t\tsteps=['step01','step02'],\n\t\t\t\t#labels='',\n\t\t\t\tshow=True,\n\t\t\t\txscale=1e3,\n\t\t\t\tyscale=1e-2):\n\tplt.figure()\n\t#path = '/Users/scott/Desktop/elastic'\n\n\t# Deep source\n\t#labels = ['no APMB', 'APMB']\n\t#if labels == '':\n\tlabels = steps\n\tdeep = {}\n\t#uzmax = 0.824873455364\n\t# NOT sure why hardcoded...\n\tuzmax = 1\n\tfor i,outdir in enumerate(steps):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\t\tprint(pointsFile)\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 30.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'o-',ms=4,lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'o--',ms=4,lw=4,color=l.get_color()) #mfc='none' transparent\n\t\tdeep[outdir] = uz_fem/uz_fem\n\n\t'''\n\t# Shallow Source\n\tshallow = {}\n\tuzmax = 0.949652827795\n\tfor i,outdir in enumerate(['step11','step12']):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t#normalize\n\tuz_fem = uz_fem / uzmax\n\tur_fem = ur_fem / uzmax\n\tx_fem = x_fem / 20.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'.-', mfc='w', lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'.--',lw=4, mfc='w',color=l.get_color()) #mfc='none' transparent\n\n\t\tshallow[outdir] = uz_fem/ur_fem\n\t'''\n\n\t# Annotate\n\tplt.axhline(color='k',lw=0.5)\n\t#plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\t#plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.legend()\n\tplt.grid()\n\t#plt.ylim(-0.5, 3.5)\n\t#plt.savefig('deep.png',bbox_inches='tight')\n\t#plt.savefig('shallow.png',bbox_inches='tight')\n\n\t# normalized\n\tplt.ylim(-0.5, 4)\n\tplt.xlim(0,10)\n\tplt.xlabel('Normalized Radial Distance [R / 
D]')\n\tplt.ylabel('Normalized Displacement [U / Uz_max]')\n\t#plt.savefig('normalized_deep.png',bbox_inches='tight')\n\tplt.savefig('normalized_shallow.png',bbox_inches='tight')\n\n\n\t# Plot ratios of uz versus NOTE: this plot is confusing,,, just keep ratio of uz_max to ur_max\n\t'''\n\tplt.figure()\n\tplt.plot(x_fem, deep['step01'], label='Deep no APMB')\n\tplt.plot(x_fem, deep['step02'], label='Deep w/ APMB')\n\tplt.plot(x_fem, shallow['step11'], label='Shallow no APMB')\n\tplt.plot(x_fem, shallow['step12'], label='Shallow w/ APMB')\n\tplt.xlabel('Distance [km]') #NOTE: maybe plot normailzed X-axis (R-d)\n\t#plt.xlabel('Normalized Distance [R/d]')\n\tplt.ylabel('Ratio [Uz/Ur]')\n\tplt.title('Ratio of vertical to radial displacement')\n\tplt.legend()\n\tplt.show()\n\t'''", "def plot_metric(df_metrics, name, batch_size=10, epochs=10):\n\n # One groupplot\n fig, axarr = plt.subplots(3, 4, sharey=True, sharex=True)\n plotname = 'apfd'\n subplot_labels = ['(a)', '(b)', '(c)']\n\n for column, nr in enumerate(sorted(df_metrics['negative_ratio'].unique())):\n for row, emb_size in enumerate(df_metrics['emb_size'].unique()):\n for agidx, (labeltext, task, linestyle) in enumerate(\n [('Classification', 'True', '-'), ('Regression', 'False', '-.')]):\n rel_df = df_metrics[\n (df_metrics['emb_size'] == str(emb_size)) & (df_metrics['negative_ratio'] == str(nr)) &\n (df_metrics['batch_size'] == str(batch_size)) & (df_metrics['epochs'] == str(epochs))]\n\n # rel_df[rel_df['agent'] == agent].plot(x='step', y='napfd', label=labeltext, ylim=[0, 1], linewidth=0.8,\n # style=linestyle, color=sns.color_palette()[agidx], ax=axarr[row,column])\n\n apfd = rel_df.loc[rel_df['classification'] == task, 'apfd']\n miu = np.round(np.mean(apfd), 2)\n sigma = np.round(np.std(apfd), 2)\n label = labeltext + '\\n $\\mu$ - ' + str(miu) + ' $\\sigma$ - ' + str(sigma)\n\n # sns.displot(data=rel_df, x=\"apfd\", hue='classification', kde=True, ax=axarr[row, column])\n\n sns.distplot(apfd, kde=True,\n bins=int(180 / 5), color=sns.color_palette()[agidx],\n hist_kws={'edgecolor': 'black'},\n kde_kws={'linewidth': 4, 'clip': (0.0, 1.0)}, label=label, ax=axarr[row, column])\n\n axarr[row, column].xaxis.grid(True, which='major')\n\n axarr[row, column].set_title('Emb_size - %s - Neg_Ratio - %s' % (emb_size, nr), fontsize=10)\n\n if row == 2:\n axarr[row, column].set_xlabel('APFD')\n if column == 0:\n axarr[row, column].set_ylabel('Density')\n\n axarr[row, column].legend(frameon=True, prop={'size': 6})\n\n # Tweak spacing to prevent clipping of ylabel\n fig.suptitle('APFD Parameter Tuning - %d Epochs and batch-size - %d' % (epochs, batch_size))\n fig.tight_layout()\n plt.savefig(name, bbox_inches='tight')\n plt.show()", "def _report(self, pagerank_by_target):\r\n for target in sorted(pagerank_by_target, key=pagerank_by_target.get, reverse=True):\r\n yield '%f - %s' % (pagerank_by_target[target], target)", "def make_plot_for_proportion_within_target(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n num_of_trials,\n seed_num,\n target,\n runtime=1440,\n max_threshold=None,\n):\n ambulance_proportions = []\n other_proportions = []\n all_proportions = []\n if max_threshold == None:\n max_threshold = num_of_servers\n for threshold in range(max_threshold + 1):\n mean_ambulance, mean_other, mean_combined = get_mean_waits_of_current_threshold(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n threshold,\n seed_num,\n num_of_trials,\n runtime,\n target,\n )\n ambulance_proportions.append(mean_ambulance)\n 
other_proportions.append(mean_other)\n all_proportions.append(mean_combined)\n\n plt.figure(figsize=(23, 10))\n proportion_plot = plt.plot(\n ambulance_proportions, \":\", other_proportions, \":\", all_proportions, \"-\"\n )\n plt.title(\n \"Proportion of individuals within target for different capacity thresholds\"\n )\n plt.xlabel(\"Capacity Threshold\")\n plt.ylabel(\"Proportion of Individuals within target\")\n plt.legend(\n [\"Ambulance Patients\", \"Other Patient\", \"All Patients\"], fontsize=\"x-large\"\n )\n\n return proportion_plot", "def summarize(group, fs=None, include_source=True):\n _line_break = '{0:-<120}\\n'.format('')\n tests = sorted(ComparisonBenchmark.groups[group], key=lambda t: getattr(t, 'time_average_seconds'))\n log = StringIO.StringIO()\n log.write('Call statement:\\n\\n')\n log.write('\\t' + tests[0].stmt)\n log.write('\\n\\n\\n')\n fmt = \"{0: <8} {1: <35} {2: <12} {3: <15} {4: <15} {5: <14}\\n\"\n log.write(fmt.format('Rank', 'Function Name', 'Time', '% of Slowest', 'timeit_repeat', 'timeit_number'))\n log.write(_line_break)\n log.write('\\n')\n\n for i, t in enumerate(tests):\n func_name = \"{}.{}\".format(t.classname, t.callable.__name__) if t.classname else t.callable.__name__\n if i == len(tests)-1:\n time_percent = 'Slowest'\n else:\n time_percent = \"{:.1f}\".format(t.time_average_seconds / tests[-1].time_average_seconds * 100)\n log.write(fmt.format(i+1,\n func_name,\n convert_time_units(t.time_average_seconds),\n time_percent,\n t.timeit_repeat,\n t.timeit_number))\n log.write(_line_break)\n\n if include_source:\n log.write('\\n\\n\\nSource Code:\\n')\n log.write(_line_break)\n for test in tests:\n log.write(test.log.getvalue())\n log.write(_line_break)\n\n if isinstance(fs, str):\n with open(fs, 'w') as f:\n f.write(log.getvalue())\n\n elif fs is None:\n print(log.getvalue())\n else:\n try:\n fs.write(log.getvalue())\n except AttributeError as e:\n print(e)", "def plot_test(y_test, y_pred, title = None, xlabel = 'Measured $Y = \\log_2(MIC)$', ylabel = 'Predicted $Y = \\log_2(MIC)$', legend = ['Ideal', 'Result'], groups = None):\n \n fig, ax = plt.subplots(1,1)\n fig.set_figheight(5)\n fig.set_figwidth(5)\n if groups is not None:\n groups_obj = pd.concat([y_test,y_pred], axis=1).groupby(np.array(groups))\n cmap=plt.get_cmap('tab10')\n for name, group in groups_obj:\n # Works only for groups with numeric names that are max cmap length:\n ax.plot(group.iloc[:,0], group.iloc[:,1], marker=\"o\", linestyle=\"\", label=int(name), color = cmap.colors[int(name)])\n ax.legend()\n else:\n ax.scatter(y_test,y_pred, color = 'red')\n ax_max = 10\n if np.max(y_test.values)>ax_max:\n ax_max = np.max(y_test).values\n ax_min = 0\n if np.min(y_test.values)<ax_min:\n ax_min = np.min(y_test.values)\n ax.plot([ax_min, ax_max], [ax_min, ax_max], '--', color='black')\n ax.set_aspect('equal', 'box')\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n #plt.savefig(title+'.pdf')\n plt.savefig(title+'.svg')\n #plt.savefig(title+'.png')#, dpi=600)\n #plt.show()", "def median_absolute_error(self):\n print('Median absolute error regression loss: ' + str(median_absolute_error(self.model.dataset.get_y_test(),\n self.model.get_predicted())))", "def plot(self, plot_cmd=None, tf=lambda y: y):\r\n if not plot_cmd:\r\n plot_cmd = self.plot_cmd\r\n colors = 'bgrcmyk'\r\n pylab.hold(False)\r\n res = self.res\r\n\r\n flatx, flatf = self.flattened()\r\n minf = np.inf\r\n for i in flatf:\r\n minf = min((minf, min(flatf[i])))\r\n addf = 1e-9 - minf if minf <= 0 else 0\r\n for i in 
sorted(res.keys()): # we plot not all values here\r\n if type(i) is int:\r\n color = colors[i % len(colors)]\r\n arx = sorted(res[i].keys())\r\n plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')\r\n pylab.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)\r\n pylab.hold(True)\r\n plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')\r\n pylab.ylabel('f + ' + str(addf))\r\n pylab.draw()\r\n show()\r\n # raw_input('press return')\r\n return self", "def median_absolute_error(y_true, y_pred, *, multioutput=..., sample_weight=...):\n ...", "def plot_objective_multi(df, exp_config, output_dir, show):\n output_file_name = f\"{inspect.stack()[0][3]}.{FILE_EXTENSION}\"\n output_path = os.path.join(output_dir, output_file_name)\n\n plt.figure()\n\n for exp_name, exp_df in df.items():\n\n if \"rep\" in exp_config[\"data\"][exp_name]:\n\n exp_dfs = exp_df\n\n T = np.linspace(0, exp_config[\"t_max\"], 50000)\n\n y_list = []\n for i, df_i in enumerate(exp_dfs):\n df_i = df_i.sort_values(\"timestamp_end\")\n x, y = df_i.timestamp_end.to_numpy(), df_i.objective.cummin().to_numpy()\n f = interp1d(x, y, kind=\"previous\", fill_value=\"extrapolate\")\n y = f(T)\n y_list.append(y)\n\n y_list = np.asarray(y_list)\n y_mean = y_list.mean(axis=0)\n y_std = y_list.std(axis=0)\n y_se = y_std / np.sqrt(y_list.shape[0])\n\n plt.plot(\n T,\n y_mean,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n plt.fill_between(\n T,\n y_mean - 1.96 * y_se,\n y_mean + 1.96 * y_se,\n facecolor=exp_config[\"data\"][exp_name][\"color\"],\n alpha=0.3,\n )\n # plt.fill_between(T,\n # y_mean-1.96*y_std,\n # y_mean+1.96*y_std,\n # facecolor=exp_config[\"data\"][exp_name][\"color\"],\n # alpha=0.3)\n else:\n exp_df = exp_df.sort_values(\"timestamp_end\")\n x, y = exp_df.timestamp_end.to_numpy(), exp_df.objective.cummax().to_numpy()\n if \"hartmann6D\" in exp_name:\n y = y + 3.32237 # hartmann6D\n\n plt.plot(\n x,\n y,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n marker=exp_config[\"data\"][exp_name].get(\"marker\", None),\n markevery=len(x) // 5,\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n\n ax = plt.gca()\n ticker_freq = exp_config[\"t_max\"] / 5\n ax.xaxis.set_major_locator(ticker.MultipleLocator(ticker_freq))\n ax.xaxis.set_major_formatter(minute_major_formatter)\n\n if exp_config.get(\"title\") and PRINT_TITLE:\n plt.title(exp_config.get(\"title\"))\n\n if MODE == \"min\":\n plt.legend(loc=\"upper right\")\n else:\n plt.legend(loc=\"lower right\")\n\n plt.ylabel(exp_config.get(\"ylabel\", \"Objective\"))\n plt.xlabel(\"Search time (min.)\")\n\n if exp_config.get(\"ylim\"):\n plt.ylim(*exp_config.get(\"ylim\"))\n\n if exp_config.get(\"xlim\"):\n plt.xlim(*exp_config.get(\"xlim\"))\n else:\n plt.xlim(0, exp_config[\"t_max\"])\n\n if exp_config.get(\"yscale\"):\n plt.yscale(exp_config.get(\"yscale\"))\n\n plt.grid()\n plt.tight_layout()\n plt.savefig(output_path, dpi=360)\n if show:\n plt.show()\n plt.close()", "def visualization(obj_value):\n for n in range(3):\n plt.loglog(obj_value[n],\".\");\n\n plt.ylabel('objective values');\n plt.xlabel('iteration counter');\n plt.title('objective values for each pair against iterations');\n plt.legend();\n plt.show();", "def expression_peaks(cluster, magnitude, group1 = [ \"SP\", \"SL06\", \"SL12\", \"SL24\",\"SL48\", \"SL96\" ], group2 = [ \"FL\", 
\"FP06\", \"FP12\", \"FP24\",\"FP48\", \"FP96\" ]):\n if cluster.averaged == False:\n cluster.average_matrix(group1 + group2)\n verbalise(\"G\", cluster.sample_header)\n peaklist = {}\n\n for gene in range(cluster.genenumber):\n # for group 1:\n datalist = list(cluster.data_matrix[:,gene])\n maxexpression = max(datalist[:len(group1)])\n maxposn = datalist.index(maxexpression)\n\n # check fold change is sufficient:\n if maxexpression >= magnitude + datalist[0]:\n # check adjacent peaks are not too big:\n # difference of 5.64 corresponds to 2% of the untransformed fpkm value\n # difference of 1.00 corresponds to 50% of the untransformed fpkm value\n if maxposn == len(group1) - 1:\n if (maxexpression - 5.64 < datalist[maxposn - 1] < maxexpression - 1):\n peaklist[cluster.gene_header[gene]] = group1[maxposn]\n\n elif (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5) and \\\n (maxexpression * 0.02 < datalist[maxposn + 1] < maxexpression * 0.5):\n\n peaklist[cluster.gene_header[gene]] = group1[maxposn]\n\n # for group 2:\n maxexpression = max(datalist[len(group1):])\n maxposn = datalist.index(maxexpression)\n\n # check fold change is sufficient for reciprocal swap:\n if maxexpression >= magnitude * datalist[len(group1)]:\n # check adjacent peaks are not too big:\n try:\n if maxposn == len(group1+group2) - 1:\n if (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5):\n peaklist[cluster.gene_header[gene]] = (group1 + group2)[maxposn]\n\n elif (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5) and \\\n (maxexpression * 0.02 < datalist[maxposn + 1] < maxexpression * 0.5):\n\n peaklist[cluster.gene_header[gene]] = (group1 + group2)[maxposn]\n except IndexError as inst:\n verbalise(\"R\", inst)\n verbalise(\"R\", datalist)\n verbalise(\"R\", \"Max is %.3f at position %d\" % (maxexpression, maxposn))\n\n verbalise(\"G\", len(peaklist), \"significant peaks found.\")\n return peaklist", "def plot(self, **kwargs):\n\n # get colors\n colors = kwargs.get(\"colors\", GW_OBSERVATORY_COLORS)\n\n # get Result samples\n self._samples = {\n label: value.posterior\n for label, value in self.results.items()\n if isinstance(value, Result)\n }\n\n # get Grid posteriors\n self._grids = {\n label: [value, value.ln_evidence] # store grid and log evidence\n for label, value in self.results.items()\n if isinstance(value, Grid)\n }\n\n # apply offsets for slightly nicer plots axes\n self.parameter_offsets = {parameter: 0.0 for parameter in self.parameters}\n if len(self._grids) == 0 and len(self._samples) == 1:\n for label in self._samples:\n for parameter in self.parameters:\n srange = [\n np.min(self._samples[label][parameter]),\n np.max(self._samples[label][parameter]),\n ]\n label_suffix = \"\"\n\n # offset values\n median = np.median(self._samples[label][parameter])\n relwidth = np.abs((srange[1] - srange[0]) / median)\n\n if relwidth < 1e-4:\n offsetstr = f\"{median:.4e}\"\n a, b = offsetstr.split(\"e\")\n\n if np.abs(int(b)) < 3:\n offsetstr = f\"{median:.4f}\"\n offset = float(offsetstr)\n else:\n offset = float(offsetstr)\n offsetstr = a + rf\"\\!\\times\\!10^{{{int(b)}}}\"\n\n self.parameter_offsets[parameter] = offset\n\n self._samples[label][parameter] -= offset\n label_suffix = rf\" [${{\\scriptstyle {offsetstr}}}$]\"\n\n self.latex_labels[parameter] += label_suffix\n\n colordicts = []\n for j, res in enumerate([self._samples, self._grids]):\n colordicts.append({})\n for i, key in enumerate(res):\n if key in colors:\n colordicts[-1][key] = colors[key]\n elif 
key.lower() == \"joint\":\n # if using \"Joint\" as the multi-detector analysis key, set the color to black\n colordicts[-1][key] = \"k\"\n else:\n # use PESummary color cycle\n colordicts[-1][key] = list(colorcycle)[\n (j * 2 + i) % len(colorcycle)\n ]\n\n # store original keywords arguments\n origkwargs = kwargs.copy()\n\n # plot samples\n fig = None\n if len(self._samples) > 0:\n kwargs[\"colors\"] = list(colordicts[0].values())\n if self._num_parameters == 1:\n fig = self._1d_plot_samples(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_samples(**kwargs)\n else:\n fig = self._nd_plot_samples(**kwargs)\n\n # restore keywords\n kwargs = origkwargs\n\n if len(self._grids) > 0:\n kwargs[\"colors\"] = list(colordicts[1].values())\n if fig is not None and \"fig\" not in kwargs:\n kwargs[\"fig\"] = fig\n if self._num_parameters == 1:\n fig = self._1d_plot_grid(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_grid(**kwargs)\n else:\n fig = self._nd_plot_grid(**kwargs)\n\n # add further figure information\n if self._num_parameters == 1:\n ax = fig.gca()\n\n # set figure bounds if outside defaults\n if self.parameters[0] in DEFAULT_BOUNDS:\n _set_axes_limits(ax, self.parameters[0], axis=\"x\")\n\n # add injection values\n if self.injection_parameters is not None:\n if self.injection_parameters[self.parameters[0]] is not None:\n ax.axvline(\n (\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]]\n ),\n color=kwargs.get(\"injection_color\", \"k\"),\n linewidth=1,\n )\n elif self._num_parameters == 2:\n if \"triangle\" in self.plottype:\n a1, a2, a3 = fig[1:]\n order = [\"x\", \"y\"] if self.plottype == \"triangle\" else [\"y\", \"x\"]\n params = (\n self.parameters[:2]\n if self.plottype == \"triangle\"\n else self.parameters[1::-1]\n )\n\n # set figure bounds if outside defaults\n for param, axes, axis in zip(params, [[a1, a2], [a2, a3]], order):\n for ax in axes:\n _set_axes_limits(ax, param, axis=axis)\n\n self.fig = fig\n return self.fig", "def optimizeEps(group, rep, fig=None):\n\tX = group[[\"ae1\", \"ae2\"]].to_numpy()\n\tneigh = NearestNeighbors(n_neighbors=2)\n\tnbrs = neigh.fit(X)\n\tdist, idx = nbrs.kneighbors(X)\n\t\n\tdist = np.sort(dist, axis=0)\n\td = dist[:,1]\n\tdist[:,0] = idx[:,0]\n\t#print(dist)\n\t#if fig is not None:\n\t#ax=fig.add_subplot(10,10,rep)\n\t#ax.plot(d)\n\t#plt.show()\n\t\n\trotor = Rotor()\n\trotor.fit_rotate(dist)\n\telbow_index = rotor.get_elbow_index()\n\t#ax.axhline(dist[elbow_index][1])\n\treturn(dist[elbow_index][1])", "def plot_comparisons(self, exact, blocked, blockederr, axdelta=None):\n if axdelta is None:\n axdelta = plt.gca()\n delta = self.means - exact\n axdelta.errorbar(list(range(1, self.max_dets)), delta[0], yerr=self.stderr[0], label='independent')\n axdelta.errorbar(list(range(1, self.max_dets)), delta[1], yerr=self.stderr[1], label='correlated')\n axdelta.axhline(delta[0, 0], linestyle=':', color='grey', label='reference')\n axdelta.axhline(0, linestyle='-', linewidth=1, color='black')\n if blocked:\n axdelta.axhline(blocked-exact, linestyle='--', color='darkgreen', label='reblocked')\n if blockederr:\n axdelta.fill_between([0, self.max_dets], [blocked-exact-blockederr,blocked-exact-blockederr],\n [blocked-exact+blockederr,blocked-exact+blockederr], color='green', alpha=0.2)\n axdelta.set_xlabel('Number of determinants in estimator')\n axdelta.set_ylabel(r'$E-E_\\mathrm{CCSD}$ / ha')\n axdelta.legend()\n 
return axdelta", "def psi(bench, target, group, print_df=True):\n labels_q = np.percentile(\n bench, [(100.0 / group) * i for i in range(group + 1)], interpolation=\"nearest\")\n\n # This is the right approach when you have not a lot of unique value\n ben_pct = (pd.cut(bench, bins=np.unique(labels_q),\n include_lowest=True).value_counts()) / len(bench)\n target_pct = (pd.cut(target, bins=np.unique(labels_q),\n include_lowest=True).value_counts()) / len(target)\n target_pct = target_pct.sort_index() # sort the index\n ben_pct = ben_pct.sort_index() # sort the index\n psi = sum((target_pct - ben_pct) * np.log(target_pct / ben_pct))\n # Print results for better understanding\n if print_df:\n results = pd.DataFrame({'ben_pct': ben_pct.values,\n 'target_pct': target_pct.values},\n index=ben_pct.index)\n return {'data': results, 'statistic': psi}\n return psi", "def plot_groups(\n self,\n lim=4,\n center=(0, 0),\n x1='',\n y1='',\n x2='',\n y2='',\n linecolor='k',\n alpha_group=1,\n legend=False,\n pause=False):\n ax = plt.gca()\n ax.clear()\n\n shape = (\n np.sqrt(len(self.contours)).astype(int),\n np.sqrt(len(self.contours)).astype(int))\n\n plt.contour(\n -self.contours.x.values.reshape(shape),\n self.contours.y.values.reshape(shape),\n self.contours.f.values.reshape(shape),\n colors='grey',\n levels=np.arange(2, int(np.max(self.contours.f) + 1), 1),\n linewidths=1,\n zorder=1)\n\n plt.contour(\n -self.contours.x.values.reshape(shape),\n self.contours.y.values.reshape(shape),\n self.contours.f.values.reshape(shape),\n colors='k',\n levels=self.levels,\n linewidths=2,\n zorder=1)\n\n if self.target:\n plt.plot([-lim * 0.05, -lim * 0.025], [0, 0], color='k')\n plt.plot([lim * 0.05, lim * 0.025], [0, 0], color='k')\n plt.plot([0, 0], [-lim * 0.05, -lim * 0.025], color='k')\n plt.plot([0, 0], [lim * 0.05, lim * 0.025], color='k')\n\n plt.scatter(\n -self.df_gxys.loc[self.gxys.group_peak == 1, 'x'],\n self.df_gxys.loc[self.gxys.group_peak == 1, 'y'],\n edgecolor='k',\n facecolor='none',\n linewidth=2,\n s=32,\n zorder=5)\n\n inds = np.argsort(\n np.sqrt(\n self.df_gxys.loc[self.gxys.group_peak == 1, 'x'] ** 2 +\n self.df_gxys.loc[self.gxys.group_peak == 1, 'y'] ** 2))\n\n marker = 'o'\n\n alpha = np.ones_like(self.df_gxys['x'])\n alpha[\n (self.df_gxys['group_no'] > 1) &\n (self.df_gxys['group_no'] < alpha_group)] = 0.25\n\n for group_no in [0, 1]:\n plt.scatter(\n -self.df_gxys.loc[lambda x: x['group_no'] == group_no, 'x'],\n self.df_gxys.loc[lambda x: x['group_no'] == group_no, 'y'],\n c=f'C{group_no}',\n s=30,\n zorder=2,\n marker=marker,\n alpha=alpha[self.df_gxys['group_no'] == group_no][0])\n\n marker_ = np.tile(np.array(['o', 's', 'D', '^', 'x']), 2000)\n\n for i, group_no in enumerate(\n self.df_gxys.loc[lambda x: x['group_no'] > 1, 'group_no']):\n group = self.df_gxys['group_no'] == group_no\n\n color = f'C{(i % 7) + 2}'\n marker = marker_[np.floor((i + 2) / 10).astype(int)]\n\n plt.scatter(\n -self.df_gxys.loc[group, 'x'],\n self.df_gxys.loc[group, 'y'],\n c=color,\n s=30,\n zorder=2,\n marker=marker,\n label=f'Group {group_no}: {group.sum()}',\n alpha=alpha[group][0])\n\n if (x1 != '') & (y1 != '') & (x2 != '') & (y2 != ''):\n plt.plot(\n [-x1, -x2],\n [y1, y2],\n linestyle='--',\n color=linecolor,\n zorder=3)\n if (x1 != '') & (y1 != ''):\n plt.scatter(\n -x1,\n y1,\n marker='o',\n edgecolor='r',\n facecolor='none',\n zorder=4,\n s=80)\n if (x2 != '') & (y2 != ''):\n plt.scatter(-x2, y2, marker='x', color='r', zorder=4, s=80)\n\n plt.title(self.title, zorder=6)\n\n median = 
np.argsort(self.df_gxys['x'])[len(self.df_gxys['x']) // 2]\n\n if center == (0, 0):\n if not self.target:\n plt.xlim(\n self.df_gxys['x'][median] - lim,\n self.df_gxys['x'][median] + lim)\n plt.ylim(\n self.df_gxys['y'][median] - lim,\n self.df_gxys['y'][median] + lim)\n\n else:\n plt.xlim(-lim, lim)\n plt.ylim(-lim, lim)\n\n else:\n plt.xlim(center[0] - lim, center[0] + lim)\n plt.ylim(center[1] - lim, center[1] + lim)\n\n plt.gca().set_aspect('equal', 'box')\n\n plt.xlabel('x (Mpc)')\n\n if legend:\n plt.legend(loc='lower right', ncol=4)\n\n if self.pause:\n plt.pause(0.001)", "def _plot_comparison_repeatables(ax_abs, ax_per, ax_mag, pan, field, unit,\n other_program_name, **kw):\n\n #plot absolute error\n h_abs = ax_abs.plot(pan['absolute-difference'][field].index.values,\n pan['absolute-difference'][field].values,\n color=mpl.rcParams['axes.labelcolor'], zorder=-2)\n ax_abs.set_ylabel('Absolute Difference ' + unit)\n #plot percentage error\n h_per = ax_per.plot(pan['percent-difference'][field].index.values,\n pan['percent-difference'][field].values,\n color='firebrick', zorder=-1)\n ax_per.set_ylabel('Percent Difference', color='firebrick')\n #set error axes legend\n #ax_per.legend(h_abs + h_per, ['Absolute Difference','Percent Difference'], **_leg_kw)\n #ax_per.get_legend().set_zorder(1)\n #plot full results profiles\n kw['H'] += [ax_mag.plot(pan['%s-results' % other_program_name][field],\n color=_colormap[1])[0],\n ax_mag.plot(pan['emf.fields-results'][field],\n color=_colormap[0])[0]]\n kw['L'] += [other_program_name + ' Results', 'emf.fields Results']\n ax_mag.set_xlabel('Distance (ft)')", "def compare_ratios(path='/Volumes/OptiHDD/data/pylith/3d/agu2013/output',\n\t\t\t\tsteps=['step01','step02'],\n\t\t\t\t#labels='',\n\t\t\t\tshow=True,\n\t\t\t\txscale=1e3,\n\t\t\t\tyscale=1e-2):\n\tplt.figure()\n\t#path = '/Users/scott/Desktop/elastic'\n\n\t# Deep source\n\tlabels = ['no APMB', 'APMB']\n\tdeep = {}\n\tuzmax = 0.824873455364\n\t# NOT sure why hardcoded...\n\tuzmax = 1\n\tfor i,outdir in enumerate(steps):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 30.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'o-',ms=4,lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'o--',ms=4,lw=4,color=l.get_color()) #mfc='none' transparent\n\t\tdeep[outdir] = uz_fem/uz_fem\n\n\n\t# Shallow Source\n\tshallow = {}\n\tuzmax = 0.949652827795 # Why?\n\tfor i,outdir in enumerate(['step11','step12']):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 20.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'.-', mfc='w', lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'.--',lw=4, mfc='w',color=l.get_color()) #mfc='none' transparent\n\n\t\tshallow[outdir] = uz_fem/ur_fem\n\n\t# 
Annotate\n\tplt.axhline(color='k',lw=0.5)\n\t#plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\t#plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.legend()\n\tplt.grid()\n\t#plt.ylim(-0.5, 3.5)\n\t#plt.savefig('deep.png',bbox_inches='tight')\n\t#plt.savefig('shallow.png',bbox_inches='tight')\n\n\t# normalized\n\tplt.ylim(-0.5, 4)\n\tplt.xlim(0,10)\n\tplt.xlabel('Normalized Radial Distance [R / D]')\n\tplt.ylabel('Normalized Displacement [U / Uz_max]')\n\t#plt.savefig('normalized_deep.png',bbox_inches='tight')\n\tplt.savefig('normalized_shallow.png',bbox_inches='tight')\n\n\n\t# Plot ratios of uz versus NOTE: this plot is confusing,,, just keep ratio of uz_max to ur_max\n\t'''\n\tplt.figure()\n\tplt.plot(x_fem, deep['step01'], label='Deep no APMB')\n\tplt.plot(x_fem, deep['step02'], label='Deep w/ APMB')\n\tplt.plot(x_fem, shallow['step11'], label='Shallow no APMB')\n\tplt.plot(x_fem, shallow['step12'], label='Shallow w/ APMB')\n\tplt.xlabel('Distance [km]') #NOTE: maybe plot normailzed X-axis (R-d)\n\t#plt.xlabel('Normalized Distance [R/d]')\n\tplt.ylabel('Ratio [Uz/Ur]')\n\tplt.title('Ratio of vertical to radial displacement')\n\tplt.legend()\n\tplt.show()\n\t'''", "def plot_posteriors_conditions(self, *args, **kwargs):\n group_nodes = self.get_group_nodes()\n for dep in self.depends_on.keys():\n nodes = group_nodes.loc[group_nodes.knode_name == dep]\n if all(nodes.hidden == True):\n continue\n analyze.plot_posterior_nodes(nodes[\"node\"], *args, **kwargs)", "def plot_metric_values(self, threshold=0):\n epochs_range = np.arange(threshold, len(self.accuracies), 1)\n plt.plot(epochs_range, self.accuracies[threshold:], color='red', marker='o')\n plt.title('Accuracy on test data. Eta={:.2f} Lambda={:2.2f}'.format(self.eta, self.lambda_r))\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.grid(True)\n plt.show()", "def decision_plot(self, X, y):\n\n y = self._slice_target_index(y=y)\n\n for index in range(_n_targets(y)):\n if sklearn.utils.multiclass.type_of_target(y) == 'continuous-multioutput':\n self.fit(X, y.iloc[:, index].values.ravel(order='K'))\n else:\n self.fit(X, y)\n explainer, shap_values = self.explainer(X=X)\n shap.decision_plot(base_value=explainer.expected_value, shap_values=shap_values,\n feature_names=list(X.columns), show=self.show)", "def plot_series(groups, series):\n fig, ax = plt.subplots()\n ax.set_xlabel(\"Iterations\")\n ax.set_ylabel(series)\n\n for gkey, gval in groups.items():\n args = dict(gkey)\n\n series_values = get_series(gval, series)\n interval_size = args['test_interval']\n interval_count = series_values.shape[1] - 1\n\n x = np.arange(0, interval_size * interval_count + 1, step=interval_size)\n mean = np.mean(series_values, axis=0)\n std = np.std(series_values, axis=0)\n\n ax.plot(x, mean, label=format_group_key(gkey))\n ax.fill_between(x, mean + std, mean - std, alpha=0.2)\n\n ax.legend()\n return fig, ax", "def make_accuracy_plot(ax,\n groundtruth_boxes,\n hpu_boxes,\n cpu_boxes,\n hpu_strategy,\n label,\n N=10,\n num_graph_points=20,\n match_mode=\"ellipse\",\n):\n print \"Making plot for\", repr(label)\n print \"TODO: this should graph seconds per image\"\n mix_fractions = np.linspace(0, 1.0, num_graph_points)\n # Plot confidence intervals\n min_ci = []\n max_ci = []\n mean_accs = []\n stderr_accs = []\n for mix_fraction in mix_fractions:\n accuracies = [\n maximum_F_score(\n groundtruth_boxes,\n hpu_strategy(hpu_boxes, cpu_boxes, mix_fraction),\n match_mode=match_mode,\n )\n for _ in xrange(N)\n ]\n 
mean_accs.append(np.mean(accuracies))\n stderr_accs.append(np.std(accuracies, ddof=1) / np.sqrt(N))\n #print mix_fraction, np.mean(accuracies)\n ax.errorbar(mix_fractions, mean_accs, stderr_accs, label=label)\n ax.set_xlabel(\"Fraction of HPU-labeled images\")\n ax.set_ylabel(\"Maximum F-score\")", "def plot_transfer_effects(self, sort: str = \"mae_base\") -> None:\n tmp = {}\n for transfer_key, mae_transfer in self.mae_transfer.items():\n source_port_name, target_port_name, _ = self._decode_transfer_key(transfer_key)\n mae_source_base = self._get_mae_base(transfer_key, group=False)\n\n if target_port_name in tmp:\n tmp[target_port_name][0].append(source_port_name)\n tmp[target_port_name][1].append(mae_source_base)\n tmp[target_port_name][2].append(mae_transfer)\n else:\n tmp[target_port_name] = ([source_port_name], [mae_source_base], [mae_transfer])\n\n def compute_metrics(key, val: Tuple[List[str], List[float], List[float]]) -> Tuple[str, str, float, str, float,\n str, float, str, float,\n float, float]:\n \"\"\"\n :return: Tuple in form of\n transfer_port_name,\n max_mae_source_port_name, max_mae_source_base,\n min_mae_source_port_name, min_mae_source_base,\n max_mae_transfer_port_name, max_mae_transfer,\n min_mae_transfer_port_name, min_mae_transfer,\n avg_mae_base,\n avg_mae_transfer\n \"\"\"\n max_mae_base = max(val[1])\n max_mae_base_port_name = val[0][val[1].index(max_mae_base)]\n min_mae_base = min(val[1])\n min_mae_base_port_name = val[0][val[1].index(min_mae_base)]\n max_mae_transfer = max(val[2])\n max_mae_transfer_port_name = val[0][val[2].index(max_mae_transfer)]\n min_mae_transfer = min(val[2])\n min_mae_transfer_port_name = val[0][val[2].index(min_mae_transfer)]\n return (key, max_mae_base_port_name, max_mae_base, min_mae_base_port_name, min_mae_base,\n max_mae_transfer_port_name, max_mae_transfer, min_mae_transfer_port_name, min_mae_transfer,\n sum(val[1]) / len(val[1]), sum(val[2]) / len(val[2]))\n\n result = [compute_metrics(key, val) for key, val in tmp.items()]\n\n if sort == \"mae_base\":\n result.sort(key=lambda r: r[0])\n result = list(map(list, zip(*result)))\n\n path = os.path.join(self.output_dir, \"eval\", f\"transfer-effects_{sort}.png\")\n plot_transfer_effects(result[0], result[1], result[2], result[3], result[4], result[5], result[6], result[7],\n result[8], result[9], result[10], path)", "def summary_plot(self, X, y, plot_type='dot'):\n\n assert(plot_type in _SHAP_SUMMARY_PLOT_CHOICE)\n\n y = self._slice_target_index(y=y)\n\n for index in range(_n_targets(y)):\n if sklearn.utils.multiclass.type_of_target(y) == 'continuous-multioutput':\n self.fit(X, y.iloc[:, index].values.ravel(order='K'))\n else:\n self.fit(X, y)\n _, shap_values = self.explainer(X=X)\n\n shap.summary_plot(shap_values=shap_values, features=X,\n plot_type=plot_type, feature_names=list(X.columns),\n show=self.show)", "def PlotComparison(result_values, descrete, continuous, jitter=100):\n df = result_values.copy()\n np.random.seed(0)\n df[continuous] = df[continuous] + np.random.randint(low=-jitter, high=jitter, size=len(df))\n base = alt.Chart(df).transform_calculate(\n ymin=\"datum.mean-2*datum.std\",\n ymax=\"datum.mean+2*datum.std\",\n ).properties(\n title = '[Interactive] Accuracy by Params'\n )\n \n points = base.mark_point(\n filled=True,\n size=10\n ).encode(\n x=continuous,\n y=alt.Y('mean:Q'),#, scale=alt.Scale(domain=(0.55, 0.7))),\n color=descrete,\n tooltip=['mean','std']\n )\n\n errorbars = base.mark_errorbar().encode(\n x=continuous,\n y=alt.Y(\"ymin:Q\",title='Accuracy'),\n 
y2=\"ymax:Q\",\n color=descrete,\n )\n\n return(points + errorbars)", "def plot_aggregate(values, label='', smth_wnd=50, plot_mean=False, plot_stdev=False, plot_med=True, plot_iqr=True,\n\t\t\t\t plot_ext=False):\n\tif label != '':\n\t\tlabel += ' '\n\n\tsmoothen = True if 0 < 3 * smth_wnd < values.shape[1] else False\n\n\tx_values = np.arange(1, values.shape[1] + 1)\n\n\tmeans = np.mean(values, axis=0)\n\tif smoothen:\n\t\tmeans = pd.Series(means).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\tif plot_stdev:\n\t\tstd_dev = np.std(values, axis=0)\n\n\t\tif smoothen:\n\t\t\tstd_dev = pd.Series(std_dev).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\tplt.fill_between(x_values, means - std_dev, means + std_dev, alpha=0.25, label=label + '1×σ')\n\n\tif plot_mean:\n\t\tplt.plot(x_values, means, '--', label=label + 'Mean')\n\n\tif plot_iqr:\n\t\tiqr_25 = np.percentile(values, 25, axis=0)\n\t\tiqr_75 = np.percentile(values, 75, axis=0)\n\n\t\tif smoothen:\n\t\t\tiqr_25 = pd.Series(iqr_25).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\t\t\tiqr_75 = pd.Series(iqr_75).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\tplt.fill_between(x_values, iqr_25, iqr_75, alpha=0.45, label=label + 'IQR')\n\n\tif plot_med:\n\t\tmedians = np.percentile(values, 50, axis=0)\n\n\t\tif smoothen:\n\t\t\tmedians = pd.Series(medians).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\tplt.plot(x_values, medians, '--', label=label + 'Median', linewidth=1.5)\n\n\tif plot_ext:\n\t\text_min = np.min(values, axis=0)\n\t\text_max = np.max(values, axis=0)\n\n\t\tif smoothen:\n\t\t\text_min = pd.Series(ext_min).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\t\t\text_max = pd.Series(ext_max).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\tplt.fill_between(x_values, ext_min, ext_max, alpha=0.125, label=label + 'Extremes')", "def plot_observed_predictions(self):\n \n # Plot of X vs Y\n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1) \n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(self.phd_filter['estimated_positions'][k][0], self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.phd_filter['estimated_positions'].keys(): \n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def print_summary(accuracies, group, df):\n\n p_ids = np.unique(group)\n print(\"Accuracies: \")\n for accuracy, p_id in zip(accuracies, p_ids):\n print(f\"Participant {p_id}: accuracy = {accuracy}\")\n num_window_baseline = len(df[(df['id'] == p_id) & (df['is_hot'] == 0)].to_numpy())\n num_window_pain = len(df[(df['id'] == p_id) & (df['is_hot'] == 1)].to_numpy())\n print(f\"Baseline = {num_window_baseline}\")\n print(f\"Pain = {num_window_pain}\")\n print(f\"Ratio Baseline/Pain = {num_window_baseline / num_window_pain}\")\n print(\"------\")\n\n print(f\"Mean accuracy: {np.mean(accuracies)}\")", "def test_and_plot_results(self, end, nb_samples):\n self.test(end, nb_samples)\n 
self.plot_results()", "def plot_cost_over_time(self, rh, traj, output=\"performance_over_time.png\",\n validator=None):\n self.logger.debug(\"Estimating costs over time for best run.\")\n validator.traj = traj # set trajectory\n time, configs = [], []\n\n for entry in traj:\n time.append(entry[\"wallclock_time\"])\n configs.append(entry[\"incumbent\"])\n\n self.logger.debug(\"Using %d samples (%d distinct) from trajectory.\",\n len(time), len(set(configs)))\n\n if validator.epm: # not log as validator epm is trained on cost, not log cost\n epm = validator.epm\n else:\n self.logger.debug(\"No EPM passed! Training new one from runhistory.\")\n # Train random forest and transform training data (from given rh)\n # Not using validator because we want to plot uncertainties\n rh2epm = RunHistory2EPM4Cost(num_params=len(self.scenario.cs.get_hyperparameters()),\n scenario=self.scenario)\n X, y = rh2epm.transform(rh)\n self.logger.debug(\"Training model with data of shape X: %s, y:%s\",\n str(X.shape), str(y.shape))\n\n types, bounds = get_types(self.scenario.cs, self.scenario.feature_array)\n epm = RandomForestWithInstances(types=types,\n bounds=bounds,\n instance_features=self.scenario.feature_array,\n #seed=self.rng.randint(MAXINT),\n ratio_features=1.0)\n epm.train(X, y)\n\n ## not necessary right now since the EPM only knows the features\n ## of the training instances\n # use only training instances\n #=======================================================================\n # if self.scenario.feature_dict:\n # feat_array = []\n # for inst in self.scenario.train_insts:\n # feat_array.append(self.scenario.feature_dict[inst])\n # backup_features_epm = epm.instance_features\n # epm.instance_features = np.array(feat_array)\n #=======================================================================\n\n # predict performance for all configurations in trajectory\n config_array = convert_configurations_to_array(configs)\n mean, var = epm.predict_marginalized_over_instances(config_array)\n\n #=======================================================================\n # # restore feature array in epm\n # if self.scenario.feature_dict:\n # epm.instance_features = backup_features_epm\n #=======================================================================\n\n mean = mean[:, 0]\n var = var[:, 0]\n uncertainty_upper = mean+np.sqrt(var)\n uncertainty_lower = mean-np.sqrt(var)\n if self.scenario.run_obj == 'runtime': # We have to clip at 0 as we want to put y on the logscale\n uncertainty_lower[uncertainty_lower < 0] = 0\n uncertainty_upper[uncertainty_upper < 0] = 0\n\n # plot\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_ylabel('performance')\n ax.set_xlabel('time [sec]')\n ax.plot(time, mean, 'r-', label=\"estimated performance\")\n ax.fill_between(time, uncertainty_upper, uncertainty_lower, alpha=0.8,\n label=\"standard deviation\")\n ax.set_xscale(\"log\", nonposx='clip')\n if self.scenario.run_obj == 'runtime':\n ax.set_yscale('log')\n\n # ax.set_ylim(min(mean)*0.8, max(mean)*1.2)\n # start after 1% of the configuration budget\n ax.set_xlim(min(time)+(max(time) - min(time))*0.01, max(time))\n\n ax.legend()\n plt.tight_layout()\n fig.savefig(output)\n plt.close(fig)", "def plot_coupling_grid(baseline_group, fits_groups, metrics, fax=None):\n n_algorithms = len(fits_groups)\n n_metrics = len(metrics)\n\n if fax is None:\n fig, axes = plt.subplots(n_metrics, n_algorithms,\n figsize=(3 * n_algorithms, 3 * n_metrics))\n else:\n fig, axes = fax\n\n # iterate over metrics\n for row_idx, metric in 
enumerate(metrics):\n if metric == 'selection_ratio':\n baseline_coefs = baseline_group['coupling_coefs'][:]\n baseline_selection_ratio = \\\n calculate_selection_ratio(baseline_coefs).mean(axis=0)\n\n # iterate over algorithms\n for col_idx, algorithm in enumerate(fits_groups):\n if metric == 'selection_ratio':\n # calculate selection ratio for algorithm\n coefs = algorithm['coupling_coefs'][:]\n selection_ratio = calculate_selection_ratio(coefs).mean(axis=0)\n\n # plot direct comparison\n axes[row_idx, col_idx].scatter(\n baseline_selection_ratio,\n selection_ratio,\n alpha=0.5,\n color='k',\n edgecolor='w')\n else:\n axes[row_idx, col_idx].scatter(\n baseline_group[metric][:].mean(axis=0),\n algorithm[metric][:].mean(axis=0),\n alpha=0.5,\n color='k',\n edgecolor='w')\n\n return fig, axes", "def ecdf(data, group_by=None, targets=None, ax=None, **kwargs):\n text_color = plt.rcParams.get('ytick.color')\n linewidth = 2\n # Handle keyword arguments\n for k, v in kwargs.items():\n if k not in ['linewidth']:\n raise TypeError('ecdf got an unexpeted keyword argument: {}'.format(k))\n else:\n if k == 'linewidth':\n linewidth = v\n # Deal with input data\n if group_by is not None:\n if type(data) == pd.core.frame.DataFrame:\n print(\"Grouping DataFrame by {}\".format(group_by))\n print(\"Target Features:\", targets)\n if type(targets) == str:\n targets = [targets]\n else:\n try:\n it = iter(targets)\n except:\n targets = [targets]\n cols = targets + [group_by]\n data = data[cols]\n variables = data.columns[:-1]\n data = data.groupby(group_by)\n else:\n return(\"Error: only DataFrame input works with group_by functionality\")\n else: \n if type(data) == pd.core.series.Series:\n variables = [data.name]\n elif type(data) == pd.core.frame.DataFrame:\n if targets is None:\n variables = list(data.columns)\n else:\n if type(targets) == str:\n targets = [targets]\n else: \n try:\n it = iter(targets)\n except:\n targets = [targets]\n print(\"Target Features:\", targets)\n variables = targets\n elif type(data) == pd.core.groupby.generic.DataFrameGroupBy:\n variables = list(data.obj.columns)\n else:\n data = pd.Series(data, name='data')\n variables = [data.name]\n \n \n if type(data) == pd.core.groupby.generic.DataFrameGroupBy:\n for variable in variables:\n if not ax:\n fig, ax = plt.subplots(figsize=(12,8))\n max_x = 0\n for name, group in data:\n x = np.sort(group[variable])\n n = len(group)\n y = np.arange(1, n+1) / n\n ax.plot(x, y, marker='.', label=name, alpha=0.7, linewidth=linewidth)\n if max(x) > max_x:\n max_x = max(x)\n #max_x = 0\n ax.axhline(y=0.5, ls=':', color='gray')\n ax.axhline(y=0.05, ls=':', color='gray')\n ax.axhline(y=0.95, ls=':', color='gray')\n ax.annotate('0.5', xy=(max_x, 0.47))\n ax.annotate('0.95', xy=(max_x, 0.92))\n ax.annotate('0.05', xy=(max_x, 0.02))\n ax.legend()\n plt.title(\"ECDF for feature: {}\".format(variable), color=text_color)\n plt.show()\n \n else:\n n = len(data)\n y = np.arange(1, n+1) / n\n if not ax:\n fig, ax = plt.subplots(figsize=(12,8))\n max_x = 0\n for variable in variables:\n if type(data) == pd.core.series.Series:\n x = np.sort(data)\n string = variable\n else:\n x = np.sort(data[variable])\n string = 'Data'\n ax.plot(x, y, marker='.', label=variable)\n if max(x) > max_x:\n max_x = max(x)\n ax.axhline(y=0.5, ls=':', color='gray')\n ax.axhline(y=0.05, ls=':', color='gray')\n ax.axhline(y=0.95, ls=':', color='gray')\n ax.annotate('0.5', xy=(max_x, 0.47))\n ax.annotate('0.95', xy=(max_x, 0.92))\n ax.annotate('0.05', xy=(max_x, 0.02))\n plt.title(\"ECDF 
for {}\".format(string), color=text_color)\n plt.legend()\n plt.show()", "def _plot_comparison(xs, pan, other_program_name, **kw):\n\n pans = ['Bmax', 'Emax']\n units = ['(mG)', '(kV/m)']\n title_app = [', Max Magnetic Field', ', Max Electric Field']\n save_suf = ['-%s-comparison-Bmax' % other_program_name,\n '-%s-comparison-Emax' % other_program_name]\n\n for p,u,t,s in zip(pans, units, title_app, save_suf):\n #figure object and axes\n fig = plt.figure()\n ax_abs = fig.add_subplot(2,1,1)\n ax_per = ax_abs.twinx()\n ax_mag = fig.add_subplot(2,1,2)\n #Bmax\n #init handles and labels lists for legend\n kw['H'], kw['L'] = [], []\n _plot_comparison_repeatables(ax_abs, ax_per, ax_mag, pan, p, u,\n other_program_name, **kw)\n _plot_wires(ax_mag, xs.hot, xs.gnd, pan['emf.fields-results'][p], **kw)\n _check_und_conds([xs], [ax_mag], **kw)\n ax_abs.set_title('Absolute and Percent Difference' + t)\n ax_mag.set_ylabel(p + ' ' + u)\n ax_mag.set_title('Model Results' + t)\n ax_mag.legend(kw['H'], kw['L'], **_leg_kw)\n _color_twin_axes(ax_abs, mpl.rcParams['axes.labelcolor'], ax_per, 'firebrick')\n _format_line_axes_legends(ax_abs, ax_per, ax_mag)\n #_format_twin_axes(ax_abs, ax_per)\n _save_fig(xs.sheet + s, fig, **kw)", "def visualizeTargets():\n\tglobal normalized\n\tintWarmthMap, intCompMap = parser.extractInformation(parser.getMappings(normalized)[0], parser.getMappings(normalized)[1])\n\ttargetMap = targetParser.getMappings()\n\tallCategories = set()\n\tcompAxis = []\n\twarmthAxis = []\n\tfig, ax = plt.subplots()\n\tfor key in intWarmthMap.keys():\n\t\tintegrated = key\n\t\tg1 = integrated.split(\"_\")[0]\n\t\tg2 = integrated.split(\"_\")[1]\n\t\tif g1 != \"\":\n\t\t\tallCategories.add(g1)\n\t\tif g2 != \"\":\n\t\t\tallCategories.add(g2)\n\n\t#adjusts label positions for annotation\n\tcatLabelMap = dict()\n\tfor category in allCategories:\n\t\tif category == \"jewish\":\n\t\t\tcatLabelMap[category] = (-30, -30)\n\t\telif category == \"farmer\": \n\t\t\tcatLabelMap[category] = (0, 20)\n\t\telif category == \"greek\" or category == \"nurse\":\n\t\t \tcatLabelMap[category] = (20, -20)\n\t\telif category == \"british\": \n\t\t\tcatLabelMap[category] = (-30, -30)\n\t\telse:\n\t\t\tcatLabelMap[category] = (-20, 20)\n\t\tcatLabelMap[\"japanese\"] =(-40, 20)\n\n\tfor category in allCategories:\n\t\tx = targetMap[category][1]\n\t\ty = targetMap[category][0]\n\t\tcompAxis.append(x)\n\t\twarmthAxis.append(y)\n\n\n\t\tax.annotate(category, (x,y), xytext = catLabelMap[category],\n textcoords = 'offset points', \n bbox = dict(boxstyle = 'round,pad=0.2', fc = 'yellow', alpha = 0.5),\n arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))\n\n\tplt.xlabel(\"warmth\")\n\tplt.ylabel(\"competence\")\n\tplt.title(\"Individual Ratings\")\n\tax.scatter(compAxis, warmthAxis)\n\tplt.show()", "def get_return_plot(evaluator_list, stride=500):\n values = collections.defaultdict(list)\n for e in evaluator_list:\n values[e.task_name].append(e.stats['return_stats']['episode_totals'])\n values = _map(np.vstack, values)\n means = _map(functools.partial(np.mean, axis=0), values)\n stds = _map(functools.partial(np.std, axis=0), values)\n\n fig, ax = plt.subplots()\n for i, task_name in enumerate(means):\n idx = i % len(_COLORS)\n x = np.arange(len(means[task_name]))\n ax.plot(x, means[task_name], lw=2, color=_COLORS[idx], alpha=.6, label=None)\n ax.plot(x[::stride], means[task_name][::stride], 'o', lw=2,\n marker=_MARKERS[idx], markersize=10, color=_COLORS[idx],\n label=task_name)\n ax.fill_between(x, 
means[task_name] - stds[task_name],\n means[task_name] + stds[task_name], alpha=.4, lw=2,\n color=_COLORS[idx])\n ax.legend()\n ax.set_ylabel('Return')\n ax.set_xlabel('Episode')\n return fig", "def make_results_plot( df, k, reg ):\n\tuid = smalldf['user_id'].values\n\tbid = smalldf['business_id'].values\n\tactual = smalldf['stars'].values\n\tpredicted = np.zeros( len(actual) )\n\tcounter = 0\n\tfor biz_id, user_id in izip( bid, uid ):\n\t\tpredicted[counter] = rating( biz_id, user_id, k = k, reg = reg ) \n\t\tcounter = counter + 1\n\t# compare_results( actual, predicted )", "def fit_and_plot(self, max_iter):\n from matplotlib import pyplot as plt\n from matplotlib import cm\n\n colours = cm.rainbow(np.linspace(0, 1, self.num_classes)) # FIXME: rainbow list -> array\n\n def plot_data(d):\n for c in range(self.num_classes):\n for n in range(self.num_nuisances):\n plt.scatter(*d[c][n].T, c=colours[c])\n plt.waitforbuttonpress()\n\n def plot_mean(th):\n for c in range(self.num_classes):\n for n in range(self.num_nuisances):\n plt.scatter(*th[c][n].mean.T, c=colours[c], marker=\"x\")\n plt.waitforbuttonpress()\n\n plt.ion()\n plt.scatter(*self.data.T)\n plt.waitforbuttonpress()\n\n split_data = self.initialise_clusters_with_kmeans()\n plot_data(split_data)\n thetas = self.maximization(split_data)\n plot_mean(thetas)\n\n for i in range(max_iter):\n plt.clf()\n split_data = self.expectation(thetas)\n plot_data(split_data)\n thetas = self.maximization(split_data)\n plot_mean(thetas)\n return split_data, thetas", "def summary(trace, varnames=None, transform=lambda x: x, stat_funcs=None,\n extend=False, include_transformed=False,\n alpha=0.05, start=0, batches=None):\n\n if varnames is None:\n varnames = get_default_varnames(trace.varnames,\n include_transformed=include_transformed)\n\n if batches is None:\n batches = min([100, len(trace)])\n\n funcs = [lambda x: pd.Series(np.mean(x, 0), name='mean'),\n lambda x: pd.Series(np.std(x, 0), name='sd'),\n lambda x: pd.Series(mc_error(x, batches), name='mc_error'),\n lambda x: _hpd_df(x, alpha)]\n\n if stat_funcs is not None:\n if extend:\n funcs = funcs + stat_funcs\n else:\n funcs = stat_funcs\n\n var_dfs = []\n for var in varnames:\n vals = transform(trace.get_values(var, burn=start, combine=True))\n flat_vals = vals.reshape(vals.shape[0], -1)\n var_df = pd.concat([f(flat_vals) for f in funcs], axis=1)\n var_df.index = ttab.create_flat_names(var, vals.shape[1:])\n var_dfs.append(var_df)\n dforg = pd.concat(var_dfs, axis=0)\n\n if (stat_funcs is not None) and (not extend):\n return dforg\n elif trace.nchains < 2:\n return dforg\n else:\n n_eff = pm.effective_n(trace,\n varnames=varnames,\n include_transformed=include_transformed)\n n_eff_pd = dict2pd(n_eff, 'n_eff')\n rhat = pm.gelman_rubin(trace,\n varnames=varnames,\n include_transformed=include_transformed)\n rhat_pd = dict2pd(rhat, 'Rhat')\n #import pdb; pdb.set_trace()\n # return pd.concat([dforg, n_eff_pd, rhat_pd],\n # axis=1, join_axes=[dforg.index])\n return pd.concat([dforg, n_eff_pd, rhat_pd],axis=1).reindex(dforg.index)", "def evaluate(self, plot):", "def evaluate(pred, ground_truth, target, count_to_level = False, Y_name = None, thresholds = None, print_metrics=True):\n \n if target == 'count':\n \n # fill NaNs with zeroes\n pred = pred.fillna(method = \"ffill\")\n pred = pred.fillna(method = \"bfill\")\n ground_truth = ground_truth.fillna(method = \"ffill\")\n ground_truth = ground_truth.fillna(method = \"bfill\")\n \n # Set negative predictions to zero\n pred[pred < 0] = 0\n 
ground_truth[ground_truth < 0] = 0\n \n # Calculate error metrics\n rmse = mean_squared_error(ground_truth, pred, squared=False)\n mae = mean_absolute_error(ground_truth, pred)\n \n # Calculate error metrics only for crowded moments (p75) \n busy = np.percentile(ground_truth, 75)\n ground_truth_busy = ground_truth[ground_truth > busy].dropna()\n pred_busy = pred[ground_truth > busy].dropna()\n rmse_busy = mean_squared_error(ground_truth_busy, pred_busy, squared=False)\n mae_busy = mean_absolute_error(ground_truth_busy, pred_busy)\n \n # Store error metrics in dict\n error_metrics = dict({'rmse': rmse, 'rmse_busy': rmse_busy, 'mae': mae, 'mae_busy': mae_busy})\n \n if print_metrics:\n print(f\"Root mean squared error: {rmse.round(1)}\")\n print(f\"Root mean squared error (crowded): {rmse_busy.round(1)}\")\n print(f\"Mean absolute error: {mae.round(1)}\")\n print(f\"Mean absolute error (crowded): {mae_busy.round(1)}\")\n \n if count_to_level:\n pred = get_crowd_levels(pred, Y_name, thresholds)\n ground_truth = get_crowd_levels(ground_truth, Y_name, thresholds)\n \n # Confusion matrix\n conf_mat = confusion_matrix(ground_truth, pred)\n \n error_metrics['conf_mat'] = conf_mat\n \n elif target == \"level\":\n \n # Set dtype to category\n pred = pred.astype('category')\n ground_truth = ground_truth.astype('category')\n \n # Forward fill NaNs\n pred = pred.fillna(method = \"ffill\")\n ground_truth = ground_truth.fillna(method = \"ffill\")\n \n # Confusion matrix\n conf_mat = confusion_matrix(ground_truth, pred)\n \n # Classification report (recall, precision, F1)\n class_report = classification_report(ground_truth, pred, output_dict = True)\n class_report = pd.DataFrame(class_report).transpose()\n \n error_metrics = dict({\"conf_mat\": conf_mat, \"class_report\": class_report})\n \n if print_metrics:\n print(f\"Confusion matrix: {conf_mat}\")\n print(f\"Classification report: {class_report}\")\n \n return error_metrics", "def testAlignedProfile(self):\n # Use Plot backend widget to submit mouse events\n widget = self.plot.getWidgetHandle()\n for method in ('sum', 'mean'):\n with self.subTest(method=method):\n # 2 positions to use for mouse events\n pos1 = widget.width() * 0.4, widget.height() * 0.4\n pos2 = widget.width() * 0.6, widget.height() * 0.6\n\n for action in (self.toolBar.hLineAction, self.toolBar.vLineAction):\n with self.subTest(mode=action.text()):\n # Trigger tool button for mode\n action.trigger()\n # Without image\n self.mouseMove(widget, pos=pos1)\n self.mouseClick(widget, qt.Qt.LeftButton, pos=pos1)\n\n # with image\n self.plot.addImage(\n numpy.arange(100 * 100).reshape(100, -1))\n self.mousePress(widget, qt.Qt.LeftButton, pos=pos1)\n self.mouseMove(widget, pos=pos2)\n self.mouseRelease(widget, qt.Qt.LeftButton, pos=pos2)\n\n self.mouseMove(widget)\n self.mouseClick(widget, qt.Qt.LeftButton)\n\n manager = self.toolBar.getProfileManager()\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break", "def convergence():\n fig, axes = plt.subplots(nrows=2, figsize=figsize(aspect=1.2))\n\n # label names\n label1 = str(league.lambda1)\n label2_list = [str(lambda2) for lambda2 in league.lambda2_list]\n\n # point spread and point total subplots\n subplots = [\n (False, [-0.5, 0.5], league.spreads, 'probability spread > 0.5'),\n (True, [200.5], league.totals, 'probability total > 200.5'),\n ]\n\n for ax, (commutes, lines, values, ylabel) in zip(axes, subplots):\n\n # train margin-dependent Elo model\n melo = Melo(lines=lines, commutes=commutes, k=1e-4)\n 
melo.fit(league.times, league.labels1, league.labels2, values)\n\n line = lines[-1]\n\n for label2 in label2_list:\n\n # evaluation times and labels\n times = np.arange(league.times.size)[::1000]\n labels1 = times.size * [label1]\n labels2 = times.size * [label2]\n\n # observed win probability\n prob = melo.probability(times, labels1, labels2, lines=line)\n ax.plot(times, prob)\n\n # true (analytic) win probability\n if ax.is_first_row():\n prob = skellam.sf(line, int(label1), int(label2))\n ax.axhline(prob, color='k')\n else:\n prob = poisson.sf(line, int(label1) + int(label2))\n ax.axhline(prob, color='k')\n\n # axes labels\n if ax.is_last_row():\n ax.set_xlabel('Iterations')\n ax.set_ylabel(ylabel)\n\n set_tight(w_pad=.5)", "def hive_group(G, group_by, ax=None, offset=np.pi / 12):\n nt = utils.node_table(G)\n groups = sorted(nt[group_by].unique())\n\n if ax is None:\n ax = plt.gca()\n\n for grp in groups:\n theta = item_theta(groups, grp) + offset\n radius = 2 * (8 + len(nt[nt[group_by] == grp]) + 1)\n x, y = to_cartesian(radius, theta)\n ha, va = text_alignment(x, y)\n ax.annotate(grp, xy=(x, y), ha=ha, va=va)", "def renderSimulation(self, target, activePatrollers, attackTime, targetPosition):\n\n\t\tt = self.no_of_discrete_time_intervals\n\t\tfSchedule = self.fSchedule\n\t\tpSchedule = self.pSchedule\n\t\tshow_legend = self.show_legend\n\n\t\txaxis = np.array([1.0*(x)/(t-1) for x in range(t)])\n\t\tattack_ferry = target[0]\n\t\tlegendArr = []\n\n\t\t# Attack on target plot\n\t\tattackPosition = game_utility.getNormalizedPosition(target[0], fSchedule, attackTime, target[1])\n\t\tplt.plot([target[1]], [attackPosition], 'ro')\n\t\tlegendArr.append(\"Attack\")\n\n\t\t# Attacked ferry plot\n\t\tplt.plot(xaxis, fSchedule[attack_ferry], '--')\n\t\tlegendArr.append(\"Ferry\" + format(attack_ferry))\n\n\t\t#Other ferry plots\n\t\tfor f in range(len(fSchedule)):\n\t\t\tif(f != attack_ferry):\n\t\t\t\tplt.plot(xaxis, fSchedule[f], '--')\n\t\t\t\tlegendArr.append(\"Ferry\" + format(f))\n\n\t\t#Patroller plots\n\t\tfor p in range(len(pSchedule)):\n\t\t\tplt.plot(xaxis, pSchedule[p], '--')\n\t\t\tlegendArr.append(\"Patroller\" + format(p))\n\n\t\t# Active Patroller\n\t\tfor activeP in range(len(activePatrollers)):\n\t\t\tattackPosition = game_utility.getNormalizedPosition(activeP, pSchedule, attackTime, target[1])\n\t\t\tplt.plot([target[1]], [attackPosition], 'gs')\n\t\t\tlegendArr.append(\"Active patrollers\")\n\n\t\tif(show_legend):\n\t\t\tplt.legend(legendArr, loc=\"upper right\")\n\n\t\tplt.axis([0, 1, 0, 1])\n\t\t#plt.xticks(np.arange(min(xaxis), max(xaxis), 0.04)) # adjust number of ticks\n\t\t#plt.grid()\n\t\tplt.xlabel('Time')\n\t\tplt.ylabel('Distance')\n\t\tplt.show()", "def plot(self,\n plot=True, plot_stats=True,\n splot=True\n #labels=None, numbers=False, origin='upper',\n #numbers_alpha=None, xlabels_vertical=True,\n #numbers_kwargs={},\n #**kwargs\n ):\n externals.exists(\"pylab\", raiseException=True)\n import pylab as P\n\n self.compute()\n # total number of plots\n nplots = plot + splot\n\n # turn off automatic update if interactive\n if P.matplotlib.get_backend() == 'TkAgg':\n P.ioff()\n\n fig = P.gcf()\n P.clf()\n sps = [] # subplots\n\n nplot = 0\n if plot:\n nplot += 1\n sps.append(P.subplot(nplots, 1, nplot))\n xstart = 0\n lines = []\n for s in self.sets:\n nsamples = len(s[0])\n xend = xstart+nsamples\n xs = xrange(xstart, xend)\n lines += [P.plot(xs, s[0], 'b')]\n lines += [P.plot(xs, s[1], 'r')]\n # vertical line\n P.plot([xend, xend], [N.min(s[0]), N.max(s[0])], 
'k--')\n xstart = xend\n if len(lines)>1:\n P.legend(lines[:2], ('Target', 'Prediction'))\n if plot_stats:\n P.title(self.asstring(short='very'))\n\n if splot:\n nplot += 1\n sps.append(P.subplot(nplots, 1, nplot))\n for s in self.sets:\n P.plot(s[0], s[1], 'o',\n markeredgewidth=0.2,\n markersize=2)\n P.gca().set_aspect('equal')\n\n if P.matplotlib.get_backend() == 'TkAgg':\n P.ion()\n P.draw()\n\n return fig, sps", "def _stats_plot(self, element, y, data=None):\n data, x, y = self._process_args(data, None, y)\n\n opts = {'plot': dict(self._plot_opts), 'norm': self._norm_opts,\n 'style': self._style_opts}\n\n ylim = self._plot_opts.get('ylim', (None, None))\n if not isinstance(y, (list, tuple)):\n ranges = {y: ylim}\n return (element(data, self.by, y).redim.range(**ranges).relabel(**self._relabel).opts(**opts))\n\n labelled = ['y' if self.invert else 'x'] if self.group_label != 'Group' else []\n if self.value_label != 'value':\n labelled.append('x' if self.invert else 'y')\n\n if 'xlabel' in self._plot_opts and 'x' not in labelled:\n labelled.append('x')\n if 'ylabel' in self._plot_opts and 'y' not in labelled:\n labelled.append('y')\n\n opts['plot']['labelled'] = labelled\n\n kdims = [self.group_label]\n data = data[list(y)]\n if check_library(data, 'dask'):\n from dask.dataframe import melt\n else:\n melt = pd.melt\n df = melt(data, var_name=self.group_label, value_name=self.value_label)\n ranges = {self.value_label: ylim}\n return (element(df, kdims, self.value_label).redim(**self._redim)\n .redim.range(**ranges).relabel(**self._relabel).opts(**opts))", "def plot_metrics(results, epsilon_pos, epsilon_neg):\n ## Plot risk and fairness gaps as a function of sample size,\n ## with true minimum risk and true fairness gaps for reference.\n\n metrics_Y0 = pd.concat(results['metrics_Y0_noisy'], keys=n_arr)\n metrics_Y0 = metrics_Y0.reset_index().drop(columns='level_1').rename(\n columns={'level_0': 'n'})\n metrics_Y = pd.concat(results['metrics_Y_noisy'], keys=n_arr)\n metrics_Y = metrics_Y.reset_index().drop(columns='level_1').rename(\n columns={'level_0': 'n'})\n metrics = pd.concat([metrics_Y0, metrics_Y])\n\n m = results['metrics_Y0_best']\n risk = m.loc[m.Metric == 'Risk', 'Value'].values[0]\n\n g = sns.FacetGrid(metrics, row='Outcome', col='Metric',\n col_order=['risk', 'gap_FPR', 'gap_FNR'])\n g.map(sns.pointplot, 'n', 'value', order=n_arr)\n g.set_xticklabels(rotation=45)\n\n g.axes[0, 0].hlines(risk, *g.axes[0, 0].get_xlim())\n g.axes[1, 0].hlines(risk, *g.axes[1, 0].get_xlim())\n\n g.axes[0, 1].hlines(epsilon_pos, *g.axes[0, 1].get_xlim())\n g.axes[1, 1].hlines(epsilon_pos, *g.axes[1, 1].get_xlim())\n\n g.axes[0, 2].hlines(epsilon_neg, *g.axes[0, 2].get_xlim())\n g.axes[1, 2].hlines(epsilon_neg, *g.axes[1, 2].get_xlim())", "def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline,\n units, scalings, titles, axes, plot_type, cmap=None,\n gfp=False, window_title=None, spatial_colors=False,\n selectable=True, zorder='unsorted',\n noise_cov=None, colorbar=True, mask=None, mask_style=None,\n mask_cmap=None, mask_alpha=.25, time_unit='s',\n show_names=False, group_by=None, sphere=None):\n import matplotlib.pyplot as plt\n\n # For evoked.plot_image ...\n # First input checks for group_by and axes if any of them is not None.\n # Either both must be dicts, or neither.\n # If the former, the two dicts provide picks and axes to plot them to.\n # Then, we call this function recursively for each entry in `group_by`.\n if plot_type == \"image\" and isinstance(group_by, 
dict):\n if axes is None:\n axes = dict()\n for sel in group_by:\n plt.figure()\n axes[sel] = plt.axes()\n if not isinstance(axes, dict):\n raise ValueError(\"If `group_by` is a dict, `axes` must be \"\n \"a dict of axes or None.\")\n _validate_if_list_of_axes(list(axes.values()))\n remove_xlabels = any([_is_last_row(ax) for ax in axes.values()])\n for sel in group_by: # ... we loop over selections\n if sel not in axes:\n raise ValueError(sel + \" present in `group_by`, but not \"\n \"found in `axes`\")\n ax = axes[sel]\n # the unwieldy dict comp below defaults the title to the sel\n titles = ({channel_type(evoked.info, idx): sel\n for idx in group_by[sel]} if titles is None else titles)\n _plot_evoked(evoked, group_by[sel], exclude, unit, show, ylim,\n proj, xlim, hline, units, scalings, titles,\n ax, plot_type, cmap=cmap, gfp=gfp,\n window_title=window_title,\n selectable=selectable, noise_cov=noise_cov,\n colorbar=colorbar, mask=mask,\n mask_style=mask_style, mask_cmap=mask_cmap,\n mask_alpha=mask_alpha, time_unit=time_unit,\n show_names=show_names,\n sphere=sphere)\n if remove_xlabels and not _is_last_row(ax):\n ax.set_xticklabels([])\n ax.set_xlabel(\"\")\n ims = [ax.images[0] for ax in axes.values()]\n clims = np.array([im.get_clim() for im in ims])\n min, max = clims.min(), clims.max()\n for im in ims:\n im.set_clim(min, max)\n figs = [ax.get_figure() for ax in axes.values()]\n if len(set(figs)) == 1:\n return figs[0]\n else:\n return figs\n elif isinstance(axes, dict):\n raise ValueError(\"If `group_by` is not a dict, \"\n \"`axes` must not be a dict either.\")\n\n time_unit, times = _check_time_unit(time_unit, evoked.times)\n evoked = evoked.copy() # we modify info\n info = evoked.info\n if axes is not None and proj == 'interactive':\n raise RuntimeError('Currently only single axis figures are supported'\n ' for interactive SSP selection.')\n if isinstance(gfp, str) and gfp != 'only':\n raise ValueError('gfp must be boolean or \"only\". Got %s' % gfp)\n\n scalings = _handle_default('scalings', scalings)\n titles = _handle_default('titles', titles)\n units = _handle_default('units', units)\n\n picks = _picks_to_idx(info, picks, none='all', exclude=())\n if len(picks) != len(set(picks)):\n raise ValueError(\"`picks` are not unique. 
Please remove duplicates.\")\n\n bad_ch_idx = [info['ch_names'].index(ch) for ch in info['bads']\n if ch in info['ch_names']]\n if len(exclude) > 0:\n if isinstance(exclude, str) and exclude == 'bads':\n exclude = bad_ch_idx\n elif (isinstance(exclude, list) and\n all(isinstance(ch, str) for ch in exclude)):\n exclude = [info['ch_names'].index(ch) for ch in exclude]\n else:\n raise ValueError(\n 'exclude has to be a list of channel names or \"bads\"')\n\n picks = np.array([pick for pick in picks if pick not in exclude])\n\n types = np.array(_get_channel_types(info, picks), str)\n ch_types_used = list()\n for this_type in _VALID_CHANNEL_TYPES:\n if this_type in types:\n ch_types_used.append(this_type)\n\n fig = None\n if axes is None:\n fig, axes = plt.subplots(len(ch_types_used), 1)\n fig.subplots_adjust(left=0.125, bottom=0.1, right=0.975, top=0.92,\n hspace=0.63)\n if isinstance(axes, plt.Axes):\n axes = [axes]\n fig.set_size_inches(6.4, 2 + len(axes))\n\n if isinstance(axes, plt.Axes):\n axes = [axes]\n elif isinstance(axes, np.ndarray):\n axes = list(axes)\n\n if fig is None:\n fig = axes[0].get_figure()\n\n if window_title is not None:\n _set_window_title(fig, window_title)\n\n if len(axes) != len(ch_types_used):\n raise ValueError('Number of axes (%g) must match number of channel '\n 'types (%d: %s)' % (len(axes), len(ch_types_used),\n sorted(ch_types_used)))\n _check_option('proj', proj, (True, False, 'interactive', 'reconstruct'))\n noise_cov = _check_cov(noise_cov, info)\n if proj == 'reconstruct' and noise_cov is not None:\n raise ValueError('Cannot use proj=\"reconstruct\" when noise_cov is not '\n 'None')\n projector, whitened_ch_names = _setup_plot_projector(\n info, noise_cov, proj=proj is True, nave=evoked.nave)\n if len(whitened_ch_names) > 0:\n unit = False\n if projector is not None:\n evoked.data[:] = np.dot(projector, evoked.data)\n if proj == 'reconstruct':\n evoked = evoked._reconstruct_proj()\n\n if plot_type == 'butterfly':\n _plot_lines(evoked.data, info, picks, fig, axes, spatial_colors, unit,\n units, scalings, hline, gfp, types, zorder, xlim, ylim,\n times, bad_ch_idx, titles, ch_types_used, selectable,\n False, line_alpha=1., nave=evoked.nave,\n time_unit=time_unit, sphere=sphere)\n plt.setp(axes, xlabel='Time (%s)' % time_unit)\n\n elif plot_type == 'image':\n for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):\n use_nave = evoked.nave if ai == 0 else None\n this_picks = list(picks[types == this_type])\n _plot_image(evoked.data, ax, this_type, this_picks, cmap, unit,\n units, scalings, times, xlim, ylim, titles,\n colorbar=colorbar, mask=mask, mask_style=mask_style,\n mask_cmap=mask_cmap, mask_alpha=mask_alpha,\n nave=use_nave, time_unit=time_unit,\n show_names=show_names, ch_names=evoked.ch_names)\n if proj == 'interactive':\n _check_delayed_ssp(evoked)\n params = dict(evoked=evoked, fig=fig, projs=info['projs'], axes=axes,\n types=types, units=units, scalings=scalings, unit=unit,\n ch_types_used=ch_types_used, picks=picks,\n plot_update_proj_callback=_plot_update_evoked,\n plot_type=plot_type)\n _draw_proj_checkbox(None, params)\n\n plt.setp(fig.axes[:len(ch_types_used) - 1], xlabel='')\n fig.canvas.draw() # for axes plots update axes.\n plt_show(show)\n return fig", "def plot_metrics_over_iterations(metrics, epoch):\n total_elbo, total_cond_log_like, total_kl = metrics\n legend = ['ELBO', 'log p(x | z)']\n\n for level in range(len(total_kl)):\n legend.append('KL Divergence, Level ' + str(level))\n\n nans = np.zeros((1, 2 + len(total_kl)))\n 
nans.fill(np.nan)\n indices = np.ones((1, 2 + len(total_kl)))\n\n handle = plot_line(nans, indices, legend=legend,\n title='Average Metrics During Inference Iterations, Epoch ' + str(epoch),\n xlabel='Inference Iterations', ylabel='Metrics (Nats)')\n\n iterations = np.arange(0, total_elbo.shape[1]).astype(int)\n\n ave_elbo = np.mean(total_elbo, axis=0)\n update_trace(ave_elbo, iterations, win=handle, name='ELBO')\n\n ave_recon = np.mean(total_cond_log_like, axis=0)\n update_trace(ave_recon, iterations, win=handle, name='log p(x | z)')\n\n for level in range(len(total_kl)):\n ave_kl = np.mean(total_kl[level], axis=0)\n update_trace(ave_kl, iterations, win=handle, name='KL Divergence, Level ' + str(level))", "def plot_live(X, y, evaluator, param_name, param_range, scale='log', ylim=(0,1), ylabel='score'):\n # Plot interactively\n plt.ion()\n plt.ylabel(ylabel)\n plt.xlabel(param_name)\n \n # Make the scale look nice\n plt.xscale(scale)\n plt.xlim(param_range[0],param_range[-1])\n plt.ylim(ylim)\n \n # Start from empty plot, then fill it\n series = {}\n lines = {}\n xvals = []\n for i in param_range:\n scores = evaluator(X, y, i) \n if i == param_range[0]: # initialize series\n for k in scores.keys():\n lines[k], = plt.plot(xvals, [], marker = '.', label = k)\n series[k] = []\n xvals.append(i)\n for k in scores.keys(): # append new data\n series[k].append(scores[k])\n lines[k].set_data(xvals, series[k])\n # refresh plot\n plt.legend(loc='best')\n plt.margins(0.1)\n display.display(plt.gcf())\n display.clear_output(wait=True)", "def plot_average_impl(df, regexps, y_value='return_mean', window=1, agg='mean', \n x_value='frames'):\n df = df.dropna(subset=[y_value])\n\n unique_models = df['model'].unique()\n model_groups = [[m for m in unique_models if re.match(regex, m)]\n for regex in regexps]\n\n for regex, models in zip(regexps, model_groups):\n df_re = df[df['model'].isin(models)]\n # the average doesn't make sense if most models are not included,\n # so we only for the period of training that has been done by all models\n num_frames_per_model = [df_model[x_value].max()\n for _, df_model in df_re.groupby('model')]\n median_progress = sorted(num_frames_per_model)[(len(num_frames_per_model) - 1) // 2]\n mean_duration = np.mean([\n df_model['duration'].max() for _, df_model in df_re.groupby('model')])\n df_re = df_re[df_re[x_value] <= median_progress]\n\n # smooth\n parts = []\n for _, df_model in df_re.groupby('model'):\n df_model = df_model.copy()\n df_model.loc[:, y_value] = df_model[y_value].rolling(window).mean()\n parts.append(df_model)\n df_re = pandas.concat(parts)\n\n df_agg = df_re.groupby([x_value]).agg([agg])\n values = df_agg[y_value][agg]\n pyplot.plot(df_agg.index, values, label=regex)\n print(regex, median_progress, mean_duration / 86400.0, values.iloc[-1])", "def plot_equivalent_samples(\n self, acquisition_risks, baseline=None, errors='std',\n fig=None, ax=None, alpha=0.3, i=0, labels=None, zorders=None,\n colors=None, relative=True, rolling_before=False,\n rolling_after=False, inverse=False):\n if errors == 'percentiles':\n upper_base = self.quant_errors\n elif errors == 'std':\n upper_base = self.errors\n else:\n raise ValueError\n\n if fig is None or ax is None:\n fig, ax = plt.subplots(dpi=200)\n\n if baseline is None:\n baselines = ['RandomAcquisition', 'BiasedRiskEstimator']\n\n base_risk = upper_base.loc[baselines[0]][baselines[1]].values\n if zorders is None:\n zorders = 100 * [None]\n\n linestyles = itertools.cycle(['--', '-.', ':'])\n for acquisition, risk in 
acquisition_risks:\n acq_risk = f'{acquisition}_{risk}'\n if colors is None:\n color = acquisition_risks_to_color[acq_risk]\n else:\n color = colors[i]\n\n s_u = upper_base.loc[acquisition][risk].values\n if (R := rolling_before) is not False:\n s_u = np.convolve(\n s_u, np.ones(R)/R, mode='valid')\n base_risk = np.convolve(\n base_risk, np.ones(R)/R, mode='valid')\n\n diffs = s_u[:, np.newaxis] - base_risk\n diffs[diffs < 0] = 1e10\n idxs = np.argmin(diffs, axis=1) + 1\n x = range(1, len(idxs)+1)\n if relative:\n y = idxs/x\n else:\n y = idxs\n\n if (R := rolling_after) is not False:\n y = np.convolve(y, np.ones(R)/R, mode='valid')\n x = range(1, len(y)+1)\n\n if inverse:\n y = 1/y\n\n ax.plot(y, '-', color=color, label=labels[i],\n zorder=zorders[i])\n i += 1\n\n return fig, ax", "def plotModelResults(model, X_train=X_train, X_test=X_test, plot_intervals=False, plot_anomalies=False):\n\n prediction = model.predict(X_test)\n\n plt.figure(figsize=(15, 7))\n\n plt.plot(prediction, \"g\", label=\"prediction\", linewidth=2.0)\n plt.plot(y_test.values, label=\"actual\", linewidth=2.0)\n plt.draw()\n if plot_intervals:\n cv = cross_val_score(model, X_train, y_train,\n cv=tscv,\n scoring=\"neg_mean_absolute_error\")\n mae = cv.mean() * (-1)\n deviation = cv.std()\n\n scale = 1.96\n lower = prediction - (mae + scale * deviation)\n upper = prediction + (mae + scale * deviation)\n\n plt.plot(lower, \"r--\", label=\"upper bond / lower bond\", alpha=0.5)\n plt.plot(upper, \"r--\", alpha=0.5)\n plt.draw()\n\n if plot_anomalies:\n anomalies = np.array([np.NaN]*len(y_test))\n anomalies[y_test<lower] = y_test[y_test<lower]\n anomalies[y_test>upper] = y_test[y_test>upper]\n plt.plot(anomalies, \"o\", markersize=10, label = \"Anomalies\")\n plt.draw()\n def mean_absolute_percentage_error(y_true, y_pred):\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n error = mean_absolute_percentage_error(prediction, y_test)\n plt.title(\"Mean absolute percentage error {0:.2f}%\".format(error))\n plt.legend(loc=\"best\")\n plt.tight_layout()\n plt.grid(True)\n plt.draw()", "def get_stability_plot(self):\n fig, ax = plt.subplots()\n first_episode = self.get_convergence_episode()\n\n values = self.stats['return_stats']['episode_totals']\n _, _, (y_lower, _) = self._moving_average(\n values, window=_ROLLING_WINDOW, p=_CONFIDENCE_LEVEL)\n episodes = np.arange(len(values))\n unstable_episodes = np.where(\n np.logical_and(values < y_lower[-1], episodes > first_episode))[0]\n\n ax.plot(episodes, values, color='steelblue', lw=2, alpha=.9,\n label='Return')\n for i, episode in enumerate(unstable_episodes):\n ax.axvline(episode, color='salmon', lw=2,\n label='Unstable' if i == 0 else None)\n ax.axvline(first_episode, color='seagreen', lw=2, label='Converged')\n\n ax.set_title('Normalized instability = {:.3f}%'.format(\n self.get_normalized_instability() * 100.))\n ax.legend()\n ax.set_ylabel('Return')\n ax.set_xlabel('Episode')\n return fig", "def visualize_tgt_by_categorical(df, var, target):\n import seaborn as sns\n import matplotlib.pyplot as plt\n import pandas as pd\n \n plt.figure(figsize=(10,5))\n \n grouped_values = df.groupby(var)[target].mean().sort_values(ascending = False).reset_index()\n\n sns.set(style = 'white')\n sns.barplot(x = var, y = target, data = grouped_values, palette = sns.color_palette(\"RdBu\", n_colors = 7))\n\n return plt.show()", "def plot_cross_validation_metric(\n df_cv, metric, rolling_window=0.1, ax=None, figsize=(10, 6)\n):\n if ax is None:\n fig = plt.figure(facecolor='w', 
figsize=figsize)\n ax = fig.add_subplot(111)\n else:\n fig = ax.get_figure()\n # Get the metric at the level of individual predictions, and with the rolling window.\n df_none = performance_metrics(df_cv, metrics=[metric], rolling_window=0)\n df_h = performance_metrics(df_cv, metrics=[metric], rolling_window=rolling_window)\n\n # Some work because matplotlib does not handle timedelta\n # Target ~10 ticks.\n tick_w = max(df_none['horizon'].astype('timedelta64[ns]')) / 10.\n # Find the largest time resolution that has <1 unit per bin.\n dts = ['D', 'h', 'm', 's', 'ms', 'us', 'ns']\n dt_names = [\n 'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds',\n 'nanoseconds'\n ]\n dt_conversions = [\n 24 * 60 * 60 * 10 ** 9,\n 60 * 60 * 10 ** 9,\n 60 * 10 ** 9,\n 10 ** 9,\n 10 ** 6,\n 10 ** 3,\n 1.,\n ]\n for i, dt in enumerate(dts):\n if np.timedelta64(1, dt) < np.timedelta64(tick_w, 'ns'):\n break\n\n x_plt = df_none['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])\n x_plt_h = df_h['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])\n\n ax.plot(x_plt, df_none[metric], '.', alpha=0.5, c='gray')\n ax.plot(x_plt_h, df_h[metric], '-', c='b')\n ax.grid(True)\n\n ax.set_xlabel('Horizon ({})'.format(dt_names[i]))\n ax.set_ylabel(metric)\n return fig", "def get_multiobjective_plot(evaluator_list, stride=500):\n num_objectives = (\n evaluator_list[0].stats['multiobj_stats']['episode_totals'].shape[1])\n values = [collections.defaultdict(list) for _ in range(num_objectives)]\n for e in evaluator_list:\n for i in range(num_objectives):\n values[i][e.task_name].append(\n e.stats['multiobj_stats']['episode_totals'][:, i])\n means = [None] * num_objectives\n stds = [None] * num_objectives\n for i in range(num_objectives):\n values[i] = _map(np.vstack, values[i])\n means[i] = _map(functools.partial(np.mean, axis=0), values[i])\n stds[i] = _map(functools.partial(np.std, axis=0), values[i])\n\n fig, axes = plt.subplots(num_objectives, 1, figsize=(8, 6 * num_objectives))\n for objective_idx in range(num_objectives):\n ax = axes[objective_idx]\n for i, task_name in enumerate(means[objective_idx]):\n m = means[objective_idx][task_name]\n s = stds[objective_idx][task_name]\n idx = i % len(_COLORS)\n x = np.arange(len(m))\n ax.plot(x, m, lw=2, color=_COLORS[idx], alpha=.6, label=None)\n ax.plot(x[::stride], m[::stride], 'o', lw=2, marker=_MARKERS[idx],\n markersize=10, color=_COLORS[idx], label=task_name)\n ax.fill_between(x, m - s, m + s, alpha=.4, lw=2, color=_COLORS[idx])\n ax.legend()\n ax.set_ylabel('Objective {}'.format(objective_idx))\n ax.set_xlabel('Episode')\n return fig", "def evaluate(self):\n # define start index test set\n start_test_set = int(len(self.X) * 2 / 3)\n\n # Different methods for cummulativa vs day-ahead forecasting\n if self.forecast_horizon == 1:\n # In sample\n lin_residuals_in_sample = self.y - (self.betas[0] + np.dot(self.X, self.betas[1]))\n self.rmse_in_sample = np.mean(lin_residuals_in_sample ** 2) ** 0.5\n self.var_in_sample = np.var(self.y)\n\n # Out of sample\n # Calculate MSE of wls-ev prediction\n self.mse_wlsev = np.mean((self.y[start_test_set:] - self.ols_predict()) ** 2)\n # Calculate MSE of benchmark prediction\n self.mse_benchmark = np.mean((self.y[start_test_set:] - self.benchmark_predict()) ** 2)\n else:\n # In Sample with betas estimated on full time series\n lin_residuals_in_sample = rolling_sum(self.y, self.forecast_horizon) - (\n self.betas[0] + np.dot(self.X[:-(self.forecast_horizon-1)], 
self.betas[1]))\n self.rmse_in_sample = np.mean(lin_residuals_in_sample ** 2) ** 0.5\n self.var_in_sample = np.var(rolling_sum(self.y, self.forecast_horizon))\n\n # Out of sample\n # calculate realized cummulative returns over forecast horizon sequences\n cum_rets_realized = rolling_sum(self.y[start_test_set:], self.forecast_horizon)\n # Calculate MSE of wls-ev prediction, only where realized values are available\n self.mse_wlsev = np.mean((cum_rets_realized - self.ols_predict()[:-(self.forecast_horizon-1)]) ** 2)\n # Calculate MSE of benchmark prediction, only where realized values are available\n self.mse_benchmark = np.mean(\n (cum_rets_realized - self.benchmark_predict()[:-(self.forecast_horizon-1)]) ** 2)\n\n # Calculate out of sample r-squared\n self.oos_r_squared = 1 - (self.mse_wlsev / self.mse_benchmark)\n # Calculate in sample r-squared\n self.in_sample_r_squared = 1.0 - (self.rmse_in_sample ** 2) / self.var_in_sample", "def _plot_experiment(df, axes, metric_name, isTrain):\n # colors: https://stackoverflow.com/questions/42086276/get-default-line-colour-cycle\n ldf = metric_short_to_long(df)\n plotted = \"Train\" if isTrain else \"Val\"\n m = ldf.query(\"stat == 'mse' and metric == @metric_name\")[[\"trial\",\"state\",\"value\"]].rename({\"value\":\"mse\"},axis=1)\n # aggregated\n ax = sns.barplot(x=\"trial\", y=\"mse\", data=m, palette=[u'#1f77b4'], ci=\"sd\", ax=axes[0])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"Aggregated State Errors ({plotted})\")\n ax.set_xlabel(\"Trial Number\")\n\n # individual state plots\n ax = sns.barplot(x=\"trial\", y=\"mse\", hue=\"state\",data=m, ci=\"sd\", ax=axes[1])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"State Error by Trial ({plotted})\")\n ax.set_xlabel(\"Trial Number\")", "def estimate(values, target):\n\n # next time\n # diff(values)\n\n\n return 1.", "def compute(self, pred: torch.Tensor, target: torch.Tensor) \\\n -> torch.Tensor:\n # If 2-dimensional, select the highest score in each row\n if len(target.size()) == 2:\n target = target.argmax(dim=1)\n\n ranked_scores = torch.argsort(pred, dim=1)[:, -self.top_k:]\n recalled = torch.sum((target.unsqueeze(1) == ranked_scores).float(), dim=1)\n return recalled.mean()", "def plot_bias(clf_list = ['test_small','rt_small','test2_small'],return_df = False,XKCD = False):\n if XKCD = True:\n plt.xkcd()\n print('damn')\n df = load_all_dfs(clf_list)\n df = df.swaplevel(0,1)\n del df['std']\n df.hist()\n plt.figure()\n\n for clf in clf_list:\n df.ix[clf].mean().plot(label = clf,figsize=(16, 4))\n plt.legend(loc='upper right')\n plt.title('mean')\n plt.figure()\n \n # c = df.columns\n for clf in clf_list:\n #df[c[1:]].ix[clf].max().plot(label = clf,figsize=(16, 4))\n df.ix[clf].max().plot(label = clf,figsize=(16, 4))\n plt.legend(loc='upper right')\n plt.title('max')\n \n plt.figure()\n for clf in clf_list:\n df.ix[clf].std().plot(label = clf,figsize=(16, 4))\n\n \n plt.legend(loc='upper right')\n plt.title('std')\n plt.figure()\n used_list = []\n for clf in clf_list:\n for clf2 in clf_list:\n if (clf != clf2) and ({clf,clf2} not in used_list):\n diff = ((df.ix[clf] - df.ix[clf2])**2)**(1/2)\n diff.mean().plot(label = clf+' - ' +clf2,figsize=(16, 4))\n used_list.append({clf,clf2})\n \n \n \n \n \n plt.legend(loc='upper right')\n plt.title('difference')\n print('damnover')\n if return_df == True:\n return df", "def __call__(self, output, target):\n maxk = max(self.topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, 
True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in self.topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def perSaccPerGroup(addExp2 = True, trim = True, spacing = .5, addSimulation = True, \\\n\t\texclOverlap = True):\n\n\tfig = plt.figure(figsize = (10,10))\n\ttitle = \"Average towards-handle landings per group - exclOverlap = %s\" % exclOverlap\n\tplt.title(title)\n\t\n\t\n\t# Exp 1:\n\tdm1 = getDM.getDM(exp = \"004A\", excludeErrors = True, driftCorr = True)\n\t\n\t\n\tsubPlot = 1\n\t\n\tfor group in dm1.unique(\"corrDirection\"):\n\n\t\t# New subplot:\n\t\tplt.subplot(1,2,subPlot)\n\t\tplt.title(group)\n\t\t_dm1 = dm1.select(\"corrDirection == '%s'\" % group)\n\n\t\tlLandingsAbs = []\n\t\tlLandingsCorr = []\n\t\t\n\t\tfor sacc in [\"1\", \"2\", \"3\"]:\n\t\t\t\n\t\t\t# This is the same for corrected landing positions (the saccade\n\t\t\t# doesn't change; only the reference point does)\n\t\t\t_dm = _dm1.select(\"endX%sNorm != ''\" % sacc)\n\t\t\t_dm = _dm.select(\"endX%sNorm > -.5\" % sacc)\n\t\t\t_dm = _dm.select(\"endX%sNorm < .5\" % sacc)\n\t\t\t\n\t\t\tif trim:\n\t\t\t\t_dmAbs = _dm.selectByStdDev(keys = [\"file\"], dv = \"endX%sNormToHandle\" % sacc)\n\t\t\t\t_dmCorr = _dm.selectByStdDev(keys = [\"file\"], dv = \"endX%sCorrNormToHandle\" % sacc)\n\t\t\t\n\t\t\t# Determine avg landing position:\n\t\t\tavgAbs = _dmAbs[\"endX%sNormToHandle\" % sacc].mean()\n\t\t\tavgCorr = _dmCorr[\"endX%sCorrNormToHandle\" % sacc].mean()\n\t\t\t\n\t\t\t# TODO: determine error bars:\n\t\t\t\n\t\t\t\n\t\t\tlLandingsAbs.append(avgAbs)\n\t\t\tlLandingsCorr.append(avgCorr)\n\t\t\n\t\tplt.plot(lLandingsAbs, color = \"#f57900\", linewidth = 2, marker = \"o\")\n\t\tplt.plot(lLandingsCorr, color = \"#73d216\", linewidth = 2, marker = \"o\")\n\t\t\n\t\t# The other 2 experiments can be treated equally:\n\t\tcolList = [\"#ef2929\", \"#3465a4\"]\n\t\t\n\t\tfor exp in [\"004B\", \"004C\"]:\n\t\t\t\n\t\t\tif not addExp2:\n\t\t\t\tif exp == \"004B\":\n\t\t\t\t\tcontinue\n\t\t\tif not addSimulation:\n\t\t\t\tif exp == \"004C\":\n\t\t\t\t\tcontinue\n\n\t\t\tdm = getDM.getDM(exp = exp, excludeErrors = True, driftCorr = True)\n\t\t\t_dm = dm.select(\"corrDirection == '%s'\" % group)\n\n\t\t\tif exp == \"004C\":\n\t\t\t\tif exclOverlap:\n\t\t\t\t\t_dm = _dm.select(\"gap == 'zero'\")\n\n\t\t\t\n\t\t\tlLandingsAbs = []\n\t\t\t\n\t\t\tfor sacc in [\"1\", \"2\", \"3\"]:\n\t\t\t\t\n\t\t\t\t# TODO: how to filter only on-object saccades exp 2??\n\t\t\t\tsacc_dm = _dm.select(\"endX%sNorm != ''\" % sacc)\n\t\t\t\tsacc_dm = sacc_dm.select(\"endX%sNorm > -.5\" % sacc)\n\t\t\t\tsacc_dm = sacc_dm.select(\"endX%sNorm < .5\" % sacc)\n\t\t\t\t\n\t\t\t\tif trim:\n\t\t\t\t\tsacc_dm = sacc_dm.selectByStdDev(keys = [\"file\"], dv = \"endX%sNormToHandle\" % sacc)\n\t\t\t\t\n\t\t\t\t# Determine avg landing position:\n\t\t\t\tavgAbs = sacc_dm[\"endX%sNormToHandle\" % sacc].mean()\n\t\t\t\t\n\t\t\t\t# TODO: determine error bars:\n\t\t\t\t\n\t\t\t\tlLandingsAbs.append(avgAbs)\n\t\t\tcol = colList.pop()\n\t\t\tplt.plot(lLandingsAbs, color = col, linewidth = 2, marker = \"o\")\n\t\t\t\n\t\t# Modify plot:\n\t\tplt.legend([\"Exp1 abs\", \"Exp1 corr\", \"Exp2\", \"Sim\"])\n\t\t\t\n\t\tplt.axhline(0, color = \"#888a85\", linestyle = \"--\", linewidth = 2)\n\t\txLabels = [\"sacc 1\", \"sacc 2\", \"sacc 3\"]\n\t\txTicks = range(0,3)\n\t\tplt.xticks(xTicks, xLabels, rotation = .5)\n\t\tplt.ylim([-.5, 
.5])\n\t\tplt.xlim(min(xTicks)-spacing, max(xTicks)+spacing)\n\t\t\n\t\tsubPlot +=1\n\t\t\n\tplt.savefig(\"%s.png\" % title)", "def plot_evaluation(values, info, measures = ['Dice','Jaccard', 'TPR', 'TNR', '1-GCE', 'VS', 'RI', 'ARI', 'MI', '1-VOI', 'ICC','1/(1+PBD)', 'KAP', 'AUC', '1/(1+HD)', '1/(1+AVD)', 'MHD' ], colourmap=None, outfile='polar_results.png'):\n _min = info['minimum']\n _max = info['maximum']\n if colourmap is None:\n colourmap = [[86./255.,180./255.,233./255.] for ii in range(values.shape[0])]\n else:\n # normalize colourmap values between 0 and 1\n colourmap = (colourmap-_min)/(_max-_min)\n # apply cividis, returns the RBG1 values for cividis, for dots\n colourmap = [[cm.cividis(ii)] for ii in colourmap] \n\n # elements of the circle\n N = len(measures)\n # evenly space measures around circle\n x_as = [n / float(N) * 2 * pi for n in range(N)] \n\n # Set color of axes\n plt.rc('axes', linewidth=0.5, edgecolor=\"#888888\")\n\n # Create polar plot\n fig = plt.figure(figsize = (11,9.5))\n gs = gridspec.GridSpec(1, 3, width_ratios=[17,2,1])\n ax = plt.subplot(gs[0], polar=True)\n \n # Set position of y-labels\n ax.set_rlabel_position(0)\n\n # Set color and linestyle of grid\n ax.xaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n ax.yaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n\n # Set yticks\n plt.yticks([0.2, 0.4, 0.6, 0.8, 1.0], [\"0.2\", \"0.4\", \"0.6\", \"0.8\", \"1.0\"], fontsize=15)\n pos=ax.get_rlabel_position()\n ax.set_rlabel_position(pos+0.4*360./float(len(measures)))\n\n # Plot data\n for ii in np.arange(values.shape[0]):\n xx = np.asarray(x_as) + np.random.randn(len(x_as))*np.diff(x_as)[0]/15.\n data_norm = None\n if info['logplot']:\n data_norm = matplotlib.colors.LogNorm(vmin=_min, vmax=_max)\n sc = ax.scatter(xx, values[ii,:], 23, color=colourmap[ii]*len(xx), norm=data_norm, zorder=3) \n\n # Fill area\n # close the circle\n median = list(np.median(values, axis=0))\n median += median[:1]\n upper = list(np.percentile(values, 75, axis=0))\n upper += upper[:1]\n lower = list(np.percentile(values, 25, axis=0))\n lower += lower[:1]\n x_as += x_as[:1]\n ax.plot(x_as, median, color=[86./255.,180./255.,233./255.], zorder=5)\n ax.fill_between(x_as, upper, lower, zorder=4, color=[86./255.,180./255.,233./255.], alpha=0.3)\n\n # Set number of radial axes and remove labels\n plt.xticks(x_as[:-1], [])\n\n # Set axes limits\n plt.ylim(0, 1)\n\n # Draw ytick labels to make sure they fit properly\n for i in range(N):\n angle_rad = i / float(N) * 2 * pi-0.05\n text_size = 21\n if i in {3,8}:\n ax.text(angle_rad, 1.15, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {0}:\n ax.text(angle_rad, 1.25, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {1,5,7}:\n ax.text(angle_rad, 1.29, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {4}:\n ax.text(angle_rad, 1.32, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"top\")\n elif i in {10}:\n ax.text(angle_rad, 1.26, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {6}:\n ax.text(angle_rad, 1.25, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i 
in {9}:\n ax.text(angle_rad, 1.18, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n else:\n ax.text(angle_rad, 1.22, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n\n # colorbar location on figure\n cbaxes = plt.subplot(gs[2])\n\n # log scaling option\n norm = None\n if info['logplot']:\n norm = matplotlib.colors.LogNorm(vmin=_min,vmax=_max)\n\n img = plt.imshow(np.array([[_min,_max]]), aspect='auto', cmap=\"cividis\", norm=norm)\n img.set_visible(False)\n\n # initialize colorbar\n cbar = plt.colorbar(cax = cbaxes)\n\n # ticks and label\n c_values = cbar.get_ticks().tolist()\n \n ticklabels = [\"\" for ii in c_values]\n if _min < np.min(c_values):\n c_values = [_min] + c_values\n ticklabels = [\"%0.1f %s\" %(np.min(c_values), info['unit'])] + ticklabels\n else:\n ticklabels[0] = \"%0.1f %s\" %(np.min(c_values), info['unit'])\n\n if _max > np.max(c_values):\n c_values = c_values + [_max]\n ticklabels = ticklabels + [\"%0.1f %s\" %(np.max(c_values), info['unit'])]\n else:\n ticklabels[-1] = \"%0.1f %s\" %(np.max(c_values), info['unit'])\n \n cbar.set_ticks(c_values)\n cbar.set_ticklabels(ticklabels)\n cbaxes.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n cbar.ax.set_ylabel(info[\"label\"], labelpad=-20)\n \n # font sizes for colorbar\n cbar.ax.yaxis.label.set_size(19)\n cbar.ax.tick_params(labelsize=14)\n\n # Save and show polar plot \n plt.savefig(outfile)\n if info['display']:\n plt.show()\n plt.clf()\n plt.close('all')", "def graph_results(self):\n # fig = plt.figure()\n # fig.subplots_adjust(hspace=.35)\n\n # median_x_values = [num for num in\n # range(len(results.get_meaningful_medians()))]\n # median_y_values = results.get_meaningful_medians()\n\n # print(\"Meaningful medians:\", results.get_meaningful_medians())\n # print(\"Enumerated meaningful medians:\",\n # list(enumerate(results.get_meaningful_medians())))\n # print(\"Zipped enumerated meaningful medians:\",\n # list(zip(*enumerate(results.get_meaningful_medians()))))\n\n print(\"[Progress] Graphing results...\")\n\n median_x_values, median_y_values = \\\n zip(*enumerate(self.sim_results.get_median_balances()))\n median_graph = self.graph_fig.add_subplot(2, 1, 1)\n\n median_graph.plot(median_x_values, median_y_values)\n\n median_graph.set_title(\"Simulation Result Medians\")\n median_graph.set_xlabel(\"Roll #\")\n median_graph.set_ylabel(\"Median Balance\")\n\n mean_graph = self.graph_fig.add_subplot(2, 1, 2)\n\n mean_values_to_graph = \\\n self.sim_results.get_average_balances()\n mean_x_values, mean_y_values = \\\n zip(*enumerate(mean_values_to_graph))\n mean_graph.plot(mean_x_values, mean_y_values)\n\n mean_graph.set_title(\"Simulation Result Means\")\n mean_graph.set_xlabel(\"Roll #\")\n mean_graph.set_ylabel(\"Mean Balance\")\n\n plt.show()", "def group_evaluation(ops, mut):\n\n center = ops[0]\n min_distortion = distortion([ops[0]], ops, mut)\n for i in ops:\n tmp = distortion([i], ops, mut)\n if tmp < min_distortion:\n center = i\n min_distortion = tmp\n return center", "def make_summary_match_panel(\n target,\n multiple_features,\n plot_only_columns_shared_by_target_and_all_features=False,\n target_ascending=False,\n n_samplings=30,\n n_permutations=30,\n random_seed=RANDOM_SEED,\n title='Summary Match Panel',\n target_type='continuous',\n max_std=3,\n target_annotation_kwargs={'fontsize': 12},\n plot_sample_names=False,\n file_path=None,\n dpi=100):\n\n # Set up 
figure\n fig = figure(figsize=FIGURE_SIZE)\n\n # Compute the number of rows needed for plotting\n n = 0\n for name, d in multiple_features.items():\n n += len(d['indices']) + 3\n\n # Set up ax grids\n gridspec = GridSpec(n, 1)\n\n # Plot title\n fig.suptitle(title, horizontalalignment='center', **FONT_LARGEST)\n r_i = 0\n\n # Set columns to be plotted\n columns = target.index\n if plot_only_columns_shared_by_target_and_all_features:\n for name, d in multiple_features.items():\n columns &= d['df'].columns\n\n # Plot multiple_features\n for fi, (name, d) in enumerate(multiple_features.items()):\n print('Making match panel for {} ...'.format(name))\n\n features = d['df']\n indices = d['indices']\n index_aliases = d['index_aliases']\n emphasis = d['emphasis']\n data_type = d['data_type']\n\n # Extract specified indices from features\n missing_indices = [i for i in indices if i not in features.index]\n if any(missing_indices):\n raise ValueError(\n 'features don\\'t have indices {}.'.format(missing_indices))\n\n # Sort target and features.columns (based on target)\n target = target.loc[columns & features.columns].sort_values(\n ascending=target_ascending or target.dtype == 'O')\n features = features[target.index]\n\n # Drop constant rows\n features = drop_df_slices(\n features.loc[indices], 1, max_n_unique_objects=1)\n\n target_o_to_int = {}\n target_int_to_o = {}\n if target.dtype == 'O':\n # Make target numerical\n for i, o in enumerate(target.unique()):\n target_o_to_int[o] = i\n target_int_to_o[i] = o\n target = target.map(target_o_to_int)\n\n if target_type in ('binary', 'categorical'):\n # Cluster within categories\n columns = cluster_2d_array_slices_by_group(features.values,\n target.values)\n features = features.iloc[:, columns]\n\n # Match\n scores = match(\n target.values,\n features.values,\n n_features=features.shape[0],\n n_samplings=n_samplings,\n n_permutations=n_permutations,\n random_seed=random_seed)\n scores.index = features.index\n\n # Sort scores\n scores = scores.sort_values('Score', ascending=emphasis == 'low')\n features = features.loc[scores.index]\n\n # Use alias\n i_to_a = {i: a for i, a in zip(indices, index_aliases)}\n features.index = features.index.map(lambda i: i_to_a[i])\n\n # Make annotations\n annotations = DataFrame(index=scores.index)\n # Make IC(MoE)s\n annotations['IC(\\u0394)'] = scores[['Score', '0.95 MoE']].apply(\n lambda s: '{0:.3f}({1:.3f})'.format(*s), axis=1)\n # Make p-value\n annotations['p-value'] = scores['p-value'].apply('{:.2e}'.format)\n # Make FDRs\n annotations['FDR'] = scores['FDR'].apply('{:.2e}'.format)\n\n # Plot features title\n title_ax = subplot(gridspec[r_i:r_i + 1, 0])\n r_i += 1\n title_ax.axis('off')\n title_ax.text(\n 0.5,\n 0,\n '{} (n={})'.format(name, target.size),\n horizontalalignment='center',\n **FONT_LARGER)\n\n target_ax = subplot(gridspec[r_i:r_i + 1, 0])\n r_i += 1\n\n features_ax = subplot(gridspec[r_i:r_i + features.shape[0], 0])\n r_i += features.shape[0]\n\n # Plot match panel\n plot_match_panel(target, target_int_to_o, features, max_std,\n annotations, None, target_ax, features_ax,\n target_type, data_type, None,\n target_annotation_kwargs, plot_sample_names\n and fi == len(multiple_features) - 1, None, dpi)\n\n if file_path:\n save_plot(file_path)", "def test_plot_average(self):\n # Unpack the list of baseline-pairs into a Python list\n blpairs = np.unique(self.uvp.blpair_array)\n blps = [blp for blp in blpairs]\n\n # Plot the spectra averaged over baseline-pairs and times\n f1 = plot.delay_spectrum(self.uvp, 
[blps,], spw=0, pol=('xx','xx'),\n average_blpairs=True, average_times=True)\n elements = [(matplotlib.lines.Line2D, 1),]\n assert axes_contains(f1.axes[0], elements)\n plt.close(f1)\n\n # Average over baseline-pairs but keep the time bins intact\n f2 = plot.delay_spectrum(self.uvp, [blps,], spw=0, pol=('xx','xx'),\n average_blpairs=True, average_times=False)\n elements = [(matplotlib.lines.Line2D, self.uvp.Ntpairs),]\n assert axes_contains(f2.axes[0], elements)\n plt.close(f2)\n\n # Average over times, but keep the baseline-pairs separate\n f3 = plot.delay_spectrum(self.uvp, [blps,], spw=0, pol=('xx','xx'),\n average_blpairs=False, average_times=True)\n elements = [(matplotlib.lines.Line2D, self.uvp.Nblpairs),]\n assert axes_contains(f3.axes[0], elements)\n plt.close(f3)\n\n # Plot the spectra averaged over baseline-pairs and times, but also\n # fold the delay axis\n f4 = plot.delay_spectrum(self.uvp, [blps,], spw=0, pol=('xx','xx'),\n average_blpairs=True, average_times=True,\n fold=True)\n elements = [(matplotlib.lines.Line2D, 1),]\n assert axes_contains(f4.axes[0], elements)\n plt.close(f4)\n\n # Plot imaginary part\n f4 = plot.delay_spectrum(self.uvp, [blps,], spw=0, pol=('xx','xx'),\n average_blpairs=False, average_times=True,\n component='imag')\n elements = [(matplotlib.lines.Line2D, self.uvp.Nblpairs),]\n assert axes_contains(f4.axes[0], elements)\n plt.close(f4)\n\n # Plot abs\n f5 = plot.delay_spectrum(self.uvp, [blps,], spw=0, pol=('xx','xx'),\n average_blpairs=False, average_times=True,\n component='abs')\n elements = [(matplotlib.lines.Line2D, self.uvp.Nblpairs),]\n assert axes_contains(f4.axes[0], elements)\n plt.close(f5)\n\n # test errorbar plotting w/ markers\n\n # bootstrap resample\n (uvp_avg, _,\n _) = grouping.bootstrap_resampled_error(self.uvp, time_avg=True,\n Nsamples=100, normal_std=True,\n robust_std=False, verbose=False)\n\n f6 = plot.delay_spectrum(uvp_avg, uvp_avg.get_blpairs(), spw=0,\n pol=('xx','xx'), average_blpairs=False,\n average_times=False,\n component='real', error='bs_std', lines=False,\n markers=True)\n plt.close(f6)\n\n # plot errorbar instead of pspec\n f7 = plot.delay_spectrum(uvp_avg, uvp_avg.get_blpairs(), spw=0,\n pol=('xx','xx'), average_blpairs=False,\n average_times=False,\n component='real', lines=False,\n markers=True, plot_stats='bs_std')\n plt.close(f7)", "def RunEstimate(update_func, num_points=31, median_flag=False):\n d = ReadHeights(nrows=None)\n labels = {1:'male', 2:'female'}\n\n suites = {}\n for key, xs in d.items():\n label = labels[key]\n print(label, len(xs))\n Summarize(xs)\n\n xs = thinkbayes2.Jitter(xs, 1.3)\n\n mus, sigmas = FindPriorRanges(xs, num_points, median_flag=median_flag)\n suite = Height(mus, sigmas, label)\n suites[label] = suite\n update_func(suite, xs)\n print('MAP', suite.MaximumLikelihood())\n\n suite1 = suites['male']\n suite2 = suites['female']\n\n mu1 = suite1.Marginal(0)\n sigma1 = suite1.Marginal(1)\n\n mu2 = suite2.Marginal(0)\n sigma2 = suite2.Marginal(1)\n\n diff = mu1 - mu2\n sigma = (sigma1 + sigma2) / 2\n\n pmf_d = diff / sigma\n\n thinkplot.Cdf(pmf_d.MakeCdf())\n thinkplot.Show(xlabel='# stddev between means',\n ylabel='PMF')", "def plot_numerical(feature, target, dataframe, target_unit=None):\n # Create figure\n fig = figure(figsize=(21, 7))\n\n # Create histogram\n hist_ax = fig.add_subplot(1, 2, 1)\n histplot(data=dataframe, x=feature, discrete=True, ax=hist_ax)\n\n # Create hexbin plot\n hex_ax = fig.add_subplot(1, 2, 2)\n hex_plot = dataframe.plot.hexbin(\n x=feature, y=target, 
gridsize=20, ax=hex_ax\n )\n\n # Add unit to hexbin plot's y-axis if unit given\n if target_unit is not None:\n hex_plot.set_ylabel(\"{} / {}\".format(target.title(), target_unit))\n\n # Add lineplot showing the mean target value for each feature value\n mean_line = dataframe.groupby(feature)[target].mean()\n mean_line.plot(\n ax=hex_ax,\n label=\"mean {}/{}\".format(target, feature),\n color=\"yellow\",\n alpha=0.5,\n )\n\n # Display the legend for the lineplot\n legend()", "def evaluate(\n self,\n targetSeries,\n exogenousSeries=None,\n returnPred=False\n ):\n pass", "def __call__(self, output, target, *args, **kwargs):\n _, y_pred = output.topk(1, 1, True, True)\n y_pred = y_pred.t().detach().cpu().numpy()[0]\n y_true = target.detach().cpu().numpy()\n self.pfm = self.metric_func(y_true, y_pred)\n return self.pfm", "def plot_average_improvement(metrics, epoch, handle_dict):\n total_elbo, total_cond_log_like, total_kl = metrics\n elbo_improvement = 100. * np.mean(np.divide(total_elbo[:, 1] - total_elbo[:, -1], total_elbo[:, 1]), axis=0)\n update_trace(np.array([elbo_improvement]), np.array([epoch]).astype(int), win=handle_dict['elbo_improvement'], name='ELBO')\n cond_log_like_improvement = 100. * np.mean(np.divide(total_cond_log_like[:, 1] - total_cond_log_like[:, -1], total_cond_log_like[:, 1]), axis=0)\n update_trace(np.array([cond_log_like_improvement]), np.array([epoch]).astype(int), win=handle_dict['recon_improvement'], name='log P(x | z)')\n for level in range(len(total_kl)):\n kl_improvement = 100. * np.mean(np.divide(total_kl[level][:, 1] - total_kl[level][:, -1], total_kl[level][:, 1] + 1e-5), axis=0)\n update_trace(np.array([kl_improvement]), np.array([epoch]).astype(int), win=handle_dict['kl_improvement'], name='Level ' + str(level))", "def show_score(x, y, estimator):\n # Instantiate models and predict values\n model = estimator\n model.fit(x, y)\n preds = model.predict(x_test)\n preds = abs(preds.astype(int))\n actuals = y_test\n\n # Print results\n print(f\"{estimator.__class__.__name__}:: r2 score = {round(metrics.r2_score(actuals, preds), 2)} : MAE = {round(metrics.mean_absolute_error(actuals, preds), 2)}\")", "def PlotAverageEstimate( measure='DM', ax=None, scenario={}, errorstart=0, **kwargs ):\n\n if ax is None:\n fig, ax = plt.subplots()\n\n avg, dev = [], []\n for iz, (redshift, color) in enumerate( zip(redshift_bins, Rainbow(redshift_bins)) ):\n P, x = GetLikelihood_Full( measure=measure, redshift=redshift, **scenario )\n a, s = Likelihood2Expectation( P=P, x=x, density=True, log=True )\n avg.append(a)\n dev.append(s)\n ## plot arrorbars, starting at the indicated position\n erb = ax.errorbar( redshift_bins[errorstart:], avg[errorstart:], np.array(dev).reshape([len(avg),2])[errorstart:].transpose(), **kwargs ) \n ## draw the full line with the same kwargs\n kwargs_ = kwargs.copy()\n ## however, remove those kwargs that do not work with plt.plot\n for key in ['errorevery', 'label']:\n kwargs_.pop( key, 0 )\n ## if color is not set, ensure that same color is used as for errorbar\n if 'color' not in kwargs:\n lines, collection = erb.get_children()\n color = lines.get_color()\n kwargs_['color'] = color\n ax.plot( redshift_bins, avg, **kwargs_ )\n# ax.errorbar( redshift_bins, avg, avg - 10**(np.log10(avg)-dev), **kwargs ) \n ax.set_yscale('log')\n ax.set_xlabel('redshift', fontdict={'size':16 })\n ax.set_ylabel('%s / %s' % (label_measure[measure], units[measure]), fontdict={'size':16 } )", "def plot(stats):\n global y1, y2, lines\n bars = redraw()\n\n if y1 == y2:\n 
print('plot equals case')\n add_line(y1)\n ax.set_title('Mean comparison against y = {}'.format(int(y1)))\n\n ttres = st.ttest_1samp(dfT, y1)\n ps = ttres[1]\n\n label_bars(ps, bars, lambda p, b: p_to_color_div(p, b.get_height() > y1), True)\n\n asc, desc = np.arange(0, 1, 0.2), np.arange(1, -0.1, -0.2)\n colors = [p_to_color_div(p, True) for p in asc] + [p_to_color_div(p, False) for p in desc]\n\n leg = add_legend(colors, np.around(np.append(asc, desc), 1))\n else:\n add_line(y1)\n add_line(y2)\n ymin, ymax = min(y1, y2), max(y1, y2)\n\n ax.set_title('Probability of population mean between {} and {}'.format(int(ymin), int(ymax)))\n\n lower = st.t.cdf(ymin, stats['dof'], loc=stats['mean'], scale=stats['stderr'])\n higher = st.t.cdf(ymax, stats['dof'], loc=stats['mean'], scale=stats['stderr'])\n density_in_range = higher - lower\n\n label_bars(density_in_range, bars, lambda p, b: p_to_color_seq(p), False)\n\n seq = np.arange(1.01, 0, -0.1)\n colors = [p_to_color_seq(p) for p in seq]\n\n leg = add_legend(colors, np.around(seq, 1))\n\n return bars", "def plot_mean_std_comparison(evaluators: List):\n nr_plots = len(evaluators)\n fig, ax = plt.subplots(2, nr_plots, figsize=(4 * nr_plots, 7))\n flat_ax = ax.flatten()\n for i in range(nr_plots):\n plot_mean_std(evaluators[i].real, evaluators[i].fake, ax=ax[:, i])\n\n titles = [e.name if e is not None else idx for idx, e in enumerate(evaluators)]\n for i, label in enumerate(titles):\n title_font = {'size': '24'}\n flat_ax[i].set_title(label, **title_font)\n plt.tight_layout()", "def plot_slice_wise_measures(labels, preds, args):\n\n cal_roc = [[], []]\n cal_prrcf1 = [[], [], []] # save PR, RC, F1 respectively\n noncal_prrcf1 = [[], [], []]\n thres_all = []\n noncal_roc = [[], []]\n n_slices = len(labels)\n for thres in range(500, -1, -5):\n print(\"[Threshold # of pixels: {}]\".format(thres))\n thres_all.append(thres)\n cal_pgt, cal_pp, cal_tp, noncal_pgt, noncal_pp, noncal_tp = \\\n plaque_detection_rate(labels, preds, thres=thres)\n\n\n cal_prrcf1[0].append(float(cal_tp) / cal_pp if cal_pp != 0 else 0.0)\n cal_prrcf1[1].append(float(cal_tp) / cal_pgt)\n cal_prrcf1[2].append(2.0 * cal_tp / (cal_pgt + cal_pp))\n noncal_prrcf1[0].append(float(noncal_tp) / noncal_pp if noncal_pp != 0 else 0.0)\n noncal_prrcf1[1].append(float(noncal_tp) / noncal_pgt)\n noncal_prrcf1[2].append(2.0 * noncal_tp / (noncal_pgt + noncal_pp))\n\n cal_roc[0].append((cal_pp - cal_tp) / (n_slices - cal_pgt)) # false negative ratio\n cal_roc[1].append(cal_tp / cal_pgt) # true positive ratio\n noncal_roc[0].append((noncal_pp - noncal_tp) / (n_slices - noncal_pgt)) # false negative ratio\n noncal_roc[1].append(noncal_tp / noncal_pgt) # true positive ratio\n\n print('Cal: PR - {:.4f} RC - {:.4f} F1 - {:.4f} Noncal: PR - {:.4f} RC - {:.4f} F1 - {:.4f}'.format(\n cal_prrcf1[0][-1], cal_prrcf1[1][-1], cal_prrcf1[2][-1],\n noncal_prrcf1[0][-1], noncal_prrcf1[1][-1], noncal_prrcf1[2][-1]))\n print('Cal: fpr - {:.4f} tpr - {:.4f} Noncal: fpr - {:.4f} tpr - {:.4f}'.format(\n cal_roc[0][-1], cal_roc[1][-1], noncal_roc[0][-1], noncal_roc[1][-1]))\n\n # plot the roc curve and calculate AUC\n fig_names = ['calcified', 'non-calcified']\n for plq_metrics, fig_name in zip([cal_roc, noncal_roc], fig_names):\n plt.figure()\n lw = 2\n auc_metric = auc(plq_metrics[0], plq_metrics[1])\n print(\"{} : {}\".format(fig_name, auc_metric))\n plt.plot(plq_metrics[0], plq_metrics[1], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % auc_metric)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, 
linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('slice-wise ROC curve of {} plaques'.format(fig_name))\n plt.legend(loc=\"lower right\")\n plt.savefig(\"./{}/{}_roc.png\".format(args.fig_dir, fig_name))\n\n for plq_metrics, fig_name in zip([cal_prrcf1, noncal_prrcf1], fig_names):\n plt.figure()\n lw = 2\n plt.plot(thres_all, plq_metrics[0], color='r', lw=lw, label='precision')\n plt.plot(thres_all, plq_metrics[1], color='g', lw=lw, label='recall')\n plt.plot(thres_all, plq_metrics[2], color='b', lw=lw, label='f1')\n\n plt.xlim([min(thres_all), max(thres_all)])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Threshold Number of Pixels')\n plt.title('{} measures under different thresholds'.format(fig_name))\n plt.legend(bbox_to_anchor=(1, 0.95), loc=\"upper right\")\n plt.savefig(\"./{}/{}_prrcf1.png\".format(args.fig_dir, fig_name))", "def PlotContributions( ax=None, dev=False, measure='DM', redshift=0.1, cumulative=False, N_inter=False, **scenario ):\n if ax is None:\n fig, ax = plt.subplots()\n for region in regions:\n models = scenario.get( region )\n if models:\n for model in models:\n P = GetLikelihood( region=region, model=model, measure=measure, redshift=redshift, N_inter=N_inter, dev=dev )\n PlotLikelihood( *P, measure=measure, label=region+': '+Label(model) , linestyle=linestyle_region[region], ax=ax, cumulative=cumulative )\n ax.legend()\n ax.set_title( \"redshift = %.1f\" % redshift )", "def finalize_plot(self, artifact_name, attacker_x=None, attacker_y=None):\n # Plot the axis ticks.\n plt.ylim((self.min_y - 10.0, self.max_y + 10.0))\n plt.xlim((self.min_x - 10.0, self.max_x + 10.0))\n plt.xticks([self.min_x + 1000, 0.0, self.max_x], size=15)\n plt.yticks([self.min_y + 1000, 0.0, self.max_y], size=15)\n # Add and place the labels.\n ax = plt.gca()\n plt.ylabel(\"Crossrange (ft)\", size=15)\n plt.xlabel(\"Downrange (ft)\", size=15)\n plt.subplots_adjust(bottom=0.25, left=0.25)\n ax.yaxis.set_label_coords(-0.1, 0.5)\n # Place the plane.\n plane = plt.imread(\"plane.png\").transpose((1, 0, 2))\n width = (self.max_x - self.min_x) / 10\n height = (496.0 / 499.0) * width\n x_start = -(width / 2.0)\n y_start = -(height / 2.0)\n plt.imshow(plane, extent=[x_start, x_start + width,\n y_start, y_start + height], zorder=100)\n plane = np.flip(plane, 1)\n if attacker_x is None:\n attacker_x = self.max_x - (2 * width)\n if attacker_y is None:\n attacker_y = self.max_y - (2 * height)\n red_plane = self.color_plane_png(plane, [1.0, 0, 0], True)\n plt.imshow(red_plane, zorder=100,\n extent=[attacker_x, attacker_x + width,\n attacker_y, attacker_y + height])\n self.record_artifact(plt, artifact_name, \"matplotlib\")\n plt.clf()", "def plot_actor_critic_results(algorithm_results_list, threshold=None, window_len=100, \n plt_title=None):\n\n # extract data\n scores_list = []\n pol_loss_list = []\n val_loss_list = []\n clipped_L_list= []\n entropy_list = []\n alg_titles = []\n\n for alg_res in algorithm_results_list:\n if isinstance(alg_res, str):\n # load from file\n alg_titles.append(alg_res)\n data = pickle.load(open(alg_res, 'rb'))\n scores_list.append(data['scores'])\n pol_loss_list.append(data['policy_loss'])\n val_loss_list.append(data['value_loss'])\n clipped_L_list.append(data['clipped_surrogate'])\n entropy_list.append(data['entropy'])\n\n # plot scores\n fig = plt.figure(\"scores\")\n ax = fig.add_subplot(111)\n\n for scores in scores_list:\n \n # compute moving average and standard deviation\n mv_avg = 
np.asarray([np.mean(scores[max(0, i-window_len):i]) for i in range(len(scores))])\n # mv_std = np.asarray([np.std(scores[max(0, i-window_len):i]) for i in range(len(scores))])\n mv_q16 = np.asarray([np.quantile(scores[max(0, i-window_len):i], 0.16) for i in range(1,len(scores))])\n mv_q84 = np.asarray([np.quantile(scores[max(0, i-window_len):i], 0.84) for i in range(1,len(scores))])\n mv_q16 = np.insert(mv_q16, 0, 0.0)\n mv_q84 = np.insert(mv_q84, 0, 0.0)\n\n\n # plot\n ax.plot(np.arange(len(scores)), mv_avg)\n # ax.fill_between(np.arange(len(scores)), mv_avg-mv_std, mv_avg+mv_std, alpha=0.3)\n ax.fill_between(np.arange(len(scores)), mv_q16, mv_q84, alpha=0.3)\n\n # plot success threshold\n if threshold is not None:\n plt.hlines(threshold, 0, len(scores), colors='r', linestyles='dashed')\n plt.title(plt_title)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.legend(alg_titles)\n\n\n # plot losses\n fig = plt.figure(\"losses\")\n ax = fig.add_subplot(111)\n for pol_losses in pol_loss_list:\n \n # # compute moving average and standard deviation\n # mv_avg = np.asarray([np.mean(pol_losses[max(0, i-window_len):i]) for i in range(len(pol_losses))])\n # # mv_std = np.asarray([np.std(pol_losses[max(0, i-window_len):i]) for i in range(len(pol_losses))])\n # mv_q16 = np.asarray([np.quantile(pol_losses[max(0, i-window_len):i], 0.16) for i in range(1,len(pol_losses))])\n # mv_q84 = np.asarray([np.quantile(pol_losses[max(0, i-window_len):i], 0.84) for i in range(1,len(pol_losses))])\n # mv_q16 = np.insert(mv_q16, 0, 0.0)\n # mv_q84 = np.insert(mv_q84, 0, 0.0)\n\n\n # plot\n ax.plot(np.arange(len(pol_losses)), pol_losses)\n # ax.fill_between(np.arange(len(pol_losses)), mv_avg-mv_std, mv_avg+mv_std, alpha=0.3)\n # ax.fill_between(np.arange(len(pol_losses)), mv_q16, mv_q84, alpha=0.3)\n\n for val_losses in val_loss_list:\n \n # plot\n ax.plot(np.arange(len(val_losses)), val_losses)\n\n for clipped_L in clipped_L_list:\n \n # plot\n ax.plot(np.arange(len(clipped_L)), clipped_L)\n\n for entropy in entropy_list:\n ax.plot(np.arange(len(entropy)), entropy)\n\n\n\n # plot success threshold\n if plt_title is not None:\n plt.title(plt_title + \": losses\")\n plt.ylabel('Losses')\n plt.xlabel('Training Iteration #')\n plt.legend(['policy loss', 'value loss', 'clipped surrogat', 'entropy'])\n\n # open plots\n plt.show()", "def pie_chart_score(self, grouped):\n picked_scenario = self.scenario_dict[\"%d\" % (self.scenario_num-1)]\n distinct_enum_X = self.data_dict[picked_scenario[\"X\"]]['distinct_enum']\n score = 0\n if min(grouped) < 0:\n score = 0\n elif distinct_enum_X == 1:\n score = 0\n elif picked_scenario[\"Agg_func_Y\"] == \"avg\":\n score = 0\n elif distinct_enum_X >= 2 and distinct_enum_X <= 8:\n score += self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]]) / 8\n elif distinct_enum_X > 8:\n score += 4 * (self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]])) / distinct_enum_X\n if score > 3:\n score = 3\n return score", "def huber(y_true, y_pred, delta=1.0):\n y_pred = math_ops.cast(y_pred, dtype=backend.floatx())\n y_true = math_ops.cast(y_true, dtype=backend.floatx())\n delta = math_ops.cast(delta, dtype=backend.floatx())\n error = math_ops.subtract(y_pred, y_true)\n abs_error = math_ops.abs(error)\n half = tensor_conversion.convert_to_tensor_v2_with_dispatch(\n 0.5, dtype=abs_error.dtype\n )\n return backend.mean(\n array_ops.where_v2(abs_error <= delta, half * math_ops.square(error),\n delta * abs_error - half * math_ops.square(delta)),\n axis=-1)", "def plot_results(y_trues, 
y_preds, marker='o', ms=5, fillstyle=None,\n linestyle='None', output_file=None):\n n_folds = len(y_trues)\n plt.figure(figsize=(6, 6 * n_folds))\n\n for i, (y_true, y_pred) in enumerate(zip(y_trues, y_preds)):\n\n plt.subplot(n_folds, 1, i + 1)\n\n # Plot each point\n plt.plot(y_true, y_pred, marker=marker, ms=ms,\n fillstyle=fillstyle, linestyle=linestyle, color='C0')\n\n # Plot the perfect line\n min_age = np.min(np.r_[y_true, y_pred])\n max_age = np.max(np.r_[y_true, y_pred])\n plt.plot([min_age, max_age], [min_age, max_age], color='C1')\n\n # Compute the MAE\n mae = mean_absolute_error(y_true, y_pred)\n r, _ = spearmanr(y_true, np.abs(y_true - y_pred))\n\n # Add a title\n plt.title(\"Fold {0}\\nMAE={1:0.3f} - r={2:0.3f}\"\n .format(i + 1, mae, r), fontsize=16)\n plt.xlabel(\"True age\", fontsize=12)\n plt.ylabel(\"Predicted age\", fontsize=12)\n\n plt.subplots_adjust(hspace=0.45)\n\n if output_file is not None:\n plt.savefig(output_file)\n\n plt.show()", "def plot_trends(group, country=\"US\", state=None, place=None, predictive_method=\"ARIMA\"):\n print(f\"* Plotting Google Trends of `{group}` for {country} - {state or 'All'}\")\n group_queries = get_group_queries(group, only_root=True)\n\n n_queries = len(group_queries)\n n_cols = 3\n n_rows = int(n_queries / n_cols) + (1 if n_queries % n_cols else 0)\n\n # Annotations\n annotations = []\n\n # Initialize figure with subplots\n subplot_titles = [\"%s...\" % t[:22] if len(t) >= 22 else t for t in group_queries]\n fig = make_subplots(\n rows=n_rows, cols=n_cols, subplot_titles=subplot_titles,\n shared_yaxes=True,\n print_grid=True\n )\n\n # Marked Dates\n covid_start_date = COVID_START_DATE\n reopen_date = REOPEN_DATE\n reopen_date_minus_1 = REOPEN_DATE_MINUS_1\n data_start_date = DATA_START_DATE\n data_end_date = DATA_END_DATE\n\n # Figure variable\n baseline = 0\n value_range = [0, 100]\n\n # Model params\n model_params = []\n\n for idx, query in enumerate(group_queries):\n row = int(idx / n_cols) + 1\n col = idx % n_cols + 1\n showlegend = idx == 0\n\n query_file_path = get_data_filename(group, query, country=country, state=state, full=True)\n df = pd.read_csv(query_file_path, parse_dates=True)\n count = df[\"date\"].count()\n\n # ARIMA Model\n if query in df.columns:\n print(\"Query: \", query)\n # get_arima_params(df[query])\n df, model = arima_predict(df, from_date=PREDICT_FROM_DATE, value_col=query)\n params = model.get_params()\n model_params.append([query, str(params[\"order\"])])\n # return False\n \n # No data\n if count == 0:\n continue\n\n # Process\n stayhome_order_date = place.get(\"ClosedFrom\") if place else SOCIAL_DISTANCE_ORDER_DATE\n\n df = df[(df[\"date\"] >= data_start_date) & (df[\"date\"] <= data_end_date)]\n df_before = df[(df[\"date\"] <= reopen_date)]\n df_after = df[(df[\"date\"] >= reopen_date_minus_1)]\n df_prediction = df[df[\"is_predicted\"] == 1]\n\n # Normalize\n if config.TRENDS_APPLY_NORMALIZATION:\n max_value = df[query].max()\n baseline = df_before[query].median()\n df[\"value\"] = df[query].apply(lambda x: (x - baseline) / max_value)\n df_before[\"value\"] = df_before[query].apply(lambda x: (x - baseline) / max_value)\n df_after[\"value\"] = df_after[query].apply(lambda x: (x - baseline) / max_value)\n baseline = 0\n value_range = [-1, 1]\n else:\n max_value = df[query].max()\n baseline = df_before[query].median()\n df[\"value\"] = df[query]\n df_before[\"value\"] = df_before[query]\n df_after[\"value\"] = df_after[query]\n\n # Compute difference\n query_text = query.split(\"+\")[0].strip() + \" + 
...\" if \"+\" in query else query\n actual_mean, actual_meanCI95min, actual_meanCI95max = mean_confidence_interval(df_prediction[query])\n predict_mean = df_prediction[\"prediction\"].mean()\n diff = round(100 * (actual_mean - predict_mean) / predict_mean, 1)\n diffCI95min = round(100 * (actual_meanCI95min - predict_mean) / predict_mean, 1)\n diffCI95max = round(100 * (actual_meanCI95max - predict_mean) / predict_mean, 1)\n x_date = list(df['date'])[int(df[\"date\"].count()/2)]\n diff_annot = go.layout.Annotation(\n text=f'<b>{query_text}</b><br><sub><b style=\"color:{config.COLOR_UPTREND if diff >= 0 else config.COLOR_DOWNTREND}\">{diff}%</b>; 95%CI, [{diffCI95min}%, {diffCI95max}%]</sub>',\n showarrow=False, xanchor=\"center\", yanchor=\"top\", \n x=x_date,\n y=0.0,\n xshift=0,\n yshift=-5,\n xref=f\"x{'' if idx == 0 else idx + 1}\",\n yref=f\"y{'' if idx == 0 else idx + 1}\"\n )\n annotations.append(diff_annot)\n\n # Lockdown period\n max_y = max(df[query].max(), abs(df[query].min()))\n min_y = -max_y\n shape_lockdown = go.layout.Shape(**{\"type\": \"rect\",\"y0\":100,\"y1\": -100,\"x0\":COVID_START_DATE, \n \"x1\":REOPEN_DATE,\"xref\":\"x1\",\"yref\":\"y1\",\"layer\":\"below\",\n \"fillcolor\":\"#eeeeee\", \"line\":dict(width=0), \"line_width\": 0})\n fig.add_shape(shape_lockdown, row=row, col=col)\n\n # Horizontal line \n shape = go.layout.Shape(**{\"type\": \"line\",\"y0\":baseline,\"y1\": baseline,\"x0\":str(df[\"date\"].values[0]), \n \"x1\":str(df[\"date\"].values[-1]),\"xref\":\"x1\",\"yref\":\"y1\",\"layer\":\"below\",\n \"line\": {\"color\": \"rgb(200, 200, 200)\",\"width\": 1.5}})\n fig.add_shape(shape, row=row, col=col)\n\n # Stay home order\n if stayhome_order_date:\n shape_stayhome_order = go.layout.Shape(**{\"type\": \"line\",\"y0\":-0.25,\"y1\": 0.25,\"x0\":stayhome_order_date, \n \"x1\":stayhome_order_date,\"xref\":\"x1\",\"yref\":\"y1\",\n \"line\": {\"color\": \"blue\",\"width\": 1.5, \"dash\": \"dot\"}})\n fig.add_shape(shape_stayhome_order, row=row, col=col)\n\n # Plot\n subplot_before = go.Scatter(x=df_before[\"date\"], y=df_before[\"value\"], \n mode=\"lines\", name=\"Before Lockdown\",\n line=dict(width=1, color=config.LINE_COLOR_BEFORE), \n line_shape=\"linear\", showlegend=False) # linear or spline \n subplot_after = go.Scatter(x=df_after[\"date\"], y=df_after[\"value\"], \n mode=\"lines\", name=\"Actual Queries\",\n line=dict(width=1.5, color=config.LINE_COLOR_AFTER), \n line_shape=\"linear\", showlegend=showlegend) # linear or spline \n subplot_prediction = go.Scatter(x=df_prediction[\"date\"], y=df_prediction[\"prediction\"], \n mode=\"lines\", name=\"Expected Queries\",\n line=dict(width=2, color=config.LINE_COLOR_BEFORE, dash=\"dot\"), \n line_shape=\"linear\", showlegend=showlegend) # linear or spline \n subplot_lockdown_legend = go.Bar(x=[reopen_date,], y=[0,], \n name=\"Early Lockdown Phase\", \n showlegend=showlegend,\n marker_color=\"#eeeeee\")\n fig.add_trace(subplot_before, row=row, col=col)\n fig.add_trace(subplot_after, row=row, col=col)\n fig.add_trace(subplot_prediction, row=row, col=col)\n if idx == 0:\n fig.add_trace(subplot_lockdown_legend, row=row, col=col)\n\n # break\n\n # Caption\n # caption = go.layout.Annotation(\n # showarrow=False,\n # text=\"\",\n # xanchor=\"center\",\n # x=0.5,\n # yanchor=\"top\",\n # y=0.0,\n # yshift=0,\n # )\n\n # Layout\n # location = f\"{country}.{state}\" if state else country\n # fig_title = f\"\"\"Term: {group}. 
Location: {location}<br>\n # <span style=\"font-size: 14px;line-height:1\">Period: {data_start_date} - {data_end_date}\n # <br>Lockdown Period: {covid_start_date} - {PREDICT_FROM_DATE}</span>\"\"\"\n fig_title = \"\"\n fig.update_layout(title={\"text\": fig_title, \"x\":0.5, \"xanchor\": \"center\"}, \n title_font=dict(size=12),\n height=50 + n_rows * 175, width=250 * n_cols, coloraxis=dict(colorscale=\"Bluered_r\"), \n showlegend=True, plot_bgcolor=\"rgb(255,255,255)\", titlefont={\"size\": 30},\n margin={\"t\": 50},\n annotations=annotations,\n legend=dict(\n orientation=\"v\",\n yanchor=\"bottom\",\n y=0,\n xanchor=\"right\",\n x=1,\n bgcolor=\"white\",\n bordercolor=\"#333\",\n borderwidth=1\n )\n )\n fig.update_xaxes(showgrid=False, showticklabels=False, showline=False)\n fig.update_yaxes(showgrid=False, showticklabels=False, showline=True, range=value_range)\n\n # Store model parameters\n mkdir_if_not_exist(config.TRENDS_OUTPUT_DIR)\n df_params = pd.DataFrame(model_params, columns=[\"Query\", \"Order\"])\n df_params.to_csv(\"%s/ARIMA_orders_%s.csv\" % (config.TRENDS_OUTPUT_DIR, group), index=False)\n\n # Create online URL\n url = py.iplot(fig, filename=group, file_id=group)\n print(\"URL:\", url.src)\n\n if config.TRENDS_EXPORT_FIGURES:\n # Save\n mkdir_if_not_exist(config.TRENDS_FIGURES_DIR)\n fig.write_image(\"%s/%s_%s_%s.jpg\" % (config.TRENDS_FIGURES_DIR, country, state or \"All\", group))\n # fig.show()\n else:\n # Show\n fig.show()", "def plot_all(self):\n self.plot_ramps()\n self.plot_groupdq()", "def parallel_group(\n G, group_by, ax=None, y_offset=-0.3, rotation=45, ha=\"right\", va=\"top\"\n):\n if ax is None:\n ax = plt.gca()\n nt = utils.node_table(G)\n # groups = nt.groupby(group_by).apply(lambda df: len(df)).sort_index()\n groups = sorted(nt[group_by].unique())\n\n for i, label in enumerate(groups):\n x = i * 4\n y = y_offset\n ax.annotate(label, xy=(x, y), ha=ha, va=va, rotation=rotation)\n ax.relim()", "def calc_exploration_and_retention(axs, df):\n\n if 'ID' in df.columns:\n df_new_grouped = df.groupby(['ID', 'episode', 'observations'])\n else:\n df_new_grouped = df.groupby(['episode', 'observations'])\n failed_ctr = np.zeros(len(df_new_grouped)) # at most num_actions-1 if optimal\n retention_rate = np.zeros(len(df_new_grouped))\n\n for idx, (name, group) in enumerate(df_new_grouped):\n if group.rewards.iloc[0] == 1:\n first_rewarded_idx = 0\n else:\n first_rewarded_idx = np.argmax(group.rewards.diff() == 1)\n failed_ctr[idx] = first_rewarded_idx\n if first_rewarded_idx == len(group) - 1:\n retention_rate[idx] = 1\n else:\n retention_rate[idx] = group.rewards.iloc[first_rewarded_idx+1:].mean()\n\n unique, counts = np.unique(failed_ctr, return_counts=True)\n axs[0].bar(unique, counts)\n axs[0].set_xlabel('# of failed attempts')\n\n axs[1].hist(retention_rate)\n axs[1].set_xlabel('proportion of rewarded trials')", "def make_summary_plot(run_lists, file_descriptor, attr='sipm1.threeSampleAmpl'):\n biases = []\n gains = []\n pes = []\n currs = []\n gainerrs = []\n quad_terms = []\n quad_errs = []\n for row in sorted(run_lists):\n biases.append(row[0])\n gain_out = fit_gain(row[1], attr=attr)\n out_tuple = gain_out[0]\n gains.append(out_tuple[0])\n gainerrs.append(out_tuple[3])\n smeans = sorted(gain_out[1])\n currs.append(0.5*(smeans[-1] + smeans[-2]))\n pes.append(currs[-1]/gains[-1])\n quad_terms.append(out_tuple[1])\n quad_errs.append(out_tuple[4])\n\n maxgain = max(gains)\n gains = np.array(gains)/maxgain\n gainerrs = np.array(gainerrs)/maxgain\n # gainerrs = 
0.1*gains\n\n currs = np.array(currs)/max(currs)\n pes = np.array(pes)\n pe_errs = gainerrs/gains*pes\n maxpe = max(pes)\n fig, ax1 = plt.subplots()\n\n coeffs, V = np.polyfit(biases, gains, 1, w=1.0/gainerrs, cov=True)\n breakdown = -1*coeffs[1]/coeffs[0]\n\n breakdown_sigma = sigma_from_cov(coeffs, V)\n\n # calculate sigmas throughout range\n vals, vecs = np.linalg.eig(V)\n U = np.transpose(vecs)\n xs_for_error = np.arange(breakdown - 0.1, max(biases) + 0.1, 0.01)\n gain_sigmas = sig_from_diag(xs_for_error, U, vals)\n error_band_ys = np.array([i*coeffs[0] + coeffs[1] for i in xs_for_error])\n ax1.fill_between(xs_for_error, error_band_ys + gain_sigmas,\n error_band_ys - gain_sigmas, facecolor='red', alpha=0.5)\n\n fitline = [i*coeffs[0] + coeffs[1] for i in biases] + [0]\n fitbiases = biases + [breakdown]\n\n ax1.set_title('bias scan %s' % file_descriptor)\n fitplot = ax1.plot(fitbiases, fitline, 'r-')\n gainplot = ax1.errorbar(\n biases, gains, yerr=gainerrs, fmt='ro', markersize=10)\n currplot = ax1.plot(biases, currs, 'g*', markersize=15)\n ax1.set_ylim(0, 1.105)\n ax1.set_xlim([breakdown - 0.1, max(biases) + 0.1])\n ax1.set_xlabel('bias voltage [V]')\n ax1.set_ylabel('relative gain, charge [a.u.]')\n\n ticks = [breakdown]\n ticks.extend([bias for bias in biases[::2]])\n tick_labels = ['%.1f $\\pm$ %.1f' % (breakdown, breakdown_sigma)]\n tick_labels.extend([str(bias) for bias in biases[::2]])\n ax1.set_xticks(ticks)\n ax1.set_xticklabels(tick_labels)\n ax1.grid()\n ax1.get_xticklabels()[0].set_color('r')\n\n ax2 = ax1.twinx()\n peplot = ax2.errorbar(biases, pes, yerr=pe_errs, fmt='b^', markersize=10)\n ax2.set_ylabel('pe', color='b')\n ax2.set_ylim(0, maxpe*1.105)\n ax2.set_xlim([breakdown - 0.1, max(biases) + 0.1])\n for tick in ax2.get_yticklabels():\n tick.set_color('b')\n ax1.legend([gainplot[0]]+currplot+[peplot[0]]+fitplot,\n ['gain', 'charge', 'pes', 'gain fit'],\n loc='best', numpoints=1)\n\n plt.savefig('pdfs/breakdownPlot%s.pdf' % file_descriptor)\n plt.show()\n\n quadploterrs = 0.5/np.sqrt(quad_terms)*quad_errs\n plt.errorbar(biases, np.sqrt(quad_terms)*100, yerr=quadploterrs*100, fmt='ko')\n plt.xlim(min(biases) - 0.1, max(biases) + 0.1)\n plt.xlabel('bias [V]')\n plt.ylabel('sqrt(quadratic term) [%]')\n plt.title('quadratic terms %s' % file_descriptor)\n\n plt.savefig('pdfs/quadraticTerms%s.pdf' % file_descriptor)\n plt.show()", "def mean_absolute_error(self, test_set, predicted_values):\r\n\r\n running_sum = 0\r\n for i in range(len(test_set)):\r\n running_sum += abs(test_set[i].classification - predicted_values[i])\r\n\r\n running_sum = running_sum / len(test_set)\r\n self.performance += running_sum\r\n self.num_performances += 1\r\n return running_sum" ]
[ "0.6690005", "0.52602696", "0.5226427", "0.5201476", "0.5161905", "0.51192516", "0.5022657", "0.49890342", "0.497622", "0.49757755", "0.49593621", "0.4885093", "0.4878392", "0.48618805", "0.48530275", "0.48435473", "0.48297042", "0.4821337", "0.47865835", "0.47860396", "0.47836027", "0.47807932", "0.47694817", "0.47511947", "0.47415298", "0.4731351", "0.47182968", "0.4698362", "0.46884245", "0.4687587", "0.4686242", "0.4669421", "0.4666238", "0.46602106", "0.46536082", "0.46473327", "0.46350625", "0.4630302", "0.46286747", "0.4626559", "0.46243772", "0.4611647", "0.46073687", "0.45964602", "0.459435", "0.45852762", "0.45739356", "0.45737407", "0.45735958", "0.45617205", "0.45571855", "0.45570925", "0.4556575", "0.4554534", "0.4552679", "0.45526147", "0.45467585", "0.45436883", "0.45420164", "0.4539548", "0.45365503", "0.4534376", "0.45328984", "0.45281735", "0.4520107", "0.45176026", "0.45133537", "0.45114222", "0.45091024", "0.4505906", "0.45057347", "0.45039964", "0.4497709", "0.44970074", "0.44876328", "0.44873574", "0.44846043", "0.44800603", "0.44777772", "0.44775075", "0.44755536", "0.4475342", "0.44744956", "0.4473647", "0.4471668", "0.4468327", "0.44683182", "0.4464315", "0.4463731", "0.44618347", "0.44607073", "0.4453756", "0.4449692", "0.44456753", "0.44412825", "0.44378322", "0.44262874", "0.4423473", "0.44230604", "0.44211605" ]
0.60396034
1
Generate keyword arguments suitable for consumption by the ticketswitch API
def as_api_parameters(self):
    raise NotImplementedError(
        'as_api_parameters not implemented on ' + self.__class__.__name__)
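A minimal illustrative sketch (not part of the dataset row above) of how a concrete class might override this stub so that as_api_parameters() returns keyword arguments ready to pass to an API call. The class name and fields below are invented for the example and are not taken from the ticketswitch library.

class CardDetails:
    """Hypothetical payment-details object, used only for illustration."""

    def __init__(self, card_number, expiry_month, expiry_year):
        self.card_number = card_number
        self.expiry_month = expiry_month
        self.expiry_year = expiry_year

    def as_api_parameters(self):
        # Build a flat dict of keyword arguments for the API call;
        # a subclass replaces the NotImplementedError stub with logic like this.
        return {
            'card_number': self.card_number,
            'expiry_date': '{:02d}{:02d}'.format(self.expiry_month,
                                                 self.expiry_year % 100),
        }


# Example usage:
# CardDetails('4111111111111111', 4, 2030).as_api_parameters()
# -> {'card_number': '4111111111111111', 'expiry_date': '0430'}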
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generateKwargsAsString(self):\n args = \"\"\n axisList = self.tabWidget.currentWidget()\n\n for axisWidget in axisList.getAxisWidgets():\n args += \"%s = %s, \" % (axisWidget.axis.id,\n axisWidget.getCurrentValuesAsStr())\n\n # Generate additional args\n args += 'squeeze = 0'\n args += \", order = '%s' \" % axisList.getAxesOrderString()\n return args", "def extra_target_arguments(self):\n return {}", "def get_kwargs(self):\n return {}", "def _set_named_args(self, **kv):\n # named_params = {}\n # for k in kv:\n # named_params[\"${0}\".format(k)] = json.dumps(kv[k])\n # couchbase++ wants all args JSONified\n named_params = {f'${k}': json.dumps(v) for k, v in kv.items()}\n\n self._params[\"named_parameters\"] = named_params\n return self", "def _generate_params(self):\n return {\n 'lis_outcome_service_url': self.lis_outcome_service_url,\n 'lis_result_sourcedid': self.lis_result_sourcedid,\n 'oauth_consumer_key': self.key\n }", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def test_kwargs(self):\n kwargs = forge.kwargs\n assert isinstance(kwargs, forge._signature.VarKeyword)\n assert kwargs.name == 'kwargs'\n assert kwargs.converter is None\n assert kwargs.validator is None", "def generateKwArgs(self, axisList=None):\n if axisList is None:\n axisList = self.tabWidget.currentWidget()\n\n kwargs = {} \n for axisWidget in axisList.getAxisWidgets():\n kwargs[axisWidget.axis.id] = axisWidget.getCurrentValues()\n\n # Generate additional args\n kwargs['squeeze'] = 0\n kwargs['order'] = axisList.getAxesOrderString()\n\n return kwargs", "def make_args(self, args):\n result_str = \"?\"\n for k, v in args.iteritems():\n result_str = result_str + k + \"=\" + v + \"&\"\n return result_str", "def __init__(**params):", "def add_kwargs():\n pass", "def _set_named_args(self, **kv):\n for k in kv:\n self._body['${0}'.format(k)] = kv[k]\n return self", "def generate_arg_and_kwags():\n def gen_func(\n #df: DataSource,\n option: List[list],\n style: List[dict]\n )->List[Tuple[list, dict]]:\n\n if len(option) != len(style):\n raise SystemError(\"option and style must be same size list.\")\n\n arg_and_kwarg = []\n for o, s in zip(option, style):\n arg = [*o]\n kwargs = s\n arg_and_kwarg.append((arg, kwargs))\n return arg_and_kwarg\n return gen_func", "def define_parameters(self):", "def init_args(self):\n return {\n \"doc\": self.__doc__.format(name=colored(self.module_name, \"green\", attrs=['bold','underline'])),\n \"Url\": \"set a target url\",\n 'Type': \"set type to check , [php, asp, aspx, cgi, dir , mdb]\",\n }", "def test_kw_args_with_keywords():\n assert arguments.fun_opt_kw_params(visited_color='blue',\n link_color='red',\n back_color='yellow',\n fore_color='orange') == ('orange',\n 'yellow',\n 'red', 'blue')", "def params(self, **kwargs):\n return kwargs", "def construct_params(self):\n\n return {\"expand\": self.get_expand()}", "def test_kw_args_with_defaults():\n assert arguments.fun_opt_kw_params() == ('blue', 'red', 'yellow', 'orange')", "def pykwarg(self):\n return self._pykwarg", "def toargs(context, schema, data):\n data = dict(data)\n args = {}\n for name, field in schema.namesAndDescriptions(True):\n field = field.bind(context)\n n = name\n if n.endswith('_') and iskeyword(n[:-1]):\n n = n[:-1]\n\n s = data.get(n, data)\n if s is not data:\n s = str(s)\n del data[n]\n\n try:\n args[str(name)] = field.from_unicode(s)\n except ValidationError as v:\n reraise(ConfigurationError('Invalid value for', n, str(v)),\n None, sys.exc_info()[2])\n elif 
field.required:\n # if the default is valid, we can use that:\n default = field.default\n try:\n field.validate(default)\n except ValidationError:\n raise ConfigurationError('Missing parameter:', n)\n args[str(name)] = default\n\n if data:\n # we had data left over\n try:\n keyword_arguments = schema.getTaggedValue('keyword_arguments')\n except KeyError:\n keyword_arguments = False\n if not keyword_arguments:\n raise ConfigurationError('Unrecognized parameters:', *data)\n\n for name in data:\n args[str(name)] = data[name]\n\n return args", "def get_kwargs():\n\treturn get_kwargs_raw(sys.argv)", "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def build_arg_list(fn, env):\r\n kw = {}\r\n argspec = inspect.getargspec(fn)\r\n\r\n # if there is a **kw argument in the fn definition,\r\n # just pass along the environment\r\n if argspec[2]:\r\n kw = env\r\n #else for each entry in the arglist set the value from the environment\r\n else:\r\n #skip self\r\n argnames = argspec[0][1:]\r\n for name in argnames:\r\n if name in env:\r\n kw[name] = env[name]\r\n return kw", "def kwargs(kwargs):\n run_kwargs(kwargs)", "def _kwargs(self):\n dict = {\"name\":self.name}\n return dict", "def get_dynamic_setup_params():\n return {\n # Retrieve the long description from the README\n # 'long_description': read_file('README.rst'),\n 'install_requires': substitute_crypto_to_req(\n read_requirements('requirements.txt'),\n ),\n # 'extras_require': read_extras(),\n }", "def get_dynamic_setup_params():\n\n return {\n # Retrieve the long description from the README\n \"long_description\": read_file(\"README.md\")\n }", "def _get_init_args(self):\n\n return dict(enum=self.enum, dflt=self._defname,\n base=self.base, shape=self.shape)", "def _kwargs(self):\n dict = DAG._kwargs(self) \n dict[\"inputpaths\"] = self.inputpaths\n dict[\"outputpath\"] = self.outputpath\n dict[\"query\"] = self.query\n return dict", "def test_020_kwargs(self):\n caller = self.get_caller([KwargsTaskOverride])\n self.assertEqual([\"A\", \"B\"], caller(\"A\", \"B\"))", "def format_arguments(self, **kwargs):\n return kwargs", "def driver_kwargs(self):\n out = super(TestCisRpcSplit, self).driver_kwargs\n # Reversed\n out['icomm_kwargs'] = dict(name=self.ocomm_name)\n out['ocomm_kwargs'] = dict(name=self.icomm_name)\n return out", "def definearguments(self, customparser):\n if not customparser:\n return\n customparser.add_option(\n '--url',\n dest='url',\n help=\"Use the provided iLO URL to login.\",\n default=None,\n )\n customparser.add_option(\n '-u',\n '--user',\n dest='user',\n help=\"If you are not logged in yet, including this flag along\"\\\n \" with the password and URL flags can be used to log into a\"\\\n \" server in the same command.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-p',\n '--password',\n dest='password',\n help=\"\"\"Use the provided iLO password to log in.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-e',\n '--enc',\n dest='encode',\n action='store_true',\n help=SUPPRESS_HELP,\n default=False,\n )", "def add_kwargs_arg(parser):\n parser.add_argument('--arg', '-a', type=__kwargs_arg, metavar='K=V', action='append',\n dest='kwargs', default=[],\n help='any special keyword arguments to pass to the method, formated as '\n 'key=value with value being a valid Python literal or one of the special '\n 'values nan, inf, -inf, N4, N8, N8_DIST, N6, N18, N18_DIST, N26, N26_DIST')", "def get_keyword_args(function):\n argspec = inspect.getargspec(function)\n kwargs = 
argspec.args[len(argspec.args) - len(argspec.defaults):]\n kwargs = {arg: value for arg, value in zip(kwargs, argspec.defaults)}\n return kwargs", "def _get_kwargs_for_backend(self):\n return dict()", "def new_comm_kwargs(cls, *args, **kwargs):\n kwargs.setdefault('address', 'file.txt')\n return args, kwargs", "def _params(self, **kwargs):\n defaults = {'display_name': 'Test User',\n 'locale': 'en-us',\n 'country': 'us'}\n defaults.update(kwargs)\n\n return defaults", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def test_kw_args_with_positional():\n assert arguments.fun_opt_kw_params('blue', 'red', 'yellow',\n 'orange') == ('blue', 'red', 'yellow',\n 'orange')", "def manage_params(args):\n # Socrata API\n with open(\"secret/builtby-socrata.yaml\", 'r') as f:\n try:\n socrata_api_credentials = yaml.load(f)\n except yaml.YAMLError as exc:\n print(exc)\n\n socrata_app_token = socrata_api_credentials['app_token']\n\n # base params\n params = {\n '$$app_token': socrata_app_token\n }\n # remove null attributes\n args = {k: v for k, v in args.items() if v is not None}\n # add args to params\n params.update(args) # inplace\n\n return params", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def create_parameters_description():\n description = OrderedDict()\n description['GeneralArguments'] = [\n {\n 'main_argument_name': '--config-file',\n 'argument_name_options': ['--config'],\n 'parameter_name': 'config_file',\n 'help': \"\"\"A json-encoded configuration file, in which one can specify the parameters\n for all detectors in use as well as some general parameters for the whole run.\n The encoded object should therefore be a dictionary,\n with possible top-level keys 'GeneralArguments' (general parameters, not relevant\n to a detector class), 'SaccadeDetector', 'BlinkDetector', 'FixationDetector'\n and 'SmoothPursuitDetector'.\n\n The value for each of the present keys should in turn be a dictionary with keys\n identical to the longest argument names below, without the eye movement name prefix.\n An example (and equivalent to default parameters) configuration file is provided\n in default_parameters.conf.json and includes all possible keys.\n\n In your custom configuration file you do not have to specify any the parameter values,\n missing keys will be considered to have the default value.\n\n For default values, you can consult the respective classes' __init__ methods in\n saccade_detector.py, blink_detector.py, fixation_detector.py and sp_detector.py.\n\n\n Values given through the console interface override the ones in the config file.\"\"\",\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--input-folder',\n 'argument_name_options': ['--in'],\n 'parameter_name': 'input_folder',\n 'help': 'From where to load the gaze points data. If absent, must be present in --config-file file. '\n 'This folder is assumed to have subfolders that correspond to videos, for which recordings '\n 'were made. Each such subdirectory should contain gaze files (one file per observer).',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--gaze-file-pattern',\n 'argument_name_options': ['--pattern'],\n 'parameter_name': 'gaze_file_pattern',\n 'help': 'Will look for such files in all subdirectories of --input-folder. 
'\n 'For GazeCom, \\'*.arff\\' is a recommended value (or \\'*.coord\\', if dealing with original dataset files). '\n 'One can use this parameter to match some name pattern as well (not just the file extension), '\n 'for example with \\'*_needed_files_*.arff\\'. \\n'\n 'If no wildcard symbol is found in the provided string, it is assumed to be just the file name '\n 'suffix, so it will be prepended with a wildcard symbol (i.e. \".coord\" will become \"*.coord\").',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--input-data-type',\n 'argument_name_options': ['--type'],\n 'parameter_name': 'input_data_type',\n 'help': 'Type of data loader to use (if not specified, will try to detect automatically)',\n 'kwargs': {'choices': ['DSF', 'ARFF', 'labelled ARFF']}\n },\n {\n 'main_argument_name': '--verbose',\n 'argument_name_options': ['-v'],\n 'parameter_name': 'verbose',\n 'default': None,\n 'help': 'Whether to output some information about the progress of the run to STDERR',\n 'kwargs': {'action': 'store_const', 'const': True} # only like this can support the default of None\n # (not to override the config all the time\n # with a missing value)\n },\n {\n 'main_argument_name': '--movies',\n 'argument_name_options': ['-m'],\n 'parameter_name': 'movies',\n 'help': 'Which movies out of the input folder to use (might be useful for train/test split). '\n 'The gaze data is supposed to be put under respective directories in the input folder. '\n 'If none are given, all available ones are used.',\n 'kwargs': {'nargs': '+', 'default': None}\n },\n {\n 'main_argument_name': '--output-folder',\n 'argument_name_options': ['--out'],\n 'parameter_name': 'output_folder',\n 'help': 'Where to output the resulting labelled data (if empty, will create a new temporary directory)',\n 'kwargs': {}\n },\n ]\n\n description['SaccadeDetector'] = [\n {\n 'main_argument_name': '--tolerance',\n 'argument_name_options': ['--tol'],\n 'parameter_name': 'tolerance',\n 'help': 'The relative size of the area outside the screen that is still considered to be legal',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-onset-fast-degree-per-sec',\n 'argument_name_options': ['--threshold-onset-fast'],\n 'parameter_name': 'threshold_onset_fast_degree_per_sec',\n 'help': 'Threshold for initialization of saccade detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-onset-slow-degree-per-sec',\n 'argument_name_options': ['--threshold-onset-slow'],\n 'parameter_name': 'threshold_onset_slow_degree_per_sec',\n 'help': 'A slower threshold for saccade onset detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-offset-degree-per-sec',\n 'argument_name_options': ['--threshold-offset'],\n 'parameter_name': 'threshold_offset_degree_per_sec',\n 'help': 'Threshold for saccade offset detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--max-speed-degree-per-sec',\n 'argument_name_options': ['--max-speed'],\n 'parameter_name': 'max_speed_degree_per_sec',\n 'help': 'Maximum speed of saccadic eye movements',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--min-duration-microsec',\n 'argument_name_options': ['--min-duration'],\n 'parameter_name': 'min_duration_microsec',\n 'help': 'Minimal saccade duration threshold',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--max-duration-microsec',\n 'argument_name_options': ['--max-duration'],\n 
'parameter_name': 'max_duration_microsec',\n 'help': 'Maximal saccade duration threshold',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--velocity-integral-interval-microsec',\n 'argument_name_options': ['--velocity-integral-interval'],\n 'parameter_name': 'velocity_integral_interval_microsec',\n 'help': 'Interval duration, over which to integrate velocity computation.',\n 'kwargs': {'type': float}\n },\n ]\n\n description['BlinkDetector'] = [\n {\n 'main_argument_name': '--max-distance-to-saccade-microsec',\n 'argument_name_options': ['--max-distance-to-saccade'],\n 'parameter_name': 'max_distance_to_saccade_microsec',\n 'help': 'Threshold for distance from a definite blink to a nearby saccade, which will be marked as blink '\n 'as well.',\n 'kwargs': {'type': float}\n },\n ]\n\n description['FixationDetector'] = [\n {\n 'main_argument_name': '--prefiltering-interval-spread-threshold-degrees',\n 'argument_name_options': ['--prefiltering-interval-spread-threshold'],\n 'parameter_name': 'prefiltering_interval_spread_threshold_degrees',\n 'help': 'All the intersaccadic intervals shorter than this will be deemed fixations',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--min-sp-duration-microsec',\n 'argument_name_options': ['--min-sp-duration'],\n 'parameter_name': 'min_sp_duration_microsec',\n 'help': 'Minimal duration of a potential SP candidate (fast-moving samples shorter than this threshold '\n 'are labelled as noise)',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--sliding-window-width-microsec',\n 'argument_name_options': ['--sliding-window-width'],\n 'parameter_name': 'sliding_window_width_microsec',\n 'help': 'Sliding window for coordinates smoothing',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--normalization-sliding-window-size-samples',\n 'argument_name_options': ['--normalization-sliding-window'],\n 'parameter_name': 'normalization_sliding_window_size_samples',\n 'help': 'A moving average sliding window size (to normalize the data)',\n 'kwargs': {'type': int}\n },\n {\n 'main_argument_name': '--speed-threshold-degrees-per-sec',\n 'argument_name_options': ['--speed-threshold'],\n 'parameter_name': 'speed_threshold_degrees_per_sec',\n 'help': 'Biggest plausible speed for a noisy fixation',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--sliding-window-criterion',\n 'argument_name_options': ['--sliding-window'],\n 'parameter_name': 'sliding_window_criterion',\n 'help': 'Defines the way we check the samples with the sliding_window_criterion threshold: '\n 'either compute the average speed in the current window, or get the spread of '\n 'the gaze points (i.e. 
biggest XY bounding box side), divided by the duration',\n 'kwargs': {'choices': ['speed', 'spread']}\n },\n {\n 'main_argument_name': '--intersaccadic-interval-duration-threshold-microsec',\n 'argument_name_options': ['--intersaccadic-interval-duration-threshold'],\n 'parameter_name': 'intersaccadic_interval_duration_threshold_microsec',\n 'help': 'Minimal size of the intersaccadic interval to apply the step with the moving average analysis',\n 'kwargs': {'type': float}\n },\n ]\n\n description['SmoothPursuitDetector'] = [\n # a mutually exclusive group\n [\n {\n 'main_argument_name': '--min-pts',\n 'argument_name_options': [],\n 'parameter_name': 'min_pts',\n 'soft_type': int,\n 'help': 'An integer indicating the minimum number of points required to form a core point\\'s '\n 'neighbourhood, or a string \\'num_observers\\' (meaning that the actual number of observers '\n 'for each movie will be substituted, depending on the data set provided).\\n'\n 'This option is mutually exclusive with --min-observers.',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--min-observers',\n 'argument_name_options': [],\n 'parameter_name': 'min_observers',\n # first try casting to int, then to float (since int cast will fail for a float)\n 'soft_type': [int, float],\n 'help': 'Either a floating point in [0.0; 1.0] range (indicating the share of all the present '\n 'observers per movie) or int [2; +\\inf) (indicating the absolute threshold for '\n 'observer count in the core point\\'s neighbourhood).\\n'\n 'This option is mutually exclusive with --min-pts.',\n 'kwargs': {}\n }\n ],\n {\n 'main_argument_name': '--eps-deg',\n 'argument_name_options': ['--eps'],\n 'parameter_name': 'eps_deg',\n 'help': 'Spatial Euclidean distance threshold that defines the neighbourhood in the XY-plane',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--time-slice-microsec',\n 'argument_name_options': ['--time-slice'],\n 'parameter_name': 'time_slice_microsec',\n 'help': 'Width of the time slice that defines the size of the neighbourhood on the time axis.',\n 'kwargs': {'type': float}\n },\n ]\n\n return description", "def driver_kwargs(self):\n out = super(TestCisRpc, self).driver_kwargs\n out['comm'] = 'RPCComm'\n out['format_str'] = self.fmt_str\n return out", "def test_kw_args_with_dict():\n arg_dict = {'visited_color': 'blue',\n 'link_color': 'red',\n 'back_color': 'yellow',\n 'fore_color': 'orange'}\n assert arguments.fun_opt_kw_params(**arg_dict) == ('orange', 'yellow',\n 'red', 'blue')", "def __init__(self, **kwargs):\n self.__kwargs = kwargs", "def arguments(**kw):\n return export_arguments('cc', _all_arguments, _groups, **kw)", "def _folium_kwargs(self):", "def _kwargs(self):\n dict = DAG._kwargs(self) \n if (self.job): \n dict[\"inputpaths\"] = self.job.inputpaths\n dict[\"outputpath\"] = self.job.outputpath\n dict[\"job\"] = \"%s()\" % self.job.__class__.__name__\n return dict", "def _build_provided_kwargs_dict( # pylint: disable=R0914\n host: str,\n privilege_levels: Optional[Dict[str, PrivilegeLevel]],\n default_desired_privilege_level: Optional[str],\n port: Optional[int],\n auth_username: Optional[str],\n auth_password: Optional[str],\n auth_private_key: Optional[str],\n auth_private_key_passphrase: Optional[str],\n auth_strict_key: Optional[bool],\n auth_bypass: Optional[bool],\n timeout_socket: Optional[float],\n timeout_transport: Optional[float],\n timeout_ops: Optional[float],\n comms_return_char: Optional[str],\n ssh_config_file: Optional[Union[str, bool]],\n ssh_known_hosts_file: 
Optional[Union[str, bool]],\n on_init: Optional[Callable[..., Any]],\n on_open: Optional[Callable[..., Any]],\n on_close: Optional[Callable[..., Any]],\n transport: Optional[str],\n transport_options: Optional[Dict[str, Any]],\n channel_log: Optional[Union[str, bool, BytesIO]],\n channel_log_mode: Optional[str],\n channel_lock: Optional[bool],\n logging_uid: Optional[str],\n auth_secondary: Optional[str],\n failed_when_contains: Optional[List[str]],\n textfsm_platform: Optional[str],\n genie_platform: Optional[str],\n **kwargs: Dict[Any, Any],\n) -> Dict[str, Any]:\n # dict of all args coming from the factories\n _provided_args: Dict[str, Any] = {\n \"host\": host,\n \"privilege_levels\": privilege_levels,\n \"default_desired_privilege_level\": default_desired_privilege_level,\n \"port\": port,\n \"auth_username\": auth_username,\n \"auth_password\": auth_password,\n \"auth_private_key\": auth_private_key,\n \"auth_private_key_passphrase\": auth_private_key_passphrase,\n \"auth_strict_key\": auth_strict_key,\n \"auth_bypass\": auth_bypass,\n \"timeout_socket\": timeout_socket,\n \"timeout_transport\": timeout_transport,\n \"timeout_ops\": timeout_ops,\n \"comms_return_char\": comms_return_char,\n \"ssh_config_file\": ssh_config_file,\n \"ssh_known_hosts_file\": ssh_known_hosts_file,\n \"on_init\": on_init,\n \"on_open\": on_open,\n \"on_close\": on_close,\n \"transport\": transport,\n \"transport_options\": transport_options,\n \"channel_log\": channel_log,\n \"channel_log_mode\": channel_log_mode,\n \"channel_lock\": channel_lock,\n \"logging_uid\": logging_uid,\n \"auth_secondary\": auth_secondary,\n \"failed_when_contains\": failed_when_contains,\n \"textfsm_platform\": textfsm_platform,\n \"genie_platform\": genie_platform,\n }\n\n # add back in the None/False args\n _provided_args = {key: value for key, value in _provided_args.items() if value is not None}\n\n # merge in any kwargs that maybe need to get passed down\n return {**_provided_args, **kwargs}", "def as_kwargs(self) -> Dict[str, Any]:\n ret = {}\n for arg in self.args.values():\n ret[arg.name] = arg.value\n return ret", "def define_parameters(self):\n self.add_argument('--prefix', dest='prefix', type=str, optional=False,\n help='prefix for file names')\n self.add_argument('--sleepLength',\n dest = 'sleepLength',\n type = str,\n optional = True,\n help ='time to sleep before performing plugin action',\n default = '0')", "def driver_kwargs(self):\n out = super(TestCisRpcServer, self).driver_kwargs\n out['comm'] = 'ClientComm'\n return out", "def gen_args(self, obj, pa_names = False):\n\n pal, kwal = get_class_total_args(type(obj))\n\n try:\n get_val = type(obj).__get_init_arg_val__\n except AttributeError:\n get_val = getattr\n\n for pa in pal:\n v = get_val(obj, pa)\n self.gen_field((pa + \" = \") if pa_names else \"\")\n self.pprint(v)\n\n for kwa, default in kwal.items():\n try:\n v = get_val(obj, kwa)\n except AttributeError:\n # If value cannot be obtained, skip the argument generation\n continue\n\n # generate only arguments with non-default values\n if (v is default) or (v == default):\n continue\n\n self.gen_field(kwa + \" = \")\n self.pprint(v)", "def create_training_args(self, input_dict, output_dict, exec_properties,\n executor_class_path, training_inputs,\n job_id) -> Dict[Text, Any]:\n pass", "def __init__(\n self, subject_as_keyword: bool, arg_map: Dict[str, str], arg_strict: bool\n ):\n self.subject_as_keyword = subject_as_keyword\n self.arg_map = arg_map\n self.arg_strict = arg_strict", "def set_params(self, 
**kwargs):\n ...", "def get_kwargs(d):\n raise NotImplementedError(\"subclass must implement get_kwargs()\")", "def __init__(self, **kwargs: Any):\n for name, value in kwargs.items():\n setattr(self, name, value)", "def __init__(self\n , **kwargs\n ):\n self.t_cstArgs = {}\n \n # Get the constant value parameters\n for t_cstArgTuple in self.__class__.getCstArgs():\n \n s_cstArgKey = t_cstArgTuple[ self.U_CST_ARG_KEY_INDEX ]\n b_required = t_cstArgTuple[ self.U_CST_ARG_REQUIRED_INDEX ] \n \n if not kwargs.has_key( s_cstArgKey ):\n if b_required:\n raise QArkFunctionMissingRequiredFunctionArgError( self.__class__.__name__, s_cstArgKey )\n else:\n self.t_cstArgs[ s_cstArgKey ] = None \n else:\n # Get the QArkFunctionArg\n o_cstArg = kwargs[ s_cstArgKey ]\n self.t_cstArgs[ s_cstArgKey ] = o_cstArg.getValue()", "def purefa_argument_spec():\n\n return dict(\n fa_url=dict(),\n api_token=dict(no_log=True),\n )", "def _repr_kwargs(self):\n\n ret = \"\"\n if self.options.growth:\n ret += \", growth=True\"\n elif self.options.circular:\n ret += \", circular=True\"\n\n return ret", "def __make_params(args):\n data = {}\n for i in range(len(args)):\n if i == 0: # saltando a primeira iteracao pra\n # saltar o parametro que é o nome do arquivo de execução\n continue\n if not i % 2 == 0:\n data[args[i]] = args[i + 1]\n return data", "def parse_kw_args(tagname, bits, args_spec=None, restrict=False):\n\n args = []\n\n if restrict:\n if args_spec is None:\n raise ValueError(\"you must pass an args_spec dict if you want to restrict allowed args\")\n allowed = list(args_spec.keys())\n do_validate = True\n else:\n do_validate = args_spec is not None\n\n for bit in bits:\n try:\n name, val = bit.split('=')\n except ValueError:\n raise template.TemplateSyntaxError(\n \"keyword arguments to '%s' tag must have 'key=value' form (got : '%s')\" \\\n % (tagname, bit)\n )\n\n name = str(name)\n if do_validate:\n if restrict:\n if name in allowed:\n # we only want each name once\n del allowed[allowed.index(name)]\n else:\n raise template.TemplateSyntaxError(\n \"keyword arguments to '%s' tag must be one of % (got : '%s')\" \\\n % (tagname, \",\".join(allowed), name)\n )\n\n validate = args_spec[name]\n else:\n validate = args_spec.get(name, None)\n\n if validate is not None:\n if callable(validate):\n try:\n val = validate(val)\n except Exception, e:\n raise template.TemplateSyntaxError(\n \"invalid optional argument '%s' for '%s' tag: '%s' (%s)\" \\\n % (tagname, name, val, e)\n )\n else:\n # assume re\n if re.match(validate, val) is None:\n raise template.TemplateSyntaxError(\n \"invalid optional argument '%s' for '%s' tag: '%s' (doesn't match '%s')\" \\\n % (tagname, name, val, validate)\n )\n\n # should be ok if we managed to get here \n args.append((name, val))\n\n return args", "def _make_args(self, args, defaults=[], vararg=None, kwonlyargs=[],\n kw_defaults=[], kwarg=None):\n # On Python 2 convert vararg and kwarg to raw name, raise error using\n # lineno stored on the node and lexer from self.\n # On Python 3.3 extract name and annotation\n # After should be straight forward\n raise NotImplementedError()", "def build_parms(args):\r\n readDir=args.dir\r\n #target_date=args.target_date\r\n target_date=args.target_date\r\n outdir=args.outdir \r\n parms = {\"readDir\":readDir,\r\n \"target_date\":target_date,\r\n \"outdir\":outdir}\r\n \r\n return(parms)", "def _arg2kw(self, mixed_args):\n def insert(dict_, k, v):\n if k in dict_:\n print \"duplicated args : %s \" % kv[0]\n raise ArgParseError\n dict_[k] = v\n \n 
opts = []\n args = {}\n\n n = len(mixed_args)\n i = 0\n while i < n:\n a = mixed_args[i]\n if a == '-' or a == '--' :\n opts.append(a)\n elif a.startswith(\"---\"):\n print \"invalid args: %s\" % mixed_args\n print \"only the following formats are supported:\"\n print \" arg1\"\n print \" --input=name1\"\n print \" --output name3\"\n print \" -oname2\"\n print \" -o name4\"\n raise ArgParseError\n elif a.startswith(\"--\"):\n kv = a[2:].split(\"=\", 1)\n if len(kv) == 2:\n insert(args, kv[0], kv[1])\n else:\n i += 1\n insert(args, kv[0], mixed_args[i])\n elif a.startswith(\"-\"):\n if len(a) > 2:\n insert(args, a[1], a[2:])\n else:\n i += 1\n insert(args, a[1], mixed_args[i])\n else:\n opts.append(a)\n i += 1\n \n return opts, args", "def params_helper(self,**kwargs):\n\n dic = {'output' : 'json, xml, kml',\n 'maxresults' : 'limit on max number of results returned ; Default is limited to 100',\n 'countrycode' : 'GB, US etc ISO Country Code ==> Only 2 caracters !',\n 'latitude' : 'latitude reference for distance calculation',\n 'distance' : 'return results based on specified distance from specified latitude/longitude',\n 'distanceunit' : 'Miles or km',\n 'operatorid' : 'exact match on a given EVSE operator id (comma separated list)',\n 'connectiontypeid' : ' exact match on a given connection type id (comma separated list)',\n 'countryid' : 'exact match on a given country id (comma separated list)',\n 'levelid' : 'exact match on a given charging level (1-3) id (comma separated list)',\n 'minpowerkw' : 'minimum output power in kW (this information is not known for many locations)',\n 'usagetypeid' : 'exact match on a given usage type id (comma separated list) ',\n 'statustypeid' : ' exact match on a given status type id (comma separated list)',\n 'dataproviderid ' : 'exact match on a given data provider id id (comma separated list). Use opendata=true for only OCM provided (\"Open\") data.',\n 'modifiedsince' : 'POIs modified since the given date (UTC) e.g. 2016-09-15T09:30',\n 'opendata' : ' true or false. Set to true to include only Open Data licensed content, false to return only non-open licensed data. By default all available data is returned.',\n 'includecomments' : ' true or false. Set to true to also include user comments and media items (photos) per charging location. Default = false.',\n 'verbose ' : ' true or false. Set to false to get a smaller result set with null items removed. Default = true.',\n 'compact ' : 'true or false. Set to true to remove reference data objects from output (just returns IDs for common reference data such as DataProvider etc). Default = false.',\n 'camelcase' : 'true or false. Set to true to get a property names in camelCase format. 
Default = false',\n 'callback' : 'specify the name of the JSONP callback (if required), JSON response type only.'\n }\n\n if len(kwargs)==0 :\n\n for key in dic.keys() :\n print(key)\n\n else :\n \n for k in kwargs: \n print(dic.get(k))", "def _create_param_dict(self, func_args):\n for i, a in enumerate(func_args):\n self.fn.args[i].name = str(a)\n self.param_dict[a] = self.fn.args[i]", "def _core_ar_kwarg(self,**kwargs) :\n\t\tpass", "def fill_args(cls, toolchain, parser):\n pass # pass must be overloaded (if required)", "def __init__(self, *args: Union[List[AtomKey], EKT], **kwargs: str) -> None:\n ...", "def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs", "def ReviewServiceArgs(cls, container = '', library = 'Standard', dialogname = ''):\n return container, library, dialogname, ScriptForge.componentcontext", "def __init__(self, **kwargs):\n\n self.opts = {}\n self.opts.update(kwargs)\n self._v_registry = {}", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def test_kwargs():\n client, server = make_queue_pairs('localhost')\n client.send_inputs(1, input_kwargs={'hello': 'world'})\n _, task = server.get_task()\n assert task.args == (1,)\n assert task.kwargs == {'hello': 'world'}", "def token_kwargs(bits, parser):\r\n if not bits:\r\n return {}\r\n kwargs = SortedDict()\r\n while bits:\r\n match = kwarg_re.match(bits[0])\r\n if not match or not match.group(1):\r\n return kwargs\r\n key, value = match.groups()\r\n del bits[:1]\r\n kwargs[parser.compile_filter(key)] = parser.compile_filter(value)\r\n return kwargs", "def driver_kwargs(self):\n out = super(TestCisRpcClient, self).driver_kwargs\n out['comm'] = 'ServerComm'\n out['response_kwargs'] = {'format_str': self.fmt_str}\n return out", "def __init__(self, **kw):\r\n self.__dict__.update(kw)", "def get_hyper_params(**kwargs):\n hyper_params = {\n \"anchor_ratios\": [0.5, 1, 2],\n \"anchor_scales\": [16, 32, 64, 128, 256],\n \"stride\": 32,\n \"nms_topn\": 300,\n \"total_pos_bboxes\": 64,\n \"total_neg_bboxes\": 64,\n \"pooling_size\": (7, 7),\n }\n for key, value in kwargs.items():\n if key in hyper_params and value:\n hyper_params[key] = value\n #\n hyper_params[\"anchor_count\"] = len(hyper_params[\"anchor_ratios\"]) * len(hyper_params[\"anchor_scales\"])\n return hyper_params", "def get_default_args(**kw):\n default_args_exp = {\n \"output_file\": \"ml_demo.c\",\n \"function_name\": \"ml_demo\",\n \"precision\": ML_Binary32,\n \"accuracy\": ML_Faithful,\n \"target\": GenericProcessor.get_target_instance()\n }\n default_args_exp.update(kw)\n return DefaultArgTemplate(**default_args_exp)", "def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)", "def _generate_keywords(self):\n _keywords = [*self._lookup_opcodes_dir.keys(), *self._registers_list.keys()]\n for key in _keywords:\n self._keywords.extend(key.split(\" \"))\n return", "def optional_parameters(self):\n return ['seed', 'param_card', 'apmass', 'map', 'mpid', 'mrhod']", "def build_parameters(pobj):\n ViscosityWilke.build_parameters(pobj)", "def get_request_kwargs(self, api_params, *args, **kwargs):\n serialized = self.serialize_data(kwargs.get(\"data\"))\n kwargs[\"data\"] = self.format_data_to_request(serialized)\n return kwargs", "def _template_kwargs(*, logical_name: str, bucket: 
str, key: str) -> Dict[str, str]:\n if logical_name == \"ArtifactBuilder\":\n return dict(ArtifactBucketName=bucket, WorkersS3Key=key)\n elif logical_name == \"LayerBuilder\":\n return dict(ReplicationBucket=bucket, WorkersS3Key=key)\n else:\n raise ValueError(f\"Unknown logical name: {logical_name}\")", "def set_params(**kwargs):\r\n # Read default user input variables from user_input.py\r\n import user_input as ui\r\n ui_dict = {k:v for (k,v) in ui.__dict__.items() if not(\"__\" in k)}\r\n \r\n # Overwrite variables from user_input.py with values that the user passed\r\n # to this function, if desired.\r\n for key, value in kwargs.items():\r\n if key in ui_dict:\r\n ui_dict[key] = value\r\n \r\n return ui_dict", "def cookiecutter_args(self) -> dict[str, str]:\n local_args = {\n \"add_golden\": \"y\" if self.golden_tests else \"n\",\n \"copyright_holder\": self.copyright_holder,\n \"copyright_year\": (\n self.today.strftime(\"%Y\")\n if not self.copyright_year\n else self.copyright_year\n ),\n \"github_owner\": self.github_owner,\n \"name\": self.name,\n \"slug\": self.slug,\n # The template expects the test cases in a single string separated by\n # spaces.\n \"test_cases\": \" \".join(self.test_cases),\n }\n cruft_json = self.target_dir / \".cruft.json\"\n if cruft_json.is_file():\n with open(cruft_json, \"r\", encoding=\"utf-8\") as f:\n cruft_json_data = json.load(f)\n args = cruft_json_data[\"context\"][\"cookiecutter\"]\n for k, v in local_args.items():\n args[k] = v\n else:\n args = local_args\n\n return args", "def get_default_args(**kw):\n default_args_log = {\n \"output_file\": \"POLY.c\",\n \"function_name\": \"POLY\",\n \"precision\": ML_Binary64,\n \"target\": GenericProcessor.get_target_instance(),\n \"function\": None,\n \"interval\": None,\n \"epsilon\": None\n }\n default_args_log.update(kw)\n return DefaultArgTemplate(**default_args_log)", "def get_interface_initialization_kwargs(self, **kwargs) -> dict:\n return {\n key: value\n for key, value in kwargs.items()\n if not self.input_definitions.get(key=key).run_method_input\n }", "def setupParameters(self, **pars):\n \n seldict = {}\n for k,v in pars.items():\n if v != None and v != \"\":\n seldict[k] = v\n \n return seldict", "def _set_kwargs(self, title, initialdir, filetype, command, action, variable=None):\n logger.debug(\"Setting Kwargs: (title: %s, initialdir: %s, filetype: '%s', \"\n \"command: '%s': action: '%s', variable: '%s')\",\n title, initialdir, filetype, command, action, variable)\n kwargs = dict()\n if self._handletype.lower() == \"context\":\n self._set_context_handletype(command, action, variable)\n\n if title is not None:\n kwargs[\"title\"] = title\n\n if initialdir is not None:\n kwargs[\"initialdir\"] = initialdir\n\n if self._handletype.lower() in (\n \"open\", \"save\", \"filename\", \"filename_multi\", \"savefilename\"):\n kwargs[\"filetypes\"] = self._filetypes[filetype]\n if self._defaults.get(filetype, None):\n kwargs['defaultextension'] = self._defaults[filetype]\n if self._handletype.lower() == \"save\":\n kwargs[\"mode\"] = \"w\"\n if self._handletype.lower() == \"open\":\n kwargs[\"mode\"] = \"r\"\n logger.debug(\"Set Kwargs: %s\", kwargs)\n return kwargs", "def setup(self, **kwargs):\n\n for k, v in kwargs.items():\n setattr(self, k, v)" ]
[ "0.6476009", "0.64399606", "0.639305", "0.6366198", "0.6307585", "0.6280311", "0.6226344", "0.6221674", "0.6219365", "0.6188294", "0.61156327", "0.6104635", "0.60843754", "0.608238", "0.6072212", "0.60718983", "0.6050508", "0.6045505", "0.603796", "0.60124546", "0.6003399", "0.59876925", "0.59872216", "0.5984418", "0.5980302", "0.59314466", "0.5927005", "0.59151196", "0.5897919", "0.5877223", "0.5869678", "0.5867053", "0.58628726", "0.5859214", "0.5844396", "0.58389604", "0.58318096", "0.58233553", "0.5816533", "0.5816379", "0.5816379", "0.5816379", "0.5816379", "0.58098227", "0.580195", "0.5801702", "0.577263", "0.5769643", "0.5755899", "0.5752581", "0.575135", "0.5749601", "0.5739989", "0.5737484", "0.57350516", "0.57334465", "0.57159543", "0.57097054", "0.56972134", "0.56915486", "0.5681905", "0.56722844", "0.56654376", "0.5662001", "0.56584454", "0.5650552", "0.5643903", "0.564353", "0.5639792", "0.56352407", "0.5633343", "0.56309783", "0.56297165", "0.5618336", "0.5613008", "0.55974716", "0.559672", "0.5594187", "0.55920583", "0.55889577", "0.55889577", "0.55889577", "0.5588482", "0.5587", "0.557625", "0.55752796", "0.5574502", "0.55730814", "0.55702406", "0.5570065", "0.5569449", "0.55681473", "0.5564307", "0.5561764", "0.5561502", "0.55599535", "0.5559794", "0.5553942", "0.5548118", "0.55477816", "0.55295825" ]
0.0
-1
Generates a dictionary of parameters to be passed back to the API.
def as_api_parameters(self):
    params = {
        'card_number': self.card_number,
    }

    missing_expiry_year = not self.expiry_year
    missing_expiry_month = not self.expiry_month

    if missing_expiry_year or missing_expiry_month:
        raise InvalidParametersError(
            'both expiry_year and expiry_month must be specified')

    params.update(
        expiry_date='{:0>2}{:0>2}'.format(
            self.expiry_month,
            # handle 4 digit years
            str(self.expiry_year)[-2:]
        )
    )

    missing_start_year = not self.start_year
    missing_start_month = not self.start_month
    specifying_start_date = self.start_year or self.start_month

    if specifying_start_date and (missing_start_year or missing_start_month):
        raise InvalidParametersError(
            'both start_year and start_month must be specified or neither specified')

    if specifying_start_date:
        params.update(
            start_date='{:0>2}{:0>2}'.format(
                self.start_month,
                str(self.start_year)[-2:]
            )
        )

    if self.ccv2:
        params.update(cv_two=self.ccv2)

    if self.issue_number:
        params.update(issue_number=self.issue_number)

    if self.billing_address:
        params.update(
            **self.billing_address.as_api_billing_address_parameters()
        )

    if self.return_url:
        params.update(return_url=self.return_url)

    if self.return_token:
        params.update(return_token=self.return_token)

    if self.user_agent:
        params.update(client_http_user_agent=self.user_agent)

    if self.accept:
        params.update(client_http_accept=self.accept)

    if self.remote_site:
        params.update(remote_site=self.remote_site)

    return params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_params(self):\n return {\n 'lis_outcome_service_url': self.lis_outcome_service_url,\n 'lis_result_sourcedid': self.lis_result_sourcedid,\n 'oauth_consumer_key': self.key\n }", "def get_params(self):\n return {}", "def parameters_dict(self):\n return", "def parameters(self):\n return dict(self._register)", "def parameters(self):\n return {}", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P,\n \"Wo\": self.Wo,\n \"To\": self.To,\n \"Po\": self.Po}", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_log: np.ndarray = 4 * np.random.rand(9) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(9)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_log: np.ndarray = 4 * np.random.rand(3) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(3)) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n param_dict.update(tCa_dict)\n return param_dict", "def as_api_parameters(self):\n\n data = {}\n for system in self.system_codes:\n data.update({\n \"{0}_callback/{1}\".format(system, variable): self.data[variable]\n for variable in self.data.keys()\n })\n return data", "def get_parameters_dictionary(request):\n parameters_dict = {PARAMETER_MESSAGE: request.GET.get('message'),\n PARAMETER_ENTITY_NAME: request.GET.get('entity_name'),\n PARAMETER_STRUCTURED_VALUE: request.GET.get('structured_value'),\n PARAMETER_FALLBACK_VALUE: request.GET.get('fallback_value'),\n PARAMETER_BOT_MESSAGE: request.GET.get('bot_message'),\n PARAMETER_TIMEZONE: request.GET.get('timezone'),\n PARAMETER_LANGUAGE_SCRIPT: request.GET.get('language_script', ENGLISH_LANG),\n PARAMETER_SOURCE_LANGUAGE: request.GET.get('source_language', ENGLISH_LANG),\n PARAMETER_PAST_DATE_REFERENCED: request.GET.get('date_past_reference', 'False'),\n PARAMETER_MIN_DIGITS: request.GET.get('min_number_digits'),\n PARAMETER_MAX_DIGITS: request.GET.get('max_number_digits'),\n PARAMETER_NUMBER_UNIT_TYPE: request.GET.get('unit_type'),\n PARAMETER_LOCALE: request.GET.get('locale'),\n PARAMETER_RANGE_ENABLED: request.GET.get('range_enabled')\n }\n\n return parameters_dict", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_kvhh', 'g_cav', 'g_kca', 'g_nap']\n gX_log: np.ndarray = 4 * np.random.rand(5) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(5)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(tCa_dict)\n return param_dict", "def get_params(self):\n return {\"d\": \"155\"}", "def get_params(self):\r\n param_names = ['aws_access_key_id', 'aws_secret_access_key',\r\n 'is_secure', 'port', 'proxy', 'proxy_port',\r\n 'proxy_user', 'proxy_pass',\r\n 'debug', 'https_connection_factory']\r\n params = {}\r\n for name in param_names:\r\n params[name] = getattr(self, name)\r\n return params", "def request_params(self):\n return {'key': self.key, 'hash': self.hash}", "def get_params(self) -> dict:\n # initialize dictionary\n params = dict()\n\n # loop through parameters, adding to 
parameter dictionary\n for key in self._get_param_names():\n params[key] = getattr(self, key)\n\n return params", "def api_params(self):\n params = dict(\n (key, self.module.params.get(key))\n for key, value in RECORD_KEYS_MAP.items()\n if key != \"answers\" and self.module.params.get(key) is not None\n )\n return params", "def get_params(self, deep=False):\n return {\"alpha\": self.alpha, \"beta\": self.beta, \"gamma\": self.gamma, \"W\": self.W, \"bias\": self.bias, \"add_bias\": self.add_bias, \"opts\": self.opts}", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", "def get_params(self):\n return {'k': self.k, 'q': self.q, 'sigma_s': self.sigma_s, 'm': self.m}", "def parameters_dict(self):\n return dict(zip(self.parameters_names(), self.parameters_list))", "def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}", "def get_params(self):\n params = {}\n for step in self.steps:\n params[step[0]] = step[1].get_params()\n return params", "def get_params(continue_from=\"\") -> {}:\n\n\treturn {\n\t\t\"action\": \"query\",\n\t\t\"format\": \"json\",\n\t\t\"list\": \"allpages\",\n\t\t\"apcontinue\": continue_from,\n\t\t\"apnamespace\": NAMESPACE,\n\t\t\"aplimit\": PAGES_LIMIT\n\t}", "def _build_param_dict(self):\n self._build_common_param_dict()\n\n self._param_dict.add(Parameter.NUM_AVG_SAMPLES,\n r'ScansToAverage>([\\d]+)</ScansToAverage>',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Scans to Average\",\n description=\"Number of samples to average (must be even)\",\n range=INT16,\n startup_param=True,\n direct_access=False,\n default_value=4,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.MIN_COND_FREQ,\n r'MinimumCondFreq>([\\d]+)</MinimumCondFreq',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Minimum Conductivity Frequency\",\n range=INT16,\n description=\"Minimum conductivity frequency to enable pump turn-on.\",\n startup_param=True,\n direct_access=False,\n default_value=500,\n units=Units.HERTZ,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.PUMP_DELAY,\n r'PumpDelay>([\\d]+)</PumpDelay',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Pump Delay\",\n range=INT16,\n description=\"Time to wait after minimum conductivity frequency is reached before turning pump on.\",\n startup_param=True,\n direct_access=False,\n default_value=60,\n units=Units.SECOND,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.AUTO_RUN,\n r'AutoRun>(.*)</AutoRun',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Auto Run\",\n description=\"Enable automatic logging when power is applied: (true | false).\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=False,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.IGNORE_SWITCH,\n r'IgnoreSwitch>(.*)</IgnoreSwitch',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Ignore Switch\",\n description=\"Disable magnetic switch position for starting or stopping logging: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n 
direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.OPTODE,\n r'OPTODE>(.*)</OPTODE',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Optode Attached\",\n description=\"Enable optode: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.VOLT1,\n r'ExtVolt1>(.*)</ExtVolt1',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Volt 1\",\n description=\"Enable external voltage 1: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n\n self._build_ctd_specific_params()", "def _build_param_dict(self):\n # Add parameter handlers to parameter dict.\n self._param_dict = ProtocolParameterDict()\n \n self._param_dict.add(Parameter.CYCLE_TIME,\n r'(\\d+)\\s+= Cycle Time \\(.*\\)\\r\\n(0|1)\\s+= Minutes or Seconds Cycle Time',\n lambda match : self._to_seconds(int(match.group(1)),\n int(match.group(2))),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_WRITE,\n startup_param=True,\n direct_access=False,\n default_value=20,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"1\", Prompt.CYCLE_TIME_PROMPT]])\n \n self._param_dict.add(Parameter.VERBOSE,\n r'', # Write-only, so does it really matter?\n lambda match : None,\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=True,\n init_value=1,\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"2\", Prompt.VERBOSE_PROMPT]])\n \n self._param_dict.add(Parameter.METADATA_POWERUP,\n r'(0|1)\\s+= Metadata Print Status on Power up',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=True,\n init_value=0,\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"3\", Prompt.METADATA_PROMPT]])\n\n self._param_dict.add(Parameter.METADATA_RESTART,\n r'(0|1)\\s+= Metadata Print Status on Restart Data Collection',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=True,\n init_value=0,\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"4\", Prompt.METADATA_PROMPT]])\n \n self._param_dict.add(Parameter.RES_SENSOR_POWER,\n r'(0|1)\\s+= Res Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"1\"]])\n\n self._param_dict.add(Parameter.INST_AMP_POWER,\n r'(0|1)\\s+= Thermocouple & Hydrogen Amp Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"2\"]])\n\n self._param_dict.add(Parameter.EH_ISOLATION_AMP_POWER,\n r'(0|1)\\s+= eh Amp 
Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"3\"]])\n \n self._param_dict.add(Parameter.HYDROGEN_POWER,\n r'(0|1)\\s+= Hydrogen Sensor Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"4\"]])\n \n self._param_dict.add(Parameter.REFERENCE_TEMP_POWER,\n r'(0|1)\\s+= Reference Temperature Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"5\"]])", "def construct_params(self):\n\n return {\"expand\": self.get_expand()}", "def get_next_params(self) -> dict:\n params = {arg_name: caller() for arg_name, caller in self.parameters}\n return params", "def parameters(self):\n return {\n 'label': \"undefined\",\n 'name': self.name(),\n 'id': self._id,\n 'created': self.created_to_str(),\n 'stage': self.stage_to_str(),\n 'direction': self.direction_to_str(),\n 'timeframe': self.timeframe_to_str(),\n 'expiry': self.expiry_to_str()\n }", "def get_params(self):", "def get_params(self):\n return {'classifier': self.classifier,\n 'grid_param': self.grid_param,\n 'n_param_comb': self.n_param_comb,\n 'top_bagging': self.bagging,\n 'bagging_param': self.bagging_param,\n 'comb_seed': self.comb_seed}", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_name: List[str] = list(itertools.compress(gX_name, list(self.channel_bool.values())[:9]))\n gX_log: np.ndarray = 4 * np.random.rand(len(gX_name)) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(len(gX_name))) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_name: List[str] = list(itertools.compress(gR_name, list(self.channel_bool.values())[9:12]))\n gR_log: np.ndarray = 4 * np.random.rand(len(gR_name)) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(len(gR_name))) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n\n if self.channel_bool['ca']:\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n param_dict.update(tCa_dict)\n\n return param_dict", "def get_params(self) -> Dict:\n params: Dict = {}\n params['g_leak'] = self.leak.get_g()\n params['g_kvhh'] = self.kvhh.get_g()\n params['g_cav'] = self.cav.get_g()\n params['g_kca'] = self.kca.get_g()\n params['g_nap'] = self.nap.get_g()\n params['t_ca'] = self.tau_ca\n return params", "def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }", "def get_params(self):\n pass", "def as_api_parameters(self):\n return {\n 'return_token': self.token,\n 'return_url': self.url,\n 
'client_http_user_agent': self.user_agent,\n 'client_http_accept': self.accept,\n 'remote_site': self.remote_site,\n }", "def parameters(self) -> Dict[str, Any]:\n return self.data[\"args\"].get(\"parameters\", {})", "def build_parameters(self):\n for key in entity_map:\n if key in self.sample_frame:\n parameter = {\n \"id\": str(uuid.uuid4()),\n \"required\": True,\n \"name\": entity_map[key]['entity_type'],\n \"dataType\": \"@{}\".format(entity_map[key]['entity_type']),\n \"value\": \"${}\".format(entity_map[key]['entity_type']),\n \"isList\": False\n }\n self.frame['responses'][0]['parameters'].append(parameter)", "def get_id_params(self):\n params = {}\n # Add the path.\n try:\n params[\"path\"] = self.get_path()\n except NotImplementedError:\n pass\n # Add the URL.\n try:\n params[\"url\"] = self.get_url()\n except NotImplementedError:\n pass\n # All done!\n return params", "def __make_params(args):\n data = {}\n for i in range(len(args)):\n if i == 0: # saltando a primeira iteracao pra\n # saltar o parametro que é o nome do arquivo de execução\n continue\n if not i % 2 == 0:\n data[args[i]] = args[i + 1]\n return data", "def getParameters( self ):\n parameterDict = {}\n parameterDict['StorageName'] = self.name\n parameterDict['ProtocolName'] = self.protocolName\n parameterDict['Protocol'] = self.protocol\n parameterDict['Host'] = self.host\n parameterDict['Path'] = self.path\n parameterDict['Port'] = self.port\n parameterDict['SpaceToken'] = self.spaceToken\n parameterDict['WSUrl'] = self.wspath\n return S_OK( parameterDict )", "def get_parameter_dict(self):\n prm = ModelParameters()\n prm.define(\"a\", self.a)\n return prm", "def generate_params(self, randomize=True):\n pass", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['method'] = self.method\n paramDict['dimension'] = self.dimension\n paramDict['rank'] = self.rank\n paramDict['mu'] = self.mu\n paramDict['covariance'] = self.covariance\n return paramDict", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def get(self):\n return dict(self._params)", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}", "def get_required_params():\n return {}", "def buildParams(self,id,start,finish):\n return (\n (\"id\",id),\n (\"start\",start),\n (\"finish\",finish)\n )", "def get_params(self):\n outputs = ['sample',\n 'ratio_params',\n 'despike_params',\n 'autorange_params',\n 'bkgcorrect_params']\n\n out = {}\n for o in outputs:\n out[o] = getattr(self, o)\n\n out['filter_params'] = self.filt.params\n out['filter_sequence'] = self.filt.sequence\n out['filter_used'] = self.filt.make_keydict()\n\n return out", "def get_params(self):\n return {\n 'dropout': self._dropout,\n 'layer_size': self._layer_size,\n 'num_layers': self._num_layers,\n 'embedding_layer_size': self._embedding_layer_size,\n 'controller_type': self._controller_type\n }", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['workingDir'] = self.workingDir\n paramDict['dataFilename'] = self.dataFilename\n paramDict['functionID'] = self.functionID\n paramDict['functionType'] = self.functionType\n paramDict['variableID'] = self.variableID\n paramDict['k'] = self.k\n paramDict['s'] = self.s\n return paramDict", "def getParams(self):\n\n\t\tparams = {\"Nparticles\":self.__Nparticles,\"Nkicks\":self.__Nkicks,\"kappa\":self.__kappa, \"eta\":self.__eta,\"gamma\":self.__gamma, 
\"omega\":self.__omega,\n\t\t\"Kbt\":self.__Kbt, \"tk\":self.__tk}\n\n\t\treturn params", "def _params(self, request: Request) -> dict:\n params = {'forceAsync': True}\n\n subset = self._spatial_subset_params(request) + self._temporal_subset_params(request)\n if len(subset) > 0:\n params['subset'] = subset\n\n for p, val in request.parameter_values():\n if type(val) == str:\n params[p] = val\n elif type(val) == bool:\n params[p] = str(val).lower()\n elif type(val) == list and type(val[0]) != str:\n params[p] = ','.join([str(v) for v in val])\n else:\n params[p] = val\n\n return params", "def _identifying_params(self) -> Mapping[str, Any]:\n return {**{\"model_name\": self.model_name}, **self._default_params}", "def get_params(self, deep: bool = True) -> Dict[str, Any]:\n return _get_params(self, deep)", "def parameters(self):\n res = dict()\n res[\"population_size\"] = self.population_size\n res[\"mutation_prob\"] = self.mutation_prob\n res[\"crossover\"] = self.crossover\n res[\"selection\"] = self.selection\n res[\"sigma\"] = self.sigma\n res[\"crossover_method\"] = self.crossover_method\n res[\"selection_method\"] = self.selection_method\n res[\"best_rate\"] = self.best_rate\n res[\"n_parents\"] = self.n_parents\n res[\"model_parameters\"] = self.model.total_parameters()\n res[\"IDCT_from\"] = self.IDCT_from\n res[\"elitism\"] = self.elitism\n return res", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['p'] = self.p\n return paramDict", "def parameter_type_dict():\n return {'filter' : filters.filter_parameters,\n 'global_options' : global_options.global_options_parameters,\n 'input_device' : input_devices.input_device_parameters,\n 'input_stream' : input_streams.input_stream_parameters,\n 'output_device' : output_devices.output_device_parameters,\n 'output_stream' : output_streams.output_stream_parameters}", "def get_params(self, params_dict={}):\n raise NotImplementedError()", "def params(self):\n return {'shape': self.shape,\n 'name': self.name}", "def params(self) -> dict:\n\n if not self.exp_metadata.parameters:\n self.exp_metadata.parameters = {}\n return self.exp_metadata.parameters", "def buildParamsDict(self):\n self.params_dict = {\n \"img_dir\": self.savePathJoin(\"Images\"),\n \"depth_dir\": self.savePathJoin(\"Depth\"),\n \"back_of_dir\": self.savePathJoin(\"Back_Of\"),\n \"of_dir\": self.savePathJoin(\"Of\"),\n \"save_dir\": self.user[\"Save\"],\n \"high\": self.high,\n \"low\": self.low,\n \"run_dict\": self.run_dict,\n \"of_model\": self.app.get_resource(\n os.path.join(\"of_models\", \"network-default.pytorch\")\n ),\n \"depth_model\": self.app.get_resource(\n os.path.join(\"depth_models\", \"model_city2kitti.meta\")\n ),\n \"yolo_weights\": self.app.get_resource(\n os.path.join(\"yolo\", \"yolov3.weights\")\n ),\n \"yolo_v\": self.app.get_resource(os.path.join(\"yolo\", \"yolov3.cfg\")),\n \"coco_names\": self.app.get_resource(os.path.join(\"yolo\", \"coco.names\")),\n \"object_detection_dir\": self.savePathJoin(\"ObjectDetection\"),\n \"plot_speed_dir\": PLOT_SPEED_DIR,\n \"plot_crash_dir\": PLOT_CRASH_DIR,\n \"numbers_dir\": NP_DIR,\n \"plot_error_dir\": PLOT_ERROR_DIR,\n \"speed_gt\": self.user[\"GT\"],\n \"vid_path\": self.user[\"Video\"],\n \"super_pixel_method\": self.super_pixel_method,\n \"super_pixel_dir\": SUPER_PIXEL_DIR,\n \"send_video_frame\": False,\n \"create_csv\": self.ui.c_csv.isChecked(),\n \"create_draw\": self.ui.c_draw.isChecked(),\n \"create_velocity\": self.ui.c_velocity.isChecked(),\n \"create_video_fps\": 
int(self.ui.t_fps.text()),\n \"optimize_params\": self.ui.c_optimize.isChecked(),\n \"super_pixel_label_dir\": os.path.join(\n self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method\n ),\n }", "def _create_param_dict(self, func_args):\n for i, a in enumerate(func_args):\n self.fn.args[i].name = str(a)\n self.param_dict[a] = self.fn.args[i]", "def get_parameters(self):\n d = Algorithm.get_parameters(self)\n d.update({\n 'M': d.pop('population_size', self.population_size),\n 'num_tests': self.num_tests,\n 'num_searches': self.num_searches,\n 'num_searches_best': self.num_searches_best,\n 'bonus1': self.bonus1,\n 'bonus2': self.bonus2,\n 'num_enabled': self.num_enabled,\n 'local_searches': self.local_searches\n })\n return d", "def parameters(self) -> Mapping[str, str]:\n return pulumi.get(self, \"parameters\")", "def params(self):\r\n return {\r\n \"startdate\": self.start.strftime(\"%Y%m%d\"),\r\n \"enddate\": self.end.strftime(\"%Y%m%d\"),\r\n \"download\": \"0\",\r\n \"Lfunds\": \"1\",\r\n \"InvFunds\": \"1\",\r\n }", "def get_url_params(\n self, context: Optional[dict], next_page_token: Optional[dict]\n ) -> Dict[str, Any]:\n # empty dictionary, since we pass all the parameters in the POST request payload\n return {}", "def generate_params(type_, version, regions=None, timee=None, carrier=None):\n regions = regions or \"JP--JP\"\n timee = timee or str(int(time.time() * 1000))\n carrier = carrier or \"(null)(null)\"\n key = _generate_conn_info_key(type_, version, regions, timee, carrier)\n params = {\n \"type\": type_,\n \"version\": version,\n \"regions\": regions,\n \"time\": timee,\n \"carrier\": carrier,\n \"key\": key,\n }\n return params", "def _identifying_params(self) -> dict[str, Any]:\n return {**{\"model_path\": self.model_path}, **self._default_params}", "def set_rand_params(self) -> Dict:\n new_params: Dict = self.gen_params()\n self.set_params(new_params)\n return new_params", "def get_parameters(**kwargs):\r\n parameters = vars(global_file.params)\r\n for key, value in kwargs.items():\r\n parameters[str(key)] = value\r\n return parameters", "def get_parameters(self):\n params = {}\n for p in self.DEFAULT_VALUES.keys():\n params[p] = getattr(self, p)\n return params", "def get_params(self):\n raise NotImplementedError", "def generate_template_dict(self):\n # Get the existing parameters\n params = super().generate_template_dict()\n\n # Add our custom parameters\n params['job_parameter_file'] = self.job_parameter_file\n params['job_output_directory'] = self.job_output_directory\n\n # Return the updated params\n return params", "def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params", "def extract_parameters(self) -> Dict[str, Set[str]]:\n regex = \"\\{([A-Za-z0-9_]+)\\}\"\n reserved_parameters = [\n \"output\",\n \"input\",\n \"output_vec\",\n \"input_vec\",\n \"df\",\n \"vec_open\",\n \"vec_close\",\n ]\n parameters = {}\n for scope in self.scopes:\n parameters[scope] = set(\n [\n x\n for x in re.findall(regex, self.call)\n if x not in reserved_parameters\n ]\n )\n return parameters", "def parameters(self) -> Dict[str, str]:\n return self._parameters", "def _build_param_dict(self):\n # The parameter dictionary.\n self._param_dict = ProtocolParameterDict()\n\n # Add parameter handlers to parameter dictionary for instrument configuration parameters.\n self._param_dict.add(Parameter.SAMPLE_INTERVAL,\n '', # this is a driver only parameter\n None,\n int,\n 
type=ParameterDictType.INT,\n startup_param=True,\n display_name='D1000 Sample Periodicity',\n range=(1, 3600),\n description='Periodicity of D1000 temperature sample in autosample mode: (1-3600)',\n default_value=DEFAULT_SAMPLE_RATE,\n units=Units.SECOND,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._add_setup_param(Parameter.CHANNEL_ADDRESS,\n int,\n type=ParameterDictType.INT,\n display_name='Base Channel Address',\n description='Hex value of ASCII character to ID unit, e.g. 31 is the ASCII code for 1:'\n ' (30-31, 41-5A, 61-7A)',\n range=(0x30, 0x7A),\n default_value=0x31)\n self._add_setup_param(Parameter.LINEFEED,\n bool,\n type=ParameterDictType.BOOL,\n display_name='Line Feed Flag',\n range={'True': True, 'False': False},\n description='Enable D1000 to generate a linefeed before and after each response:'\n ' (true | false)',\n default_value=False)\n self._add_setup_param(Parameter.PARITY_TYPE,\n bool,\n type=ParameterDictType.BOOL,\n display_name='Parity Type',\n range={'Odd': True, 'Even': False},\n description='Sets the parity: (true:odd | false:even)',\n default_value=False)\n self._add_setup_param(Parameter.PARITY_ENABLE,\n bool,\n type=ParameterDictType.BOOL,\n display_name='Parity Flag',\n range={'True': True, 'False': False},\n description='Enable use of parity bit, a parity error will be issued if detected:'\n ' (true | false)',\n default_value=False)\n self._add_setup_param(Parameter.EXTENDED_ADDRESSING,\n bool,\n type=ParameterDictType.BOOL,\n display_name='Extended Addressing',\n range={'True': True, 'False': False},\n description='Enable extended addressing: (true | false)',\n default_value=False)\n self._add_setup_param(Parameter.BAUD_RATE,\n int,\n type=ParameterDictType.INT,\n display_name='Baud Rate',\n range={'38400': 0, '19200': 1, '9600': 2, '4800': 3, '2400': 4, '1200': 5, '600': 6,\n '300': 7, '57600': 8},\n description='Using ethernet interface in deployed configuration: (300, 600, '\n '1200, 2400, 4800, 9600, 19200, 38400, 57600)',\n default_value=9600,\n units=Units.BAUD)\n self._add_setup_param(Parameter.ALARM_ENABLE,\n bool,\n type=ParameterDictType.BOOL,\n display_name='Enable Alarms',\n range={'True': True, 'False': False},\n description='Enable alarms to be controlled by the Digital Output (DO) command:'\n ' (true | false)',\n default_value=False)\n self._add_setup_param(Parameter.LOW_ALARM_LATCH,\n bool,\n type=ParameterDictType.BOOL,\n display_name='Low Alarm Latching',\n range={'True': True, 'False': False},\n description='Enable changing the alarm to latching mode: (true | false)',\n default_value=False)\n self._add_setup_param(Parameter.HIGH_ALARM_LATCH,\n bool,\n type=ParameterDictType.BOOL,\n display_name='High Alarm Latching',\n range={'True': True, 'False': False},\n description='Enable changing the alarm to latching mode: (true | false)',\n default_value=False)\n self._add_setup_param(Parameter.RTD_4_WIRE,\n bool,\n type=ParameterDictType.BOOL,\n display_name='4 Wire RTD Flag',\n range={'True': True, 'False': False},\n description='Represents a physical configuration of the instrument, '\n 'disabling may cause data to be misaligned: (true | false)',\n default_value=True)\n self._add_setup_param(Parameter.TEMP_UNITS,\n bool,\n type=ParameterDictType.BOOL,\n display_name='Fahrenheit Flag',\n range={'Fahrenheit': True, 'Celsius': False},\n description='Flag to control the temperature format: (true:Fahrenheit | false:Celsius)',\n default_value=False)\n self._add_setup_param(Parameter.ECHO,\n bool,\n type=ParameterDictType.BOOL,\n 
display_name='Daisy Chain',\n range={'True': True, 'False': False},\n description='If not set, only 1 out of 3 D1000s will process commands: (true | false)',\n default_value=True)\n self._add_setup_param(Parameter.COMMUNICATION_DELAY,\n int,\n type=ParameterDictType.INT,\n display_name='Communication Delay',\n range=(0, 3),\n description='The number of delays to add when processing commands: (0-3)',\n default_value=0)\n self._add_setup_param(Parameter.PRECISION,\n int,\n type=ParameterDictType.INT,\n display_name='Precision',\n range={'4 digits': 0, '5 digits': 1, '6 digits': 2, '7 digits': 3},\n description='Number of digits the instrument should output for temperature query: '\n '(0=4-3=7)',\n default_value=6)\n self._add_setup_param(Parameter.LARGE_SIGNAL_FILTER_C,\n float,\n type=ParameterDictType.FLOAT,\n display_name='Large Signal Filter Constant',\n range={'0': 0, '.25': 1, '.5': 2, '1': 3, '2': 4, '4': 5, '8': 6, '16': 7},\n description='Time to reach 63% of its final value: '\n '(0 = 0.0, 1 = 0.25, 2 = 0.5, 3 = 1.0, 4 = 2.0, 5 = 4.0, 6 = 8.0, 7 = 16.0)',\n default_value=0.0,\n units=Units.SECOND)\n self._add_setup_param(Parameter.SMALL_SIGNAL_FILTER_C,\n float,\n type=ParameterDictType.FLOAT,\n display_name='Small Signal Filter Constant',\n range={'0': 0, '.25': 1, '.5': 2, '1': 3, '2': 4, '4': 5, '8': 6, '16': 7},\n description='Smaller filter constant, should be larger than large filter constant: '\n '(0 = 0.0, 1 = 0.25, 2 = 0.5, 3 = 1.0, 4 = 2.0, 5 = 4.0, 6 = 8.0, 7 = 16.0)',\n default_value=0.50,\n units=Units.SECOND)\n\n for key in self._param_dict.get_keys():\n self._param_dict.set_default(key)", "def get_params(self, deep=...):\n ...", "def __iter__(self):\n return dict(self.parameters)", "def params_helper(self,**kwargs):\n\n dic = {'output' : 'json, xml, kml',\n 'maxresults' : 'limit on max number of results returned ; Default is limited to 100',\n 'countrycode' : 'GB, US etc ISO Country Code ==> Only 2 caracters !',\n 'latitude' : 'latitude reference for distance calculation',\n 'distance' : 'return results based on specified distance from specified latitude/longitude',\n 'distanceunit' : 'Miles or km',\n 'operatorid' : 'exact match on a given EVSE operator id (comma separated list)',\n 'connectiontypeid' : ' exact match on a given connection type id (comma separated list)',\n 'countryid' : 'exact match on a given country id (comma separated list)',\n 'levelid' : 'exact match on a given charging level (1-3) id (comma separated list)',\n 'minpowerkw' : 'minimum output power in kW (this information is not known for many locations)',\n 'usagetypeid' : 'exact match on a given usage type id (comma separated list) ',\n 'statustypeid' : ' exact match on a given status type id (comma separated list)',\n 'dataproviderid ' : 'exact match on a given data provider id id (comma separated list). Use opendata=true for only OCM provided (\"Open\") data.',\n 'modifiedsince' : 'POIs modified since the given date (UTC) e.g. 2016-09-15T09:30',\n 'opendata' : ' true or false. Set to true to include only Open Data licensed content, false to return only non-open licensed data. By default all available data is returned.',\n 'includecomments' : ' true or false. Set to true to also include user comments and media items (photos) per charging location. Default = false.',\n 'verbose ' : ' true or false. Set to false to get a smaller result set with null items removed. Default = true.',\n 'compact ' : 'true or false. 
Set to true to remove reference data objects from output (just returns IDs for common reference data such as DataProvider etc). Default = false.',\n 'camelcase' : 'true or false. Set to true to get a property names in camelCase format. Default = false',\n 'callback' : 'specify the name of the JSONP callback (if required), JSON response type only.'\n }\n\n if len(kwargs)==0 :\n\n for key in dic.keys() :\n print(key)\n\n else :\n \n for k in kwargs: \n print(dic.get(k))", "def get_params(self) -> Dict:\n params: Dict = {}\n params['g_leak']: float = self.leak.get_g()\n params['g_nav']: float = self.nav.get_g()\n params['g_kvhh']: float = self.kvhh.get_g()\n params['g_kva']: float = self.kva.get_g()\n params['g_kvsi']: float = self.kvsi.get_g()\n params['g_cav']: float = self.cav.get_g()\n params['g_kca']: float = self.kca.get_g()\n params['g_nap']: float = self.nap.get_g()\n params['g_kir']: float = self.kir.get_g()\n params['g_ampar']: float = self.ampar.get_g()\n params['g.nmdar']: float = self.nmdar.get_g()\n params['g_gabar']: float = self.gabar.get_g()\n params['t_Ca']: float = self.tau_ca\n return params", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['base'] = self.base\n return paramDict", "def get_params(self, _deep: bool = True) -> dict:\n return self.params", "def get_params(self):\n return []", "def _build_param_dict(self):\n # Add parameter handlers to parameter dict. \n self._param_dict.add(SBE37Parameter.OUTPUTSAL,\n r'(do not )?output salinity with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.OUTPUTSV,\n r'(do not )?output sound velocity with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.NAVG,\n r'number of samples to average = (\\d+)',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.SAMPLENUM,\n r'samplenumber = (\\d+), free = \\d+',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.INTERVAL,\n r'sample interval = (\\d+) seconds',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.STORETIME,\n r'(do not )?store time with each sample',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.TXREALTIME,\n r'(do not )?transmit real-time data',\n lambda match : False if match.group(1) else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.SYNCMODE,\n r'serial sync mode (enabled|disabled)',\n lambda match : False if (match.group(1)=='disabled') else True,\n self._true_false_to_string)\n self._param_dict.add(SBE37Parameter.SYNCWAIT,\n r'wait time after serial sync sampling = (\\d+) seconds',\n lambda match : int(match.group(1)),\n self._int_to_string)\n self._param_dict.add(SBE37Parameter.TCALDATE,\n r'temperature: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.TA0,\n r' +TA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA1,\n r' +TA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA2,\n r' +TA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n 
lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.TA3,\n r' +TA3 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CCALDATE,\n r'conductivity: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.CG,\n r' +G = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CH,\n r' +H = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CI,\n r' +I = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CJ,\n r' +J = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.WBOTC,\n r' +WBOTC = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CTCOR,\n r' +CTCOR = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.CPCOR,\n r' +CPCOR = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PCALDATE,\n r'pressure .+ ((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n self._param_dict.add(SBE37Parameter.PA0,\n r' +PA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PA1,\n r' +PA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PA2,\n r' +PA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA0,\n r' +PTCA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA1,\n r' +PTCA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCA2,\n r' +PTCA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB0,\n r' +PTCSB0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB1,\n r' +PTCSB1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.PTCB2,\n r' +PTCSB2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.POFFSET,\n r' +POFFSET = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RCALDATE,\n r'rtc: +((\\d+)-([a-zA-Z]+)-(\\d+))',\n lambda match : self._string_to_date(match.group(1), '%d-%b-%y'),\n self._date_to_string)\n 
self._param_dict.add(SBE37Parameter.RTCA0,\n r' +RTCA0 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RTCA1,\n r' +RTCA1 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)\n self._param_dict.add(SBE37Parameter.RTCA2,\n r' +RTCA2 = (-?\\d.\\d\\d\\d\\d\\d\\de[-+]\\d\\d)',\n lambda match : float(match.group(1)),\n self._float_to_string)", "def as_api_parameters(self):\n raise NotImplementedError(\n 'as_api_parameters not implemented on ' + self.__class__)", "def getInitParams(self):\n paramDict = {}\n paramDict['upperBoundUsed' ] = self.upperBoundUsed\n paramDict['lowerBoundUsed' ] = self.lowerBoundUsed\n paramDict['hasInfiniteBound'] = self.hasInfiniteBound\n paramDict['upperBound' ] = self.upperBound\n paramDict['lowerBound' ] = self.lowerBound\n paramDict['adjustmentType' ] = self.__adjustmentType\n paramDict['dimensionality' ] = self.dimensionality\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['apex' ] = self.apex\n paramDict['min' ] = self.min\n paramDict['max' ] = self.max\n return paramDict", "def _get_parameters(self, *keys):\n return {k: v for k, v in self.param.items() if k in keys}", "def params(self):\n return {'out_dim': self.out_dim,\n 'act_fn': self.act_fn,\n 'use_bias': self.use_bias,\n 'idx': self.idx}", "def getParameters(self):\n\n current_params = {'taux': self.taux, 'mu': self.mu, 'G': self.G, 'alpha_0': self.alpha_0,\n 'delta': self.delta, 'p': self.p, 'I0': self.I0, 'kparam': self.kparam}\n\n return (current_params)", "def params(kernels, time, target, target_frame, observer, corr):\n return {\n 'kernels': kernels,\n 'times': time,\n 'target': target,\n 'target_frame': target_frame,\n 'observer': observer,\n 'aberration_correction': corr,\n }", "def get_params(self, deep=True):\n out = dict()\n for key in self._get_param_names():\n value = getattr(self, key, None)\n if deep and hasattr(value, 'get_params'):\n deep_items = value.get_params().items()\n out.update((key + '__' + k, val) for k, val in deep_items)\n out[key] = value\n return out", "def parameters(self):", "def get_params(self):\n return self.params\n\n \"\"\"\n ____________________________________________________________________________\n\n Fields retrieved from search by default\n ---------------------------------------\n 'id': True,\n 'title': True,\n 'agency' : True,\n 'awardeeCity' : True,\n 'awardeeName' : True,\n 'awardeeStateCode' : True,\n 'date' : True,\n 'fundsObligatedAmt' : True,\n 'piFirstName' : True,\n 'piLastName' : True,\n\n Other retrievable fields\n ------------------------\n 'offset' : False\n 'awardeeCountryCode' : False,\n 'awardeeCounty' : False,\n 'awardeeDistrictCode' : False,\n 'awardeeZipCode' : False,\n 'cfdaNumber' : False,\n 'coPDPI' : False,\n 'startDate' : False,\n 'expDate' : False,\n 'estimatedTotalAmt' : False,\n 'fundsObligatedAmt' : True,\n 'dunsNumber' : False,\n 'fundProgramName' : False,\n 'parentDunsNumber' : False,\n 'pdPIName' : False,\n 'perfCity' : False,\n 'perfCountryCode' : False,\n 'perfCounty' : False,\n 'perfDistrictCode' : False,\n 'perfLocation' : False,\n 'perfStateCode' : False,\n 'perfZipCode' : False,\n 'poName' : False,\n 'primaryProgram' : False,\n 'transType' : False,\n 'awardee' : False,\n 'poPhone' : False,\n 'poEmail' : False,\n 'awardeeAddress' : False,\n 'perfAddress' : False,\n 'publicationResearch' : False,\n 
'publicationConference' : False,\n 'fundAgencyCode' : False,\n 'awardAgencyCode' : False,\n 'projectOutComesReport' : False,\n 'abstractText' : False,\n 'piMiddeInitial' : False,\n 'piLastName' : True,\n 'piPhone' : False,\n 'piEmail' : False\n \"\"\"", "def get_params(self, deep=True):\n return {p: getattr(self, p) for p in self.params}", "def get_params(self, deep = True, bounds = True):\n params = dict() \n for p in self._LIST_PARAMETERS:\n params[p] = self._get_one_param(p)\n if(bounds):\n params[p + '_bounds'] = self._get_one_bound(p)\n if(deep and self._FLAG_TYPE == 'collection' and p == 'list_func'):\n for n, sub_obj in enumerate(params[p]):\n sub_params = sub_obj.get_params(deep, bounds)\n params.update({'f' + str(n) + '__' + key: val for key, val in sub_params.items()})\n \n return params", "def params(self, **kwargs):\n return kwargs", "def params():\n return utils.Params('../experiments/base-model/params.json')", "def parameters(self):\n pass", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar\n paramDict['k' ] = self.k\n paramDict['low' ] = self.low\n return paramDict" ]
[ "0.79212326", "0.7483993", "0.72430176", "0.71748704", "0.7172315", "0.7096033", "0.7091137", "0.70732015", "0.7070858", "0.7060539", "0.697383", "0.69705796", "0.6943187", "0.6936735", "0.69144523", "0.6904813", "0.68293667", "0.6813441", "0.6812606", "0.67854816", "0.677385", "0.6767349", "0.6766769", "0.6762466", "0.67538434", "0.675141", "0.67459726", "0.67455447", "0.67318195", "0.67203605", "0.6709596", "0.6706495", "0.6698586", "0.6681769", "0.66693825", "0.6663467", "0.66273135", "0.6627116", "0.66263175", "0.6613837", "0.6608858", "0.6603977", "0.6603497", "0.658552", "0.65776914", "0.6555073", "0.6550424", "0.6537754", "0.65355265", "0.6533235", "0.6524873", "0.6518151", "0.65179825", "0.651103", "0.6494363", "0.6484088", "0.64798564", "0.6473847", "0.64641315", "0.6458704", "0.64570564", "0.6454649", "0.64536023", "0.644635", "0.64403176", "0.6439598", "0.643869", "0.6435852", "0.64326", "0.64207715", "0.6415212", "0.641442", "0.64107275", "0.6395175", "0.63939637", "0.6390637", "0.63787705", "0.6366297", "0.63641876", "0.6360311", "0.6357524", "0.63569945", "0.63524884", "0.6342018", "0.6332297", "0.63218594", "0.63214105", "0.6316674", "0.63160855", "0.63146937", "0.63143945", "0.6310646", "0.6307599", "0.63046557", "0.6300692", "0.62964284", "0.62963736", "0.62920105", "0.62891674", "0.6282216", "0.62787825" ]
0.0
-1
Generate API keyword args for these details.
def as_api_parameters(self):
    return {
        'return_token': self.token,
        'return_url': self.url,
        'client_http_user_agent': self.user_agent,
        'client_http_accept': self.accept,
        'remote_site': self.remote_site,
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extra_target_arguments(self):\n return {}", "def init_args(self):\n return {\n \"doc\": self.__doc__.format(name=colored(self.module_name, \"green\", attrs=['bold','underline'])),\n \"Url\": \"set a target url\",\n 'Type': \"set type to check , [php, asp, aspx, cgi, dir , mdb]\",\n }", "def as_api_parameters(self):\n raise NotImplementedError(\n 'as_api_parameters not implemented on ' + self.__class__)", "def help_args():\n pass", "def Help():\n names=api_method_dict.keys()\n names.sort()\n return ''.join(['**** ' + api_method_dict[name].__name__ + '\\n' + api_method_dict[name].__doc__ + '\\n'\n for name in names])", "def add_extra_args(self):\n super(AwsAccessListKeysMethod, self).add_extra_args()\n self.parser.add_argument(\"--key_pair_name\", required=False, default=None,\n help=\"AWS Key Pair name\")", "def _format_api_string(self):\n api_string = self.api_name\n arg_string_list = []\n if not self.api_args is None and \\\n len(self.api_args) > 0:\n for key in self.api_args:\n try:\n value = self.api_args[key]\n except TypeError:\n #assert False, f\"node: {self.api_name} key: {key} bad arg: {self.api_args}\" + \\\n # f\" type: {self.api_args.__class__.__name__}\"\n print(f\"node: {self.api_name} key: {key} bad arg: {self.api_args}\" + \\\n f\" type: {self.api_args.__class__.__name__}\")\n raise TypeError\n if isinstance(value, list):\n value_string = \"[ \" + \",\".join([self._api_value_string(x) for x in value]) + \" ]\"\n else:\n if value is None:\n assert False, f\"key={key}\"\n value_string = self._api_value_string(value)\n arg_string_list.append(f\"{key}:{value_string}\")\n api_string += \"(\" + \" \".join(arg_string_list) + \")\"\n #assert False, f\"{self.api_name}: {api_string}\"\n return api_string", "def valid_args(self):\r\n for k in request.args.keys():\r\n if k not in ['api_key']:\r\n getattr(self.__class__, k)", "def api(self) -> str:", "def generateKwargsAsString(self):\n args = \"\"\n axisList = self.tabWidget.currentWidget()\n\n for axisWidget in axisList.getAxisWidgets():\n args += \"%s = %s, \" % (axisWidget.axis.id,\n axisWidget.getCurrentValuesAsStr())\n\n # Generate additional args\n args += 'squeeze = 0'\n args += \", order = '%s' \" % axisList.getAxesOrderString()\n return args", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def get_dynamic_setup_params():\n\n return {\n # Retrieve the long description from the README\n \"long_description\": read_file(\"README.md\")\n }", "def add_extra_args(self):\n pass", "def get_keys_info() -> Dict[str, List[str]]:\n args_dict = {}\n\n for api in API_DICT:\n arg_list = list(\n getattr(\n sys.modules[__name__], \"set_\" + str(api) + \"_key\"\n ).__code__.co_varnames\n )\n arg_list.remove(\"persist\")\n arg_list.remove(\"show_output\")\n args_dict[api] = arg_list\n\n return args_dict", "def pykwarg(self):\n return self._pykwarg", "def full_args():\n return setup_args()", "def _generate_keywords(self):\n _keywords = [*self._lookup_opcodes_dir.keys(), *self._registers_list.keys()]\n for key in _keywords:\n self._keywords.extend(key.split(\" \"))\n return", "def create_parameters_description():\n description = OrderedDict()\n description['GeneralArguments'] = [\n {\n 'main_argument_name': '--config-file',\n 'argument_name_options': ['--config'],\n 'parameter_name': 'config_file',\n 'help': \"\"\"A json-encoded configuration file, in which one can specify the parameters\n for all detectors in use as well as some general parameters for the whole run.\n The encoded object should therefore 
be a dictionary,\n with possible top-level keys 'GeneralArguments' (general parameters, not relevant\n to a detector class), 'SaccadeDetector', 'BlinkDetector', 'FixationDetector'\n and 'SmoothPursuitDetector'.\n\n The value for each of the present keys should in turn be a dictionary with keys\n identical to the longest argument names below, without the eye movement name prefix.\n An example (and equivalent to default parameters) configuration file is provided\n in default_parameters.conf.json and includes all possible keys.\n\n In your custom configuration file you do not have to specify any the parameter values,\n missing keys will be considered to have the default value.\n\n For default values, you can consult the respective classes' __init__ methods in\n saccade_detector.py, blink_detector.py, fixation_detector.py and sp_detector.py.\n\n\n Values given through the console interface override the ones in the config file.\"\"\",\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--input-folder',\n 'argument_name_options': ['--in'],\n 'parameter_name': 'input_folder',\n 'help': 'From where to load the gaze points data. If absent, must be present in --config-file file. '\n 'This folder is assumed to have subfolders that correspond to videos, for which recordings '\n 'were made. Each such subdirectory should contain gaze files (one file per observer).',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--gaze-file-pattern',\n 'argument_name_options': ['--pattern'],\n 'parameter_name': 'gaze_file_pattern',\n 'help': 'Will look for such files in all subdirectories of --input-folder. '\n 'For GazeCom, \\'*.arff\\' is a recommended value (or \\'*.coord\\', if dealing with original dataset files). '\n 'One can use this parameter to match some name pattern as well (not just the file extension), '\n 'for example with \\'*_needed_files_*.arff\\'. \\n'\n 'If no wildcard symbol is found in the provided string, it is assumed to be just the file name '\n 'suffix, so it will be prepended with a wildcard symbol (i.e. \".coord\" will become \"*.coord\").',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--input-data-type',\n 'argument_name_options': ['--type'],\n 'parameter_name': 'input_data_type',\n 'help': 'Type of data loader to use (if not specified, will try to detect automatically)',\n 'kwargs': {'choices': ['DSF', 'ARFF', 'labelled ARFF']}\n },\n {\n 'main_argument_name': '--verbose',\n 'argument_name_options': ['-v'],\n 'parameter_name': 'verbose',\n 'default': None,\n 'help': 'Whether to output some information about the progress of the run to STDERR',\n 'kwargs': {'action': 'store_const', 'const': True} # only like this can support the default of None\n # (not to override the config all the time\n # with a missing value)\n },\n {\n 'main_argument_name': '--movies',\n 'argument_name_options': ['-m'],\n 'parameter_name': 'movies',\n 'help': 'Which movies out of the input folder to use (might be useful for train/test split). '\n 'The gaze data is supposed to be put under respective directories in the input folder. 
'\n 'If none are given, all available ones are used.',\n 'kwargs': {'nargs': '+', 'default': None}\n },\n {\n 'main_argument_name': '--output-folder',\n 'argument_name_options': ['--out'],\n 'parameter_name': 'output_folder',\n 'help': 'Where to output the resulting labelled data (if empty, will create a new temporary directory)',\n 'kwargs': {}\n },\n ]\n\n description['SaccadeDetector'] = [\n {\n 'main_argument_name': '--tolerance',\n 'argument_name_options': ['--tol'],\n 'parameter_name': 'tolerance',\n 'help': 'The relative size of the area outside the screen that is still considered to be legal',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-onset-fast-degree-per-sec',\n 'argument_name_options': ['--threshold-onset-fast'],\n 'parameter_name': 'threshold_onset_fast_degree_per_sec',\n 'help': 'Threshold for initialization of saccade detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-onset-slow-degree-per-sec',\n 'argument_name_options': ['--threshold-onset-slow'],\n 'parameter_name': 'threshold_onset_slow_degree_per_sec',\n 'help': 'A slower threshold for saccade onset detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-offset-degree-per-sec',\n 'argument_name_options': ['--threshold-offset'],\n 'parameter_name': 'threshold_offset_degree_per_sec',\n 'help': 'Threshold for saccade offset detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--max-speed-degree-per-sec',\n 'argument_name_options': ['--max-speed'],\n 'parameter_name': 'max_speed_degree_per_sec',\n 'help': 'Maximum speed of saccadic eye movements',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--min-duration-microsec',\n 'argument_name_options': ['--min-duration'],\n 'parameter_name': 'min_duration_microsec',\n 'help': 'Minimal saccade duration threshold',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--max-duration-microsec',\n 'argument_name_options': ['--max-duration'],\n 'parameter_name': 'max_duration_microsec',\n 'help': 'Maximal saccade duration threshold',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--velocity-integral-interval-microsec',\n 'argument_name_options': ['--velocity-integral-interval'],\n 'parameter_name': 'velocity_integral_interval_microsec',\n 'help': 'Interval duration, over which to integrate velocity computation.',\n 'kwargs': {'type': float}\n },\n ]\n\n description['BlinkDetector'] = [\n {\n 'main_argument_name': '--max-distance-to-saccade-microsec',\n 'argument_name_options': ['--max-distance-to-saccade'],\n 'parameter_name': 'max_distance_to_saccade_microsec',\n 'help': 'Threshold for distance from a definite blink to a nearby saccade, which will be marked as blink '\n 'as well.',\n 'kwargs': {'type': float}\n },\n ]\n\n description['FixationDetector'] = [\n {\n 'main_argument_name': '--prefiltering-interval-spread-threshold-degrees',\n 'argument_name_options': ['--prefiltering-interval-spread-threshold'],\n 'parameter_name': 'prefiltering_interval_spread_threshold_degrees',\n 'help': 'All the intersaccadic intervals shorter than this will be deemed fixations',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--min-sp-duration-microsec',\n 'argument_name_options': ['--min-sp-duration'],\n 'parameter_name': 'min_sp_duration_microsec',\n 'help': 'Minimal duration of a potential SP candidate (fast-moving samples shorter than this threshold '\n 'are 
labelled as noise)',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--sliding-window-width-microsec',\n 'argument_name_options': ['--sliding-window-width'],\n 'parameter_name': 'sliding_window_width_microsec',\n 'help': 'Sliding window for coordinates smoothing',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--normalization-sliding-window-size-samples',\n 'argument_name_options': ['--normalization-sliding-window'],\n 'parameter_name': 'normalization_sliding_window_size_samples',\n 'help': 'A moving average sliding window size (to normalize the data)',\n 'kwargs': {'type': int}\n },\n {\n 'main_argument_name': '--speed-threshold-degrees-per-sec',\n 'argument_name_options': ['--speed-threshold'],\n 'parameter_name': 'speed_threshold_degrees_per_sec',\n 'help': 'Biggest plausible speed for a noisy fixation',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--sliding-window-criterion',\n 'argument_name_options': ['--sliding-window'],\n 'parameter_name': 'sliding_window_criterion',\n 'help': 'Defines the way we check the samples with the sliding_window_criterion threshold: '\n 'either compute the average speed in the current window, or get the spread of '\n 'the gaze points (i.e. biggest XY bounding box side), divided by the duration',\n 'kwargs': {'choices': ['speed', 'spread']}\n },\n {\n 'main_argument_name': '--intersaccadic-interval-duration-threshold-microsec',\n 'argument_name_options': ['--intersaccadic-interval-duration-threshold'],\n 'parameter_name': 'intersaccadic_interval_duration_threshold_microsec',\n 'help': 'Minimal size of the intersaccadic interval to apply the step with the moving average analysis',\n 'kwargs': {'type': float}\n },\n ]\n\n description['SmoothPursuitDetector'] = [\n # a mutually exclusive group\n [\n {\n 'main_argument_name': '--min-pts',\n 'argument_name_options': [],\n 'parameter_name': 'min_pts',\n 'soft_type': int,\n 'help': 'An integer indicating the minimum number of points required to form a core point\\'s '\n 'neighbourhood, or a string \\'num_observers\\' (meaning that the actual number of observers '\n 'for each movie will be substituted, depending on the data set provided).\\n'\n 'This option is mutually exclusive with --min-observers.',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--min-observers',\n 'argument_name_options': [],\n 'parameter_name': 'min_observers',\n # first try casting to int, then to float (since int cast will fail for a float)\n 'soft_type': [int, float],\n 'help': 'Either a floating point in [0.0; 1.0] range (indicating the share of all the present '\n 'observers per movie) or int [2; +\\inf) (indicating the absolute threshold for '\n 'observer count in the core point\\'s neighbourhood).\\n'\n 'This option is mutually exclusive with --min-pts.',\n 'kwargs': {}\n }\n ],\n {\n 'main_argument_name': '--eps-deg',\n 'argument_name_options': ['--eps'],\n 'parameter_name': 'eps_deg',\n 'help': 'Spatial Euclidean distance threshold that defines the neighbourhood in the XY-plane',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--time-slice-microsec',\n 'argument_name_options': ['--time-slice'],\n 'parameter_name': 'time_slice_microsec',\n 'help': 'Width of the time slice that defines the size of the neighbourhood on the time axis.',\n 'kwargs': {'type': float}\n },\n ]\n\n return description", "def _generate_params(self):\n return {\n 'lis_outcome_service_url': self.lis_outcome_service_url,\n 'lis_result_sourcedid': self.lis_result_sourcedid,\n 'oauth_consumer_key': 
self.key\n }", "def create_api_keys(self, **kwargs):\n\n all_params = ['api_key']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_api_keys\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'api_key' in params:\n body_params = params['api_key']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ApiKeyWithPrivileges',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def extra_args(self):\n return []", "def _core_ar_kwarg(self,**kwargs) :\n\t\tpass", "def format_arguments(self, **kwargs):\n return kwargs", "def purefa_argument_spec():\n\n return dict(\n fa_url=dict(),\n api_token=dict(no_log=True),\n )", "def get_dynamic_setup_params():\n return {\n # Retrieve the long description from the README\n # 'long_description': read_file('README.rst'),\n 'install_requires': substitute_crypto_to_req(\n read_requirements('requirements.txt'),\n ),\n # 'extras_require': read_extras(),\n }", "def handle_filters():\n params = {'api_key': API_KEY}\n for k in demisto.args():\n if demisto.getArg(k):\n params[k] = demisto.getArg(k)\n return params", "def get_args():\n return {\"id\": fields.UUID(required=True, location=\"view_args\")}", "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def get_api_key(context) -> str:\n provided_api_key = \"\"\n for key, value in context.invocation_metadata():\n if key == \"api_key\":\n provided_api_key = str(value)\n return provided_api_key\n return provided_api_key", "def custom_openapi() -> Dict:\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"The GenomicMedLab Cool Seq Tool\",\n version=__version__,\n description=\"Common Operations On Lots-of Sequences Tool.\",\n routes=app.routes\n )\n\n openapi_schema[\"info\"][\"contact\"] = {\n \"name\": \"Alex H. 
Wagner\",\n \"email\": \"Alex.Wagner@nationwidechildrens.org\",\n \"url\": \"https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab\" # noqa: E501\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema", "def get_argument_as_keywords(self):\n status = True\n arg_kv = self.get_values_for_mandatory_args()\n if len(arg_kv) != len(self.req_args_list):\n msg = 'could not execute %s without mandatory arguments' % (object)\n self.data_repository = skip_and_report_status(self.data_repository, msg)\n status = False\n arg_kv = self.get_values_for_optional_args(arg_kv)\n return arg_kv, status", "def edit_keywords(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)", "def index_args():\n return {}", "def setup(self):\r\n \r\n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:\r\n \r\n # Set the required parameters\r\n for arg in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addReqArg(arg)\r\n \r\n # Set up the valid parameters\r\n for arg in RadiusAuthRestHandler.VALID_PARAMS:\r\n if arg not in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addOptArg(arg)", "def get_kwargs(self):\n return {}", "def add_required_arguments(self, *args):\n self._add_sample_specific_arguments(True, *args)", "def generate_call_string(self):\n if(self.api_key is None):\n raise error(\"API Key is not defined\");#Should base class do this? \n \n self.call_url=self.baseurl;\n if hasattr(self,'search_str'):\n self.call_url+=self.search_str;\n if hasattr(self,'filter_field_str'):\n self.call_url=self.call_url+'&'+self.filter_field_str;\n \n #loop over the parameters dict\n for key in self.input_params:\n self.call_url+=self.input_params[key];\n \n #finally add api key. 
at this point already checked it exists\n self.call_url=self.call_url+'&'+\"api-key=\"+str(self.api_key);\n return;", "def _custom_actioner(message: ActionMessage, defined_keyword_arg, **kwargs):\n print(message.additional_fields)\n print(defined_keyword_arg)\n print(kwargs)", "def manage_params(args):\n # Socrata API\n with open(\"secret/builtby-socrata.yaml\", 'r') as f:\n try:\n socrata_api_credentials = yaml.load(f)\n except yaml.YAMLError as exc:\n print(exc)\n\n socrata_app_token = socrata_api_credentials['app_token']\n\n # base params\n params = {\n '$$app_token': socrata_app_token\n }\n # remove null attributes\n args = {k: v for k, v in args.items() if v is not None}\n # add args to params\n params.update(args) # inplace\n\n return params", "def gen_args(self, obj, pa_names = False):\n\n pal, kwal = get_class_total_args(type(obj))\n\n try:\n get_val = type(obj).__get_init_arg_val__\n except AttributeError:\n get_val = getattr\n\n for pa in pal:\n v = get_val(obj, pa)\n self.gen_field((pa + \" = \") if pa_names else \"\")\n self.pprint(v)\n\n for kwa, default in kwal.items():\n try:\n v = get_val(obj, kwa)\n except AttributeError:\n # If value cannot be obtained, skip the argument generation\n continue\n\n # generate only arguments with non-default values\n if (v is default) or (v == default):\n continue\n\n self.gen_field(kwa + \" = \")\n self.pprint(v)", "def generate_arg_and_kwags():\n def gen_func(\n #df: DataSource,\n option: List[list],\n style: List[dict]\n )->List[Tuple[list, dict]]:\n\n if len(option) != len(style):\n raise SystemError(\"option and style must be same size list.\")\n\n arg_and_kwarg = []\n for o, s in zip(option, style):\n arg = [*o]\n kwargs = s\n arg_and_kwarg.append((arg, kwargs))\n return arg_and_kwarg\n return gen_func", "def define_parameters(self):", "def _add_pos_args(self, *args):\n arg_array = self._params.setdefault(\"positional_parameters\", [])\n # couchbase++ wants all args JSONified\n json_args = [json.dumps(arg) for arg in args]\n arg_array.extend(json_args)", "def add_kwargs():\n pass", "def _collect_repr_args(self, poargs, kwargs):", "def make_args(self, args):\n result_str = \"?\"\n for k, v in args.iteritems():\n result_str = result_str + k + \"=\" + v + \"&\"\n return result_str", "def Args(parser):\n flags.AddHcxActivationKeyArgToParser(parser)\n base.ASYNC_FLAG.AddToParser(parser)\n base.ASYNC_FLAG.SetDefault(parser, True)\n parser.display_info.AddFormat('yaml')", "def add_args(self):\n raise NotImplementedError", "def _set_named_args(self, **kv):\n # named_params = {}\n # for k in kv:\n # named_params[\"${0}\".format(k)] = json.dumps(kv[k])\n # couchbase++ wants all args JSONified\n named_params = {f'${k}': json.dumps(v) for k, v in kv.items()}\n\n self._params[\"named_parameters\"] = named_params\n return self", "def format_args(self, **kwargs: Any) -> str:\n return \"\"", "def _setup_api_properties(self):\n self.implicit_api_logical_id = GeneratedLogicalId.implicit_http_api()\n self.implicit_api_condition = \"ServerlessHttpApiCondition\"\n self.api_event_type = \"HttpApi\"\n self.api_type = SamResourceType.HttpApi.value\n self.api_id_property = \"ApiId\"\n self.editor = OpenApiEditor", "def get_cli_arguments(self):\n pass", "def _docs_params(**kwds):\n\n def dec(obj):\n obj.__orig_doc__ = obj.__doc__\n obj.__doc__ = dedent(obj.__doc__).format_map(kwds)\n return obj\n\n return dec", "def _make_args(self, args, defaults=[], vararg=None, kwonlyargs=[],\n kw_defaults=[], kwarg=None):\n # On Python 2 convert vararg and kwarg to raw 
name, raise error using\n # lineno stored on the node and lexer from self.\n # On Python 3.3 extract name and annotation\n # After should be straight forward\n raise NotImplementedError()", "def fill_args(cls, toolchain, parser):\n pass # pass must be overloaded (if required)", "def base_arguments(self):\n raise NotImplementedError()", "def help(self, keyword):\n if (keyword == 'all'):\n string = ('%-20s%-20s%-20s%s\\n' % ('Keyword', 'Type', 'Default', 'Comment'))\n for key, value in self.allowed_keys.items():\n string += ('%-20s%-20s%-20s%s\\n' % (key, str(value[0]), str(value[1]), value[2]))\n print string", "def main(pArgs):\n\n # Options and args... \n \n longoptions=[\"help\", \"usage\", \"endpoint=\", \"interface-type=\", \"verbose=\", \\\n \"recursive\", \"dbs-conf=\", \"show-prod\", \"show-caf\", \\\n \"only-subscribed\", \"only-custodial\"]", "def api():\n\treturn \"The API call\"", "def keyword_only(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if len(args) > 0:\n raise TypeError(\"Method %s only takes keyword arguments.\" % func.__name__)\n return func(**kwargs)\n notice = \".. Note:: This method requires all argument be specified by keyword.\\n\"\n wrapper.__doc__ = notice + wrapper.__doc__\n return wrapper", "def definearguments(self, customparser):\n if not customparser:\n return\n customparser.add_option(\n '--url',\n dest='url',\n help=\"Use the provided iLO URL to login.\",\n default=None,\n )\n customparser.add_option(\n '-u',\n '--user',\n dest='user',\n help=\"If you are not logged in yet, including this flag along\"\\\n \" with the password and URL flags can be used to log into a\"\\\n \" server in the same command.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-p',\n '--password',\n dest='password',\n help=\"\"\"Use the provided iLO password to log in.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-e',\n '--enc',\n dest='encode',\n action='store_true',\n help=SUPPRESS_HELP,\n default=False,\n )", "def set_api_access_keys(**kwargs):\n API_BASE_PARAMS['key'] = kwargs['key']", "def args_str(self):", "def build_method(method_name, description, parameters, api_path, http_method, summary, return_type):\n allow_per_page = False\n parameters = check_for_pre_attachment_param(parameters)\n arg_list = get_parameters(parameters)\n param_descriptions = get_parameter_descriptions(parameters)\n payload = build_payload(parameters)\n enums = check_for_enums(parameters)\n\n \"\"\"\n If the method returns an array, allow the per_page parameter for paging\n \"\"\"\n if return_type == 'array' or (method_name.startswith(\"list_\") and http_method == \"GET\"):\n arg_list.append('per_page=None')\n param_descriptions.append(':param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE')\n param_descriptions.append(':type per_page: integer or None')\n payload.append('\\'per_page\\': per_page,')\n allow_per_page = True\n\n arg_list.append('**request_kwargs')\n\n \"\"\"\n Create the method signature\n \"\"\"\n\n content = line_format('def ' + method_name + '(request_ctx, ' + ', '.join(arg_list) + '):', NONE)\n content += line_format('\"\"\"', FOUR)\n\n \"\"\"\n Create the method description text from the description in the meta api\n \"\"\"\n regex = re.compile(r'\\{api\\:(\\w+)\\#(\\w+).*?\\}')\n for line in description.splitlines(True):\n rst_line = regex.sub(format_api_string, line)\n content += line_format(rst_line.rstrip(), FOUR)\n\n \"\"\"\n list out the method paramters\n \"\"\"\n content += line_format('', 
NONE)\n content += line_format(':param request_ctx: The request context', EIGHT)\n content += line_format(':type request_ctx: :class:RequestContext', EIGHT)\n for param in param_descriptions:\n content += line_format(param, EIGHT)\n content += line_format(':return: '+summary, EIGHT)\n content += line_format(':rtype: requests.Response (with ' + return_type + ' data)', EIGHT)\n content += line_format('', NONE)\n content += line_format('\"\"\"', FOUR)\n content += line_format('', NONE)\n\n \"\"\"\n Add the per_page check\n \"\"\"\n if allow_per_page:\n content += line_format('if per_page is None:', FOUR)\n content += line_format('per_page = request_ctx.per_page', EIGHT)\n\n \"\"\"\n Add any enums if they exist.\n \"\"\"\n for enum in enums:\n content += line_format(enum, FOUR)\n\n \"\"\"\n Add the api path\n \"\"\"\n path_formatted = 'path = \\'' + api_path + '\\''\n content += line_format(path_formatted, FOUR)\n\n \"\"\"\n Add a payload if one exists\n \"\"\"\n payload_string = ''\n if payload:\n content += line_format('payload = {', FOUR)\n for item in payload:\n content += line_format(item, EIGHT)\n content += line_format('}', FOUR)\n payload_string = ', payload=payload'\n\n content += line_format('url = request_ctx.base_api_url + path.format(' + ', '.join(get_path_parameters(parameters)) + ')', FOUR)\n content += line_format(\n 'response = client.'+http_method.lower()+'(request_ctx, url' + payload_string + ', **request_kwargs)', FOUR)\n\n content += line_format('', NONE)\n content += line_format('return response', FOUR)\n content += line_format('', NONE)\n content += line_format('', NONE)\n return content", "def get_api_keys(self, **kwargs):\n\n all_params = ['page', 'per_page', '_from', 'to', 'sort_dir', 'sort_field', 'filters']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_api_keys\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page' in params:\n query_params['_page'] = params['page']\n if 'per_page' in params:\n query_params['_perPage'] = params['per_page']\n if '_from' in params:\n query_params['_from'] = params['_from']\n if 'to' in params:\n query_params['_to'] = params['to']\n if 'sort_dir' in params:\n query_params['_sortDir'] = params['sort_dir']\n if 'sort_field' in params:\n query_params['_sortField'] = params['sort_field']\n if 'filters' in params:\n query_params['_filters'] = params['filters']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[ApiKey]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def define_parameters(self):\n self.add_argument('--prefix', dest='prefix', type=str, optional=False,\n help='prefix for file names')\n 
self.add_argument('--sleepLength',\n dest = 'sleepLength',\n type = str,\n optional = True,\n help ='time to sleep before performing plugin action',\n default = '0')", "def toargs(context, schema, data):\n data = dict(data)\n args = {}\n for name, field in schema.namesAndDescriptions(True):\n field = field.bind(context)\n n = name\n if n.endswith('_') and iskeyword(n[:-1]):\n n = n[:-1]\n\n s = data.get(n, data)\n if s is not data:\n s = str(s)\n del data[n]\n\n try:\n args[str(name)] = field.from_unicode(s)\n except ValidationError as v:\n reraise(ConfigurationError('Invalid value for', n, str(v)),\n None, sys.exc_info()[2])\n elif field.required:\n # if the default is valid, we can use that:\n default = field.default\n try:\n field.validate(default)\n except ValidationError:\n raise ConfigurationError('Missing parameter:', n)\n args[str(name)] = default\n\n if data:\n # we had data left over\n try:\n keyword_arguments = schema.getTaggedValue('keyword_arguments')\n except KeyError:\n keyword_arguments = False\n if not keyword_arguments:\n raise ConfigurationError('Unrecognized parameters:', *data)\n\n for name in data:\n args[str(name)] = data[name]\n\n return args", "def setup_args(cls) -> ParlaiParser:\n # we want to later deprecate this for add_cmdline_args", "def _kwargs(self):\n dict = {\"name\":self.name}\n return dict", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'version': 'str',\n 'tagline': 'str',\n 'keywords': 'str',\n 'short_description': 'str',\n 'usage_information': 'str',\n 'long_description': 'str',\n 'license_model_description': 'str',\n 'system_requirements': 'str',\n 'time_released': 'datetime',\n 'release_notes': 'str',\n 'categories': 'list[str]',\n 'publisher': 'Publisher',\n 'languages': 'list[Item]',\n 'screenshots': 'list[Screenshot]',\n 'videos': 'list[NamedLink]',\n 'support_contacts': 'list[SupportContact]',\n 'support_links': 'list[NamedLink]',\n 'documentation_links': 'list[DocumentationLink]',\n 'icon': 'UploadData',\n 'banner': 'UploadData',\n 'regions': 'list[Region]',\n 'package_type': 'str',\n 'default_package_version': 'str',\n 'links': 'list[Link]',\n 'is_featured': 'bool'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'version': 'version',\n 'tagline': 'tagline',\n 'keywords': 'keywords',\n 'short_description': 'shortDescription',\n 'usage_information': 'usageInformation',\n 'long_description': 'longDescription',\n 'license_model_description': 'licenseModelDescription',\n 'system_requirements': 'systemRequirements',\n 'time_released': 'timeReleased',\n 'release_notes': 'releaseNotes',\n 'categories': 'categories',\n 'publisher': 'publisher',\n 'languages': 'languages',\n 'screenshots': 'screenshots',\n 'videos': 'videos',\n 'support_contacts': 'supportContacts',\n 'support_links': 'supportLinks',\n 'documentation_links': 'documentationLinks',\n 'icon': 'icon',\n 'banner': 'banner',\n 'regions': 'regions',\n 'package_type': 'packageType',\n 'default_package_version': 'defaultPackageVersion',\n 'links': 'links',\n 'is_featured': 'isFeatured'\n }\n\n self._id = None\n self._name = None\n self._version = None\n self._tagline = None\n self._keywords = None\n self._short_description = None\n self._usage_information = None\n self._long_description = None\n self._license_model_description = None\n self._system_requirements = None\n self._time_released = None\n self._release_notes = None\n self._categories = None\n self._publisher = None\n self._languages = None\n self._screenshots = None\n 
self._videos = None\n self._support_contacts = None\n self._support_links = None\n self._documentation_links = None\n self._icon = None\n self._banner = None\n self._regions = None\n self._package_type = None\n self._default_package_version = None\n self._links = None\n self._is_featured = None", "def ReviewServiceArgs(cls, container = '', library = 'Standard', dialogname = ''):\n return container, library, dialogname, ScriptForge.componentcontext", "def add_kwargs_arg(parser):\n parser.add_argument('--arg', '-a', type=__kwargs_arg, metavar='K=V', action='append',\n dest='kwargs', default=[],\n help='any special keyword arguments to pass to the method, formated as '\n 'key=value with value being a valid Python literal or one of the special '\n 'values nan, inf, -inf, N4, N8, N8_DIST, N6, N18, N18_DIST, N26, N26_DIST')", "def getArguments(self):\n ApiCli.getArguments(self)\n\n if self.args.alarm_name is not None:\n self.alarm_name = self.args.alarm_name\n\n if self.args.metric_name is not None:\n self.metric_name = self.args.metric_name\n\n if self.args.aggregate is not None:\n self.aggregate = self.args.aggregate\n\n if self.args.operation is not None:\n self.operation = self.args.operation\n\n if self.args.threshold is not None:\n self.threshold = self.args.threshold\n\n if self.args.interval is not None:\n self.interval = self.args.interval\n\n if self.args.host_group_id is not None:\n self.host_group_id = self.args.host_group_id\n\n if self.args.actions is not None:\n self.actions = self.args.actions\n\n if self.args.note is not None:\n self.note = self.args.note\n\n if self.args.per_host_notify is not None:\n self.per_host_notify = self.args.per_host_notify\n\n if self.args.is_disabled is not None:\n self.is_disabled = self.args.is_disabled\n\n payload = {}\n\n # Create trigger predicate dictionary\n predicate = {}\n\n if self.aggregate is not None:\n predicate['agg'] = self.aggregate\n\n if self.operation is not None:\n predicate['op'] = self.operation\n\n if self.threshold is not None:\n predicate['val'] = self.threshold\n\n if 'agg' in predicate or 'op' in predicate or 'val' in predicate:\n payload['triggerPredicate'] = predicate\n\n # Create payload dictionary\n if self.alarm_name:\n payload['name'] = self.alarm_name\n\n if self.host_group_id is not None:\n payload['hostgroupId'] = self.host_group_id\n\n if self.interval is not None:\n payload['interval'] = self.intervals[self.interval]\n\n if self.metric_name is not None:\n payload['metricName'] = self.metric_name\n\n if self.note is not None:\n payload['note'] = self.note\n\n if self.actions is not None:\n payload['actions'] = self.actions\n\n if self.per_host_notify is not None:\n payload['perHostNotify'] = True if self.per_host_notify == 'yes' else False\n\n if self.is_disabled is not None:\n payload['isDisabled'] = True if self.is_disabled == 'yes' else False\n\n self.data = json.dumps(payload, sort_keys=True)\n self.headers = {'Content-Type': 'application/json'}", "def DeveloperAPI(*args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return DeveloperAPI()(args[0])\n\n def wrap(obj):\n _append_doc(obj, message='DeveloperAPI: This API may change across minor Ludwig releases.')\n _mark_annotated(obj)\n return obj\n return wrap", "def as_api_parameters(self):\n\n data = {}\n for system in self.system_codes:\n data.update({\n \"{0}_callback/{1}\".format(system, variable): self.data[variable]\n for variable in self.data.keys()\n })\n return data", "def list_keywords(self, **kwargs) -> ApiResponse:\n 
return self._request(kwargs.pop('path'), params=kwargs)", "def purefb_argument_spec():\n\n return dict(\n fb_url=dict(),\n api_token=dict(no_log=True),\n )", "def __init__(**params):", "def get_documentation(self, *args, **dargs):\n pass", "def process_api_declaration(self, resources, resource, context):\n pass", "def __call__(self, *args, **kwargs) -> Dict[str, Any]:\n pass", "def generate_api_key(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method generate_api_key\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys/_generate'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ApiKey',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_json_argument_list():\n list_of_arguments_to_get = [\"finish_time\", \"segmentation_training_samples\", \"patch_count_per_image\", \"learning_rate\", \"batch_k\",\n \"batch_p\", \"flip_augment\", \"standardize\", \"margin\", \"metric\"]\n\n return list_of_arguments_to_get", "def Args(parser):\n\n base_classes.RegionalDescriber.Args(parser)\n base_classes.AddFieldsFlag(parser, 'targetVpnGateways')", "def eseries_host_argument_spec():\n argument_spec = basic_auth_argument_spec()\n argument_spec.update(dict(\n api_username=dict(type='str', required=True),\n api_password=dict(type='str', required=True, no_log=True),\n api_url=dict(type='str', required=True),\n ssid=dict(type='str', required=True),\n validate_certs=dict(type='bool', required=False, default=True),\n ))\n return argument_spec", "def params(self, **kwargs):\n return kwargs", "def _set_named_args(self, **kv):\n for k in kv:\n self._body['${0}'.format(k)] = kv[k]\n return self", "def generate_tokens(self, apiview, function_id, *, add_line_marker: bool, prefix: str = \"\"):\n # Add arg name\n self.id = function_id\n if add_line_marker:\n self.id = f\"{function_id}.param({self.argname})\"\n apiview.add_line_marker(self.id)\n\n apiview.add_text(f\"{prefix}{self.argname}\")\n # add arg type\n if self.argtype:\n apiview.add_punctuation(\":\", False, True)\n apiview.add_type(self.argtype, self.id)\n\n # add arg default value\n default = self.default\n if default is not None:\n apiview.add_punctuation(\"=\", True, True)\n if isinstance(default, str) and default not in SPECIAL_DEFAULT_VALUES:\n apiview.add_string_literal(default)\n else:\n if isinstance(default, astroid.node_classes.Name):\n value = default.name\n elif hasattr(default, \"as_string\"):\n value = default.as_string()\n elif inspect.isclass(default):\n value = get_qualified_name(default, apiview.namespace)\n else:\n value = str(default)\n apiview.add_literal(value)", "def 
generate_pretty_key(*args, **kwargs):\n return '\\n'.join((\n \"\",\n \"\\tindex: {}\".format(str(kwargs.get('index'))),\n \"\\tdoc_type: {}\".format(str(kwargs.get('doc_type'))),\n \"\\tbody: {}\".format(str(RecursivelySortedDict(kwargs.get('body')))),\n \"\\tquery: {}\".format(str(RecursivelySortedDict(kwargs.get('query')))),\n ))", "def test_020_kwargs(self):\n caller = self.get_caller([KwargsTaskOverride])\n self.assertEqual([\"A\", \"B\"], caller(\"A\", \"B\"))", "def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. \"\n # \"Zip format\",\n # required=False)", "def __init__(self, **kwargs):\n esp_name = self.esp_name\n self.api_key = get_anymail_setting(\n \"api_key\",\n esp_name=esp_name,\n kwargs=kwargs,\n allow_bare=True,\n )\n api_url = get_anymail_setting(\n \"api_url\",\n esp_name=esp_name,\n kwargs=kwargs,\n default=\"https://api.brevo.com/v3/\",\n )\n if not api_url.endswith(\"/\"):\n api_url += \"/\"\n super().__init__(api_url, **kwargs)", "def __add_arguments__(cls, parser: ArgumentParser) -> None:\n\n parser.add_argument(\n \"-d\",\n \"--data_dict_guid\",\n required=True,\n type=str,\n help=(\n \"The indexd Globally Unique Identifier (GUID) for the data dictionary.\"\n ),\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n required=True,\n type=str,\n help=(\n \"Path to write out the JSON response with file_name and dictionary_url.\"\n ),\n )", "def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']", "def build(keys: List[str]):\n api = API()\n api.build(*keys)", "def parse_args() -> argparse.Namespace:\n\n parser = argparse.ArgumentParser(\n description=\"THE FOLLOWING SCRIPT SHOWS SNAPSHOT OPERATIONS USING REST API.\", )\n parser.add_argument(\n \"-c\", \"--cluster\", required=True, help=\"API server IP:port details\")\n parser.add_argument(\n \"-u\",\n \"--api_user\",\n default=\"admin\",\n help=\"API Username\")\n parser.add_argument(\"-p\", \"--api_pass\", help=\"API Password\")\n parsed_args = parser.parse_args()\n\n # collect the password without echo if not already provided\n if not parsed_args.api_pass:\n parsed_args.api_pass = getpass()\n\n return parsed_args", "def definearguments(self, customparser):\n if not customparser:\n return\n\n add_login_arguments_group(customparser)\n\n customparser.add_argument(\n '--serviceaccount',\n dest='serviceacc',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to created account \"\\\n \"to be a service account.\",\n default=False\n )\n customparser.add_argument(\n '--addprivs',\n dest='optprivs',\n nargs='*',\n action=_AccountParse,\n type=str,\n help=\"Optionally include this flag if you wish to specify \"\\\n \"which privileges you want added to the iLO account. This overrides the default of \"\\\n \"duplicating privileges of the currently logged in account on the new account. Pick \"\\\n \"privileges from the privilege list in the above help text. 
EX: --addprivs=1,2,4\",\n default=None\n )\n customparser.add_argument(\n '--removeprivs',\n dest='optprivs',\n nargs='*',\n action=_AccountParse,\n type=str,\n help=\"Optionally include this flag if you wish to specify \"\\\n \"which privileges you want removed from the iLO account. This overrides the default of\"\\\n \" duplicating privileges of the currently logged in account on the new account. Pick \"\\\n \"privileges from the privilege list in the above help text. EX: --removeprivs=1,2,4\",\n default=None\n )\n customparser.add_argument(\n '--role',\n dest='role',\n choices=['Administrator', 'ReadOnly', 'Operator'],\n help=\"Optionally include this flag if you would like to specify Privileges by role. \"\\\n \"Valid choices are: Administrator, ReadOnly, Operator\",\n default=None\n )\n customparser.add_argument(\n '-j',\n '--json',\n dest='json',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to change the\"\\\n \" displayed output to JSON format. Preserving the JSON data\"\\\n \" structure makes the information easier to parse.\",\n default=False\n )", "def init_cloud_api(self, args=None):\n pass", "def generateKwArgs(self, axisList=None):\n if axisList is None:\n axisList = self.tabWidget.currentWidget()\n\n kwargs = {} \n for axisWidget in axisList.getAxisWidgets():\n kwargs[axisWidget.axis.id] = axisWidget.getCurrentValues()\n\n # Generate additional args\n kwargs['squeeze'] = 0\n kwargs['order'] = axisList.getAxesOrderString()\n\n return kwargs", "def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )" ]
[ "0.6347063", "0.6342163", "0.6047952", "0.60061", "0.5908773", "0.590851", "0.58989066", "0.58448356", "0.58410525", "0.58357745", "0.5807142", "0.57746965", "0.57710725", "0.5671101", "0.5665809", "0.56421447", "0.5637025", "0.5618054", "0.55946404", "0.5592014", "0.5560202", "0.55435866", "0.55219644", "0.5513859", "0.55076927", "0.5471917", "0.5468453", "0.54666245", "0.54599166", "0.5444413", "0.54098856", "0.54020035", "0.53864455", "0.53802913", "0.5366934", "0.5363213", "0.5363128", "0.5362455", "0.5349671", "0.5344664", "0.5342888", "0.53296113", "0.5319646", "0.53111124", "0.5305683", "0.530556", "0.5298574", "0.52954966", "0.5295396", "0.52950495", "0.528783", "0.52767193", "0.52699655", "0.52658284", "0.52645344", "0.5263485", "0.52562755", "0.5254318", "0.5254126", "0.5243462", "0.524164", "0.52368176", "0.5232426", "0.52270466", "0.52158517", "0.5210464", "0.52093345", "0.52089053", "0.52083707", "0.520799", "0.5207857", "0.5206446", "0.5206218", "0.52048296", "0.52038634", "0.52017766", "0.5201018", "0.5200218", "0.5199606", "0.5196969", "0.5185607", "0.5183269", "0.51798016", "0.51741755", "0.5165121", "0.5162968", "0.5161967", "0.5158466", "0.51557755", "0.5151786", "0.51453304", "0.51452804", "0.5144242", "0.51425505", "0.5141428", "0.51367986", "0.5135297", "0.5132318", "0.5131926", "0.5131048" ]
0.5200405
77
Generate API keyword args for these details.
def as_api_parameters(self):
    return {
        '{}_callback/stripeToken'.format(source): token
        for source, token in self.tokens.items()
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extra_target_arguments(self):\n return {}", "def init_args(self):\n return {\n \"doc\": self.__doc__.format(name=colored(self.module_name, \"green\", attrs=['bold','underline'])),\n \"Url\": \"set a target url\",\n 'Type': \"set type to check , [php, asp, aspx, cgi, dir , mdb]\",\n }", "def as_api_parameters(self):\n raise NotImplementedError(\n 'as_api_parameters not implemented on ' + self.__class__)", "def help_args():\n pass", "def Help():\n names=api_method_dict.keys()\n names.sort()\n return ''.join(['**** ' + api_method_dict[name].__name__ + '\\n' + api_method_dict[name].__doc__ + '\\n'\n for name in names])", "def add_extra_args(self):\n super(AwsAccessListKeysMethod, self).add_extra_args()\n self.parser.add_argument(\"--key_pair_name\", required=False, default=None,\n help=\"AWS Key Pair name\")", "def _format_api_string(self):\n api_string = self.api_name\n arg_string_list = []\n if not self.api_args is None and \\\n len(self.api_args) > 0:\n for key in self.api_args:\n try:\n value = self.api_args[key]\n except TypeError:\n #assert False, f\"node: {self.api_name} key: {key} bad arg: {self.api_args}\" + \\\n # f\" type: {self.api_args.__class__.__name__}\"\n print(f\"node: {self.api_name} key: {key} bad arg: {self.api_args}\" + \\\n f\" type: {self.api_args.__class__.__name__}\")\n raise TypeError\n if isinstance(value, list):\n value_string = \"[ \" + \",\".join([self._api_value_string(x) for x in value]) + \" ]\"\n else:\n if value is None:\n assert False, f\"key={key}\"\n value_string = self._api_value_string(value)\n arg_string_list.append(f\"{key}:{value_string}\")\n api_string += \"(\" + \" \".join(arg_string_list) + \")\"\n #assert False, f\"{self.api_name}: {api_string}\"\n return api_string", "def valid_args(self):\r\n for k in request.args.keys():\r\n if k not in ['api_key']:\r\n getattr(self.__class__, k)", "def api(self) -> str:", "def generateKwargsAsString(self):\n args = \"\"\n axisList = self.tabWidget.currentWidget()\n\n for axisWidget in axisList.getAxisWidgets():\n args += \"%s = %s, \" % (axisWidget.axis.id,\n axisWidget.getCurrentValuesAsStr())\n\n # Generate additional args\n args += 'squeeze = 0'\n args += \", order = '%s' \" % axisList.getAxesOrderString()\n return args", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def get_dynamic_setup_params():\n\n return {\n # Retrieve the long description from the README\n \"long_description\": read_file(\"README.md\")\n }", "def add_extra_args(self):\n pass", "def get_keys_info() -> Dict[str, List[str]]:\n args_dict = {}\n\n for api in API_DICT:\n arg_list = list(\n getattr(\n sys.modules[__name__], \"set_\" + str(api) + \"_key\"\n ).__code__.co_varnames\n )\n arg_list.remove(\"persist\")\n arg_list.remove(\"show_output\")\n args_dict[api] = arg_list\n\n return args_dict", "def pykwarg(self):\n return self._pykwarg", "def full_args():\n return setup_args()", "def _generate_keywords(self):\n _keywords = [*self._lookup_opcodes_dir.keys(), *self._registers_list.keys()]\n for key in _keywords:\n self._keywords.extend(key.split(\" \"))\n return", "def create_parameters_description():\n description = OrderedDict()\n description['GeneralArguments'] = [\n {\n 'main_argument_name': '--config-file',\n 'argument_name_options': ['--config'],\n 'parameter_name': 'config_file',\n 'help': \"\"\"A json-encoded configuration file, in which one can specify the parameters\n for all detectors in use as well as some general parameters for the whole run.\n The encoded object should therefore 
be a dictionary,\n with possible top-level keys 'GeneralArguments' (general parameters, not relevant\n to a detector class), 'SaccadeDetector', 'BlinkDetector', 'FixationDetector'\n and 'SmoothPursuitDetector'.\n\n The value for each of the present keys should in turn be a dictionary with keys\n identical to the longest argument names below, without the eye movement name prefix.\n An example (and equivalent to default parameters) configuration file is provided\n in default_parameters.conf.json and includes all possible keys.\n\n In your custom configuration file you do not have to specify any the parameter values,\n missing keys will be considered to have the default value.\n\n For default values, you can consult the respective classes' __init__ methods in\n saccade_detector.py, blink_detector.py, fixation_detector.py and sp_detector.py.\n\n\n Values given through the console interface override the ones in the config file.\"\"\",\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--input-folder',\n 'argument_name_options': ['--in'],\n 'parameter_name': 'input_folder',\n 'help': 'From where to load the gaze points data. If absent, must be present in --config-file file. '\n 'This folder is assumed to have subfolders that correspond to videos, for which recordings '\n 'were made. Each such subdirectory should contain gaze files (one file per observer).',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--gaze-file-pattern',\n 'argument_name_options': ['--pattern'],\n 'parameter_name': 'gaze_file_pattern',\n 'help': 'Will look for such files in all subdirectories of --input-folder. '\n 'For GazeCom, \\'*.arff\\' is a recommended value (or \\'*.coord\\', if dealing with original dataset files). '\n 'One can use this parameter to match some name pattern as well (not just the file extension), '\n 'for example with \\'*_needed_files_*.arff\\'. \\n'\n 'If no wildcard symbol is found in the provided string, it is assumed to be just the file name '\n 'suffix, so it will be prepended with a wildcard symbol (i.e. \".coord\" will become \"*.coord\").',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--input-data-type',\n 'argument_name_options': ['--type'],\n 'parameter_name': 'input_data_type',\n 'help': 'Type of data loader to use (if not specified, will try to detect automatically)',\n 'kwargs': {'choices': ['DSF', 'ARFF', 'labelled ARFF']}\n },\n {\n 'main_argument_name': '--verbose',\n 'argument_name_options': ['-v'],\n 'parameter_name': 'verbose',\n 'default': None,\n 'help': 'Whether to output some information about the progress of the run to STDERR',\n 'kwargs': {'action': 'store_const', 'const': True} # only like this can support the default of None\n # (not to override the config all the time\n # with a missing value)\n },\n {\n 'main_argument_name': '--movies',\n 'argument_name_options': ['-m'],\n 'parameter_name': 'movies',\n 'help': 'Which movies out of the input folder to use (might be useful for train/test split). '\n 'The gaze data is supposed to be put under respective directories in the input folder. 
'\n 'If none are given, all available ones are used.',\n 'kwargs': {'nargs': '+', 'default': None}\n },\n {\n 'main_argument_name': '--output-folder',\n 'argument_name_options': ['--out'],\n 'parameter_name': 'output_folder',\n 'help': 'Where to output the resulting labelled data (if empty, will create a new temporary directory)',\n 'kwargs': {}\n },\n ]\n\n description['SaccadeDetector'] = [\n {\n 'main_argument_name': '--tolerance',\n 'argument_name_options': ['--tol'],\n 'parameter_name': 'tolerance',\n 'help': 'The relative size of the area outside the screen that is still considered to be legal',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-onset-fast-degree-per-sec',\n 'argument_name_options': ['--threshold-onset-fast'],\n 'parameter_name': 'threshold_onset_fast_degree_per_sec',\n 'help': 'Threshold for initialization of saccade detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-onset-slow-degree-per-sec',\n 'argument_name_options': ['--threshold-onset-slow'],\n 'parameter_name': 'threshold_onset_slow_degree_per_sec',\n 'help': 'A slower threshold for saccade onset detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-offset-degree-per-sec',\n 'argument_name_options': ['--threshold-offset'],\n 'parameter_name': 'threshold_offset_degree_per_sec',\n 'help': 'Threshold for saccade offset detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--max-speed-degree-per-sec',\n 'argument_name_options': ['--max-speed'],\n 'parameter_name': 'max_speed_degree_per_sec',\n 'help': 'Maximum speed of saccadic eye movements',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--min-duration-microsec',\n 'argument_name_options': ['--min-duration'],\n 'parameter_name': 'min_duration_microsec',\n 'help': 'Minimal saccade duration threshold',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--max-duration-microsec',\n 'argument_name_options': ['--max-duration'],\n 'parameter_name': 'max_duration_microsec',\n 'help': 'Maximal saccade duration threshold',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--velocity-integral-interval-microsec',\n 'argument_name_options': ['--velocity-integral-interval'],\n 'parameter_name': 'velocity_integral_interval_microsec',\n 'help': 'Interval duration, over which to integrate velocity computation.',\n 'kwargs': {'type': float}\n },\n ]\n\n description['BlinkDetector'] = [\n {\n 'main_argument_name': '--max-distance-to-saccade-microsec',\n 'argument_name_options': ['--max-distance-to-saccade'],\n 'parameter_name': 'max_distance_to_saccade_microsec',\n 'help': 'Threshold for distance from a definite blink to a nearby saccade, which will be marked as blink '\n 'as well.',\n 'kwargs': {'type': float}\n },\n ]\n\n description['FixationDetector'] = [\n {\n 'main_argument_name': '--prefiltering-interval-spread-threshold-degrees',\n 'argument_name_options': ['--prefiltering-interval-spread-threshold'],\n 'parameter_name': 'prefiltering_interval_spread_threshold_degrees',\n 'help': 'All the intersaccadic intervals shorter than this will be deemed fixations',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--min-sp-duration-microsec',\n 'argument_name_options': ['--min-sp-duration'],\n 'parameter_name': 'min_sp_duration_microsec',\n 'help': 'Minimal duration of a potential SP candidate (fast-moving samples shorter than this threshold '\n 'are 
labelled as noise)',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--sliding-window-width-microsec',\n 'argument_name_options': ['--sliding-window-width'],\n 'parameter_name': 'sliding_window_width_microsec',\n 'help': 'Sliding window for coordinates smoothing',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--normalization-sliding-window-size-samples',\n 'argument_name_options': ['--normalization-sliding-window'],\n 'parameter_name': 'normalization_sliding_window_size_samples',\n 'help': 'A moving average sliding window size (to normalize the data)',\n 'kwargs': {'type': int}\n },\n {\n 'main_argument_name': '--speed-threshold-degrees-per-sec',\n 'argument_name_options': ['--speed-threshold'],\n 'parameter_name': 'speed_threshold_degrees_per_sec',\n 'help': 'Biggest plausible speed for a noisy fixation',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--sliding-window-criterion',\n 'argument_name_options': ['--sliding-window'],\n 'parameter_name': 'sliding_window_criterion',\n 'help': 'Defines the way we check the samples with the sliding_window_criterion threshold: '\n 'either compute the average speed in the current window, or get the spread of '\n 'the gaze points (i.e. biggest XY bounding box side), divided by the duration',\n 'kwargs': {'choices': ['speed', 'spread']}\n },\n {\n 'main_argument_name': '--intersaccadic-interval-duration-threshold-microsec',\n 'argument_name_options': ['--intersaccadic-interval-duration-threshold'],\n 'parameter_name': 'intersaccadic_interval_duration_threshold_microsec',\n 'help': 'Minimal size of the intersaccadic interval to apply the step with the moving average analysis',\n 'kwargs': {'type': float}\n },\n ]\n\n description['SmoothPursuitDetector'] = [\n # a mutually exclusive group\n [\n {\n 'main_argument_name': '--min-pts',\n 'argument_name_options': [],\n 'parameter_name': 'min_pts',\n 'soft_type': int,\n 'help': 'An integer indicating the minimum number of points required to form a core point\\'s '\n 'neighbourhood, or a string \\'num_observers\\' (meaning that the actual number of observers '\n 'for each movie will be substituted, depending on the data set provided).\\n'\n 'This option is mutually exclusive with --min-observers.',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--min-observers',\n 'argument_name_options': [],\n 'parameter_name': 'min_observers',\n # first try casting to int, then to float (since int cast will fail for a float)\n 'soft_type': [int, float],\n 'help': 'Either a floating point in [0.0; 1.0] range (indicating the share of all the present '\n 'observers per movie) or int [2; +\\inf) (indicating the absolute threshold for '\n 'observer count in the core point\\'s neighbourhood).\\n'\n 'This option is mutually exclusive with --min-pts.',\n 'kwargs': {}\n }\n ],\n {\n 'main_argument_name': '--eps-deg',\n 'argument_name_options': ['--eps'],\n 'parameter_name': 'eps_deg',\n 'help': 'Spatial Euclidean distance threshold that defines the neighbourhood in the XY-plane',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--time-slice-microsec',\n 'argument_name_options': ['--time-slice'],\n 'parameter_name': 'time_slice_microsec',\n 'help': 'Width of the time slice that defines the size of the neighbourhood on the time axis.',\n 'kwargs': {'type': float}\n },\n ]\n\n return description", "def _generate_params(self):\n return {\n 'lis_outcome_service_url': self.lis_outcome_service_url,\n 'lis_result_sourcedid': self.lis_result_sourcedid,\n 'oauth_consumer_key': 
self.key\n }", "def create_api_keys(self, **kwargs):\n\n all_params = ['api_key']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_api_keys\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'api_key' in params:\n body_params = params['api_key']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ApiKeyWithPrivileges',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def extra_args(self):\n return []", "def _core_ar_kwarg(self,**kwargs) :\n\t\tpass", "def format_arguments(self, **kwargs):\n return kwargs", "def purefa_argument_spec():\n\n return dict(\n fa_url=dict(),\n api_token=dict(no_log=True),\n )", "def get_dynamic_setup_params():\n return {\n # Retrieve the long description from the README\n # 'long_description': read_file('README.rst'),\n 'install_requires': substitute_crypto_to_req(\n read_requirements('requirements.txt'),\n ),\n # 'extras_require': read_extras(),\n }", "def handle_filters():\n params = {'api_key': API_KEY}\n for k in demisto.args():\n if demisto.getArg(k):\n params[k] = demisto.getArg(k)\n return params", "def get_args():\n return {\"id\": fields.UUID(required=True, location=\"view_args\")}", "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def get_api_key(context) -> str:\n provided_api_key = \"\"\n for key, value in context.invocation_metadata():\n if key == \"api_key\":\n provided_api_key = str(value)\n return provided_api_key\n return provided_api_key", "def custom_openapi() -> Dict:\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"The GenomicMedLab Cool Seq Tool\",\n version=__version__,\n description=\"Common Operations On Lots-of Sequences Tool.\",\n routes=app.routes\n )\n\n openapi_schema[\"info\"][\"contact\"] = {\n \"name\": \"Alex H. 
Wagner\",\n \"email\": \"Alex.Wagner@nationwidechildrens.org\",\n \"url\": \"https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab\" # noqa: E501\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema", "def get_argument_as_keywords(self):\n status = True\n arg_kv = self.get_values_for_mandatory_args()\n if len(arg_kv) != len(self.req_args_list):\n msg = 'could not execute %s without mandatory arguments' % (object)\n self.data_repository = skip_and_report_status(self.data_repository, msg)\n status = False\n arg_kv = self.get_values_for_optional_args(arg_kv)\n return arg_kv, status", "def edit_keywords(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)", "def index_args():\n return {}", "def setup(self):\r\n \r\n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:\r\n \r\n # Set the required parameters\r\n for arg in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addReqArg(arg)\r\n \r\n # Set up the valid parameters\r\n for arg in RadiusAuthRestHandler.VALID_PARAMS:\r\n if arg not in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addOptArg(arg)", "def get_kwargs(self):\n return {}", "def add_required_arguments(self, *args):\n self._add_sample_specific_arguments(True, *args)", "def generate_call_string(self):\n if(self.api_key is None):\n raise error(\"API Key is not defined\");#Should base class do this? \n \n self.call_url=self.baseurl;\n if hasattr(self,'search_str'):\n self.call_url+=self.search_str;\n if hasattr(self,'filter_field_str'):\n self.call_url=self.call_url+'&'+self.filter_field_str;\n \n #loop over the parameters dict\n for key in self.input_params:\n self.call_url+=self.input_params[key];\n \n #finally add api key. 
at this point already checked it exists\n self.call_url=self.call_url+'&'+\"api-key=\"+str(self.api_key);\n return;", "def _custom_actioner(message: ActionMessage, defined_keyword_arg, **kwargs):\n print(message.additional_fields)\n print(defined_keyword_arg)\n print(kwargs)", "def manage_params(args):\n # Socrata API\n with open(\"secret/builtby-socrata.yaml\", 'r') as f:\n try:\n socrata_api_credentials = yaml.load(f)\n except yaml.YAMLError as exc:\n print(exc)\n\n socrata_app_token = socrata_api_credentials['app_token']\n\n # base params\n params = {\n '$$app_token': socrata_app_token\n }\n # remove null attributes\n args = {k: v for k, v in args.items() if v is not None}\n # add args to params\n params.update(args) # inplace\n\n return params", "def gen_args(self, obj, pa_names = False):\n\n pal, kwal = get_class_total_args(type(obj))\n\n try:\n get_val = type(obj).__get_init_arg_val__\n except AttributeError:\n get_val = getattr\n\n for pa in pal:\n v = get_val(obj, pa)\n self.gen_field((pa + \" = \") if pa_names else \"\")\n self.pprint(v)\n\n for kwa, default in kwal.items():\n try:\n v = get_val(obj, kwa)\n except AttributeError:\n # If value cannot be obtained, skip the argument generation\n continue\n\n # generate only arguments with non-default values\n if (v is default) or (v == default):\n continue\n\n self.gen_field(kwa + \" = \")\n self.pprint(v)", "def generate_arg_and_kwags():\n def gen_func(\n #df: DataSource,\n option: List[list],\n style: List[dict]\n )->List[Tuple[list, dict]]:\n\n if len(option) != len(style):\n raise SystemError(\"option and style must be same size list.\")\n\n arg_and_kwarg = []\n for o, s in zip(option, style):\n arg = [*o]\n kwargs = s\n arg_and_kwarg.append((arg, kwargs))\n return arg_and_kwarg\n return gen_func", "def define_parameters(self):", "def _add_pos_args(self, *args):\n arg_array = self._params.setdefault(\"positional_parameters\", [])\n # couchbase++ wants all args JSONified\n json_args = [json.dumps(arg) for arg in args]\n arg_array.extend(json_args)", "def add_kwargs():\n pass", "def _collect_repr_args(self, poargs, kwargs):", "def make_args(self, args):\n result_str = \"?\"\n for k, v in args.iteritems():\n result_str = result_str + k + \"=\" + v + \"&\"\n return result_str", "def Args(parser):\n flags.AddHcxActivationKeyArgToParser(parser)\n base.ASYNC_FLAG.AddToParser(parser)\n base.ASYNC_FLAG.SetDefault(parser, True)\n parser.display_info.AddFormat('yaml')", "def add_args(self):\n raise NotImplementedError", "def _set_named_args(self, **kv):\n # named_params = {}\n # for k in kv:\n # named_params[\"${0}\".format(k)] = json.dumps(kv[k])\n # couchbase++ wants all args JSONified\n named_params = {f'${k}': json.dumps(v) for k, v in kv.items()}\n\n self._params[\"named_parameters\"] = named_params\n return self", "def format_args(self, **kwargs: Any) -> str:\n return \"\"", "def _setup_api_properties(self):\n self.implicit_api_logical_id = GeneratedLogicalId.implicit_http_api()\n self.implicit_api_condition = \"ServerlessHttpApiCondition\"\n self.api_event_type = \"HttpApi\"\n self.api_type = SamResourceType.HttpApi.value\n self.api_id_property = \"ApiId\"\n self.editor = OpenApiEditor", "def get_cli_arguments(self):\n pass", "def _docs_params(**kwds):\n\n def dec(obj):\n obj.__orig_doc__ = obj.__doc__\n obj.__doc__ = dedent(obj.__doc__).format_map(kwds)\n return obj\n\n return dec", "def _make_args(self, args, defaults=[], vararg=None, kwonlyargs=[],\n kw_defaults=[], kwarg=None):\n # On Python 2 convert vararg and kwarg to raw 
name, raise error using\n # lineno stored on the node and lexer from self.\n # On Python 3.3 extract name and annotation\n # After should be straight forward\n raise NotImplementedError()", "def fill_args(cls, toolchain, parser):\n pass # pass must be overloaded (if required)", "def base_arguments(self):\n raise NotImplementedError()", "def help(self, keyword):\n if (keyword == 'all'):\n string = ('%-20s%-20s%-20s%s\\n' % ('Keyword', 'Type', 'Default', 'Comment'))\n for key, value in self.allowed_keys.items():\n string += ('%-20s%-20s%-20s%s\\n' % (key, str(value[0]), str(value[1]), value[2]))\n print string", "def main(pArgs):\n\n # Options and args... \n \n longoptions=[\"help\", \"usage\", \"endpoint=\", \"interface-type=\", \"verbose=\", \\\n \"recursive\", \"dbs-conf=\", \"show-prod\", \"show-caf\", \\\n \"only-subscribed\", \"only-custodial\"]", "def api():\n\treturn \"The API call\"", "def keyword_only(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if len(args) > 0:\n raise TypeError(\"Method %s only takes keyword arguments.\" % func.__name__)\n return func(**kwargs)\n notice = \".. Note:: This method requires all argument be specified by keyword.\\n\"\n wrapper.__doc__ = notice + wrapper.__doc__\n return wrapper", "def definearguments(self, customparser):\n if not customparser:\n return\n customparser.add_option(\n '--url',\n dest='url',\n help=\"Use the provided iLO URL to login.\",\n default=None,\n )\n customparser.add_option(\n '-u',\n '--user',\n dest='user',\n help=\"If you are not logged in yet, including this flag along\"\\\n \" with the password and URL flags can be used to log into a\"\\\n \" server in the same command.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-p',\n '--password',\n dest='password',\n help=\"\"\"Use the provided iLO password to log in.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-e',\n '--enc',\n dest='encode',\n action='store_true',\n help=SUPPRESS_HELP,\n default=False,\n )", "def set_api_access_keys(**kwargs):\n API_BASE_PARAMS['key'] = kwargs['key']", "def args_str(self):", "def build_method(method_name, description, parameters, api_path, http_method, summary, return_type):\n allow_per_page = False\n parameters = check_for_pre_attachment_param(parameters)\n arg_list = get_parameters(parameters)\n param_descriptions = get_parameter_descriptions(parameters)\n payload = build_payload(parameters)\n enums = check_for_enums(parameters)\n\n \"\"\"\n If the method returns an array, allow the per_page parameter for paging\n \"\"\"\n if return_type == 'array' or (method_name.startswith(\"list_\") and http_method == \"GET\"):\n arg_list.append('per_page=None')\n param_descriptions.append(':param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE')\n param_descriptions.append(':type per_page: integer or None')\n payload.append('\\'per_page\\': per_page,')\n allow_per_page = True\n\n arg_list.append('**request_kwargs')\n\n \"\"\"\n Create the method signature\n \"\"\"\n\n content = line_format('def ' + method_name + '(request_ctx, ' + ', '.join(arg_list) + '):', NONE)\n content += line_format('\"\"\"', FOUR)\n\n \"\"\"\n Create the method description text from the description in the meta api\n \"\"\"\n regex = re.compile(r'\\{api\\:(\\w+)\\#(\\w+).*?\\}')\n for line in description.splitlines(True):\n rst_line = regex.sub(format_api_string, line)\n content += line_format(rst_line.rstrip(), FOUR)\n\n \"\"\"\n list out the method paramters\n \"\"\"\n content += line_format('', 
NONE)\n content += line_format(':param request_ctx: The request context', EIGHT)\n content += line_format(':type request_ctx: :class:RequestContext', EIGHT)\n for param in param_descriptions:\n content += line_format(param, EIGHT)\n content += line_format(':return: '+summary, EIGHT)\n content += line_format(':rtype: requests.Response (with ' + return_type + ' data)', EIGHT)\n content += line_format('', NONE)\n content += line_format('\"\"\"', FOUR)\n content += line_format('', NONE)\n\n \"\"\"\n Add the per_page check\n \"\"\"\n if allow_per_page:\n content += line_format('if per_page is None:', FOUR)\n content += line_format('per_page = request_ctx.per_page', EIGHT)\n\n \"\"\"\n Add any enums if they exist.\n \"\"\"\n for enum in enums:\n content += line_format(enum, FOUR)\n\n \"\"\"\n Add the api path\n \"\"\"\n path_formatted = 'path = \\'' + api_path + '\\''\n content += line_format(path_formatted, FOUR)\n\n \"\"\"\n Add a payload if one exists\n \"\"\"\n payload_string = ''\n if payload:\n content += line_format('payload = {', FOUR)\n for item in payload:\n content += line_format(item, EIGHT)\n content += line_format('}', FOUR)\n payload_string = ', payload=payload'\n\n content += line_format('url = request_ctx.base_api_url + path.format(' + ', '.join(get_path_parameters(parameters)) + ')', FOUR)\n content += line_format(\n 'response = client.'+http_method.lower()+'(request_ctx, url' + payload_string + ', **request_kwargs)', FOUR)\n\n content += line_format('', NONE)\n content += line_format('return response', FOUR)\n content += line_format('', NONE)\n content += line_format('', NONE)\n return content", "def get_api_keys(self, **kwargs):\n\n all_params = ['page', 'per_page', '_from', 'to', 'sort_dir', 'sort_field', 'filters']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_api_keys\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page' in params:\n query_params['_page'] = params['page']\n if 'per_page' in params:\n query_params['_perPage'] = params['per_page']\n if '_from' in params:\n query_params['_from'] = params['_from']\n if 'to' in params:\n query_params['_to'] = params['to']\n if 'sort_dir' in params:\n query_params['_sortDir'] = params['sort_dir']\n if 'sort_field' in params:\n query_params['_sortField'] = params['sort_field']\n if 'filters' in params:\n query_params['_filters'] = params['filters']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[ApiKey]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def define_parameters(self):\n self.add_argument('--prefix', dest='prefix', type=str, optional=False,\n help='prefix for file names')\n 
self.add_argument('--sleepLength',\n dest = 'sleepLength',\n type = str,\n optional = True,\n help ='time to sleep before performing plugin action',\n default = '0')", "def toargs(context, schema, data):\n data = dict(data)\n args = {}\n for name, field in schema.namesAndDescriptions(True):\n field = field.bind(context)\n n = name\n if n.endswith('_') and iskeyword(n[:-1]):\n n = n[:-1]\n\n s = data.get(n, data)\n if s is not data:\n s = str(s)\n del data[n]\n\n try:\n args[str(name)] = field.from_unicode(s)\n except ValidationError as v:\n reraise(ConfigurationError('Invalid value for', n, str(v)),\n None, sys.exc_info()[2])\n elif field.required:\n # if the default is valid, we can use that:\n default = field.default\n try:\n field.validate(default)\n except ValidationError:\n raise ConfigurationError('Missing parameter:', n)\n args[str(name)] = default\n\n if data:\n # we had data left over\n try:\n keyword_arguments = schema.getTaggedValue('keyword_arguments')\n except KeyError:\n keyword_arguments = False\n if not keyword_arguments:\n raise ConfigurationError('Unrecognized parameters:', *data)\n\n for name in data:\n args[str(name)] = data[name]\n\n return args", "def setup_args(cls) -> ParlaiParser:\n # we want to later deprecate this for add_cmdline_args", "def _kwargs(self):\n dict = {\"name\":self.name}\n return dict", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'version': 'str',\n 'tagline': 'str',\n 'keywords': 'str',\n 'short_description': 'str',\n 'usage_information': 'str',\n 'long_description': 'str',\n 'license_model_description': 'str',\n 'system_requirements': 'str',\n 'time_released': 'datetime',\n 'release_notes': 'str',\n 'categories': 'list[str]',\n 'publisher': 'Publisher',\n 'languages': 'list[Item]',\n 'screenshots': 'list[Screenshot]',\n 'videos': 'list[NamedLink]',\n 'support_contacts': 'list[SupportContact]',\n 'support_links': 'list[NamedLink]',\n 'documentation_links': 'list[DocumentationLink]',\n 'icon': 'UploadData',\n 'banner': 'UploadData',\n 'regions': 'list[Region]',\n 'package_type': 'str',\n 'default_package_version': 'str',\n 'links': 'list[Link]',\n 'is_featured': 'bool'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'version': 'version',\n 'tagline': 'tagline',\n 'keywords': 'keywords',\n 'short_description': 'shortDescription',\n 'usage_information': 'usageInformation',\n 'long_description': 'longDescription',\n 'license_model_description': 'licenseModelDescription',\n 'system_requirements': 'systemRequirements',\n 'time_released': 'timeReleased',\n 'release_notes': 'releaseNotes',\n 'categories': 'categories',\n 'publisher': 'publisher',\n 'languages': 'languages',\n 'screenshots': 'screenshots',\n 'videos': 'videos',\n 'support_contacts': 'supportContacts',\n 'support_links': 'supportLinks',\n 'documentation_links': 'documentationLinks',\n 'icon': 'icon',\n 'banner': 'banner',\n 'regions': 'regions',\n 'package_type': 'packageType',\n 'default_package_version': 'defaultPackageVersion',\n 'links': 'links',\n 'is_featured': 'isFeatured'\n }\n\n self._id = None\n self._name = None\n self._version = None\n self._tagline = None\n self._keywords = None\n self._short_description = None\n self._usage_information = None\n self._long_description = None\n self._license_model_description = None\n self._system_requirements = None\n self._time_released = None\n self._release_notes = None\n self._categories = None\n self._publisher = None\n self._languages = None\n self._screenshots = None\n 
self._videos = None\n self._support_contacts = None\n self._support_links = None\n self._documentation_links = None\n self._icon = None\n self._banner = None\n self._regions = None\n self._package_type = None\n self._default_package_version = None\n self._links = None\n self._is_featured = None", "def ReviewServiceArgs(cls, container = '', library = 'Standard', dialogname = ''):\n return container, library, dialogname, ScriptForge.componentcontext", "def add_kwargs_arg(parser):\n parser.add_argument('--arg', '-a', type=__kwargs_arg, metavar='K=V', action='append',\n dest='kwargs', default=[],\n help='any special keyword arguments to pass to the method, formated as '\n 'key=value with value being a valid Python literal or one of the special '\n 'values nan, inf, -inf, N4, N8, N8_DIST, N6, N18, N18_DIST, N26, N26_DIST')", "def getArguments(self):\n ApiCli.getArguments(self)\n\n if self.args.alarm_name is not None:\n self.alarm_name = self.args.alarm_name\n\n if self.args.metric_name is not None:\n self.metric_name = self.args.metric_name\n\n if self.args.aggregate is not None:\n self.aggregate = self.args.aggregate\n\n if self.args.operation is not None:\n self.operation = self.args.operation\n\n if self.args.threshold is not None:\n self.threshold = self.args.threshold\n\n if self.args.interval is not None:\n self.interval = self.args.interval\n\n if self.args.host_group_id is not None:\n self.host_group_id = self.args.host_group_id\n\n if self.args.actions is not None:\n self.actions = self.args.actions\n\n if self.args.note is not None:\n self.note = self.args.note\n\n if self.args.per_host_notify is not None:\n self.per_host_notify = self.args.per_host_notify\n\n if self.args.is_disabled is not None:\n self.is_disabled = self.args.is_disabled\n\n payload = {}\n\n # Create trigger predicate dictionary\n predicate = {}\n\n if self.aggregate is not None:\n predicate['agg'] = self.aggregate\n\n if self.operation is not None:\n predicate['op'] = self.operation\n\n if self.threshold is not None:\n predicate['val'] = self.threshold\n\n if 'agg' in predicate or 'op' in predicate or 'val' in predicate:\n payload['triggerPredicate'] = predicate\n\n # Create payload dictionary\n if self.alarm_name:\n payload['name'] = self.alarm_name\n\n if self.host_group_id is not None:\n payload['hostgroupId'] = self.host_group_id\n\n if self.interval is not None:\n payload['interval'] = self.intervals[self.interval]\n\n if self.metric_name is not None:\n payload['metricName'] = self.metric_name\n\n if self.note is not None:\n payload['note'] = self.note\n\n if self.actions is not None:\n payload['actions'] = self.actions\n\n if self.per_host_notify is not None:\n payload['perHostNotify'] = True if self.per_host_notify == 'yes' else False\n\n if self.is_disabled is not None:\n payload['isDisabled'] = True if self.is_disabled == 'yes' else False\n\n self.data = json.dumps(payload, sort_keys=True)\n self.headers = {'Content-Type': 'application/json'}", "def DeveloperAPI(*args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return DeveloperAPI()(args[0])\n\n def wrap(obj):\n _append_doc(obj, message='DeveloperAPI: This API may change across minor Ludwig releases.')\n _mark_annotated(obj)\n return obj\n return wrap", "def as_api_parameters(self):\n\n data = {}\n for system in self.system_codes:\n data.update({\n \"{0}_callback/{1}\".format(system, variable): self.data[variable]\n for variable in self.data.keys()\n })\n return data", "def list_keywords(self, **kwargs) -> ApiResponse:\n 
return self._request(kwargs.pop('path'), params=kwargs)", "def purefb_argument_spec():\n\n return dict(\n fb_url=dict(),\n api_token=dict(no_log=True),\n )", "def as_api_parameters(self):\n return {\n 'return_token': self.token,\n 'return_url': self.url,\n 'client_http_user_agent': self.user_agent,\n 'client_http_accept': self.accept,\n 'remote_site': self.remote_site,\n }", "def __init__(**params):", "def get_documentation(self, *args, **dargs):\n pass", "def process_api_declaration(self, resources, resource, context):\n pass", "def __call__(self, *args, **kwargs) -> Dict[str, Any]:\n pass", "def generate_api_key(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method generate_api_key\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys/_generate'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ApiKey',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_json_argument_list():\n list_of_arguments_to_get = [\"finish_time\", \"segmentation_training_samples\", \"patch_count_per_image\", \"learning_rate\", \"batch_k\",\n \"batch_p\", \"flip_augment\", \"standardize\", \"margin\", \"metric\"]\n\n return list_of_arguments_to_get", "def Args(parser):\n\n base_classes.RegionalDescriber.Args(parser)\n base_classes.AddFieldsFlag(parser, 'targetVpnGateways')", "def eseries_host_argument_spec():\n argument_spec = basic_auth_argument_spec()\n argument_spec.update(dict(\n api_username=dict(type='str', required=True),\n api_password=dict(type='str', required=True, no_log=True),\n api_url=dict(type='str', required=True),\n ssid=dict(type='str', required=True),\n validate_certs=dict(type='bool', required=False, default=True),\n ))\n return argument_spec", "def params(self, **kwargs):\n return kwargs", "def _set_named_args(self, **kv):\n for k in kv:\n self._body['${0}'.format(k)] = kv[k]\n return self", "def generate_tokens(self, apiview, function_id, *, add_line_marker: bool, prefix: str = \"\"):\n # Add arg name\n self.id = function_id\n if add_line_marker:\n self.id = f\"{function_id}.param({self.argname})\"\n apiview.add_line_marker(self.id)\n\n apiview.add_text(f\"{prefix}{self.argname}\")\n # add arg type\n if self.argtype:\n apiview.add_punctuation(\":\", False, True)\n apiview.add_type(self.argtype, self.id)\n\n # add arg default value\n default = self.default\n if default is not None:\n apiview.add_punctuation(\"=\", True, True)\n if isinstance(default, str) and default not in SPECIAL_DEFAULT_VALUES:\n apiview.add_string_literal(default)\n else:\n if isinstance(default, astroid.node_classes.Name):\n value = default.name\n elif hasattr(default, 
\"as_string\"):\n value = default.as_string()\n elif inspect.isclass(default):\n value = get_qualified_name(default, apiview.namespace)\n else:\n value = str(default)\n apiview.add_literal(value)", "def generate_pretty_key(*args, **kwargs):\n return '\\n'.join((\n \"\",\n \"\\tindex: {}\".format(str(kwargs.get('index'))),\n \"\\tdoc_type: {}\".format(str(kwargs.get('doc_type'))),\n \"\\tbody: {}\".format(str(RecursivelySortedDict(kwargs.get('body')))),\n \"\\tquery: {}\".format(str(RecursivelySortedDict(kwargs.get('query')))),\n ))", "def test_020_kwargs(self):\n caller = self.get_caller([KwargsTaskOverride])\n self.assertEqual([\"A\", \"B\"], caller(\"A\", \"B\"))", "def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. \"\n # \"Zip format\",\n # required=False)", "def __init__(self, **kwargs):\n esp_name = self.esp_name\n self.api_key = get_anymail_setting(\n \"api_key\",\n esp_name=esp_name,\n kwargs=kwargs,\n allow_bare=True,\n )\n api_url = get_anymail_setting(\n \"api_url\",\n esp_name=esp_name,\n kwargs=kwargs,\n default=\"https://api.brevo.com/v3/\",\n )\n if not api_url.endswith(\"/\"):\n api_url += \"/\"\n super().__init__(api_url, **kwargs)", "def __add_arguments__(cls, parser: ArgumentParser) -> None:\n\n parser.add_argument(\n \"-d\",\n \"--data_dict_guid\",\n required=True,\n type=str,\n help=(\n \"The indexd Globally Unique Identifier (GUID) for the data dictionary.\"\n ),\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n required=True,\n type=str,\n help=(\n \"Path to write out the JSON response with file_name and dictionary_url.\"\n ),\n )", "def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']", "def build(keys: List[str]):\n api = API()\n api.build(*keys)", "def parse_args() -> argparse.Namespace:\n\n parser = argparse.ArgumentParser(\n description=\"THE FOLLOWING SCRIPT SHOWS SNAPSHOT OPERATIONS USING REST API.\", )\n parser.add_argument(\n \"-c\", \"--cluster\", required=True, help=\"API server IP:port details\")\n parser.add_argument(\n \"-u\",\n \"--api_user\",\n default=\"admin\",\n help=\"API Username\")\n parser.add_argument(\"-p\", \"--api_pass\", help=\"API Password\")\n parsed_args = parser.parse_args()\n\n # collect the password without echo if not already provided\n if not parsed_args.api_pass:\n parsed_args.api_pass = getpass()\n\n return parsed_args", "def definearguments(self, customparser):\n if not customparser:\n return\n\n add_login_arguments_group(customparser)\n\n customparser.add_argument(\n '--serviceaccount',\n dest='serviceacc',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to created account \"\\\n \"to be a service account.\",\n default=False\n )\n customparser.add_argument(\n '--addprivs',\n dest='optprivs',\n nargs='*',\n action=_AccountParse,\n type=str,\n help=\"Optionally include this flag if you wish to specify \"\\\n \"which privileges you want added to the iLO account. This overrides the default of \"\\\n \"duplicating privileges of the currently logged in account on the new account. 
Pick \"\\\n \"privileges from the privilege list in the above help text. EX: --addprivs=1,2,4\",\n default=None\n )\n customparser.add_argument(\n '--removeprivs',\n dest='optprivs',\n nargs='*',\n action=_AccountParse,\n type=str,\n help=\"Optionally include this flag if you wish to specify \"\\\n \"which privileges you want removed from the iLO account. This overrides the default of\"\\\n \" duplicating privileges of the currently logged in account on the new account. Pick \"\\\n \"privileges from the privilege list in the above help text. EX: --removeprivs=1,2,4\",\n default=None\n )\n customparser.add_argument(\n '--role',\n dest='role',\n choices=['Administrator', 'ReadOnly', 'Operator'],\n help=\"Optionally include this flag if you would like to specify Privileges by role. \"\\\n \"Valid choices are: Administrator, ReadOnly, Operator\",\n default=None\n )\n customparser.add_argument(\n '-j',\n '--json',\n dest='json',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to change the\"\\\n \" displayed output to JSON format. Preserving the JSON data\"\\\n \" structure makes the information easier to parse.\",\n default=False\n )", "def init_cloud_api(self, args=None):\n pass", "def generateKwArgs(self, axisList=None):\n if axisList is None:\n axisList = self.tabWidget.currentWidget()\n\n kwargs = {} \n for axisWidget in axisList.getAxisWidgets():\n kwargs[axisWidget.axis.id] = axisWidget.getCurrentValues()\n\n # Generate additional args\n kwargs['squeeze'] = 0\n kwargs['order'] = axisList.getAxesOrderString()\n\n return kwargs", "def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )" ]
[ "0.6347063", "0.6342163", "0.6047952", "0.60061", "0.5908773", "0.590851", "0.58989066", "0.58448356", "0.58410525", "0.58357745", "0.5807142", "0.57746965", "0.57710725", "0.5671101", "0.5665809", "0.56421447", "0.5637025", "0.5618054", "0.55946404", "0.5592014", "0.5560202", "0.55435866", "0.55219644", "0.5513859", "0.55076927", "0.5471917", "0.5468453", "0.54666245", "0.54599166", "0.5444413", "0.54098856", "0.54020035", "0.53864455", "0.53802913", "0.5366934", "0.5363213", "0.5363128", "0.5362455", "0.5349671", "0.5344664", "0.5342888", "0.53296113", "0.5319646", "0.53111124", "0.5305683", "0.530556", "0.5298574", "0.52954966", "0.5295396", "0.52950495", "0.528783", "0.52767193", "0.52699655", "0.52658284", "0.52645344", "0.5263485", "0.52562755", "0.5254318", "0.5254126", "0.5243462", "0.524164", "0.52368176", "0.5232426", "0.52270466", "0.52158517", "0.5210464", "0.52093345", "0.52089053", "0.52083707", "0.520799", "0.5207857", "0.5206446", "0.5206218", "0.52048296", "0.52038634", "0.52017766", "0.5201018", "0.5200405", "0.5200218", "0.5199606", "0.5196969", "0.5185607", "0.5183269", "0.51798016", "0.51741755", "0.5165121", "0.5162968", "0.5161967", "0.5158466", "0.51557755", "0.5151786", "0.51453304", "0.51452804", "0.5144242", "0.51425505", "0.5141428", "0.51367986", "0.5135297", "0.5132318", "0.5131926", "0.5131048" ]
0.0
-1
Generate API keyword args for these details.
def as_api_parameters(self):
    data = {}
    for system in self.system_codes:
        data.update({
            "{0}_callback/{1}".format(system, variable): self.data[variable]
            for variable in self.data.keys()
        })
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extra_target_arguments(self):\n return {}", "def init_args(self):\n return {\n \"doc\": self.__doc__.format(name=colored(self.module_name, \"green\", attrs=['bold','underline'])),\n \"Url\": \"set a target url\",\n 'Type': \"set type to check , [php, asp, aspx, cgi, dir , mdb]\",\n }", "def as_api_parameters(self):\n raise NotImplementedError(\n 'as_api_parameters not implemented on ' + self.__class__)", "def help_args():\n pass", "def Help():\n names=api_method_dict.keys()\n names.sort()\n return ''.join(['**** ' + api_method_dict[name].__name__ + '\\n' + api_method_dict[name].__doc__ + '\\n'\n for name in names])", "def add_extra_args(self):\n super(AwsAccessListKeysMethod, self).add_extra_args()\n self.parser.add_argument(\"--key_pair_name\", required=False, default=None,\n help=\"AWS Key Pair name\")", "def _format_api_string(self):\n api_string = self.api_name\n arg_string_list = []\n if not self.api_args is None and \\\n len(self.api_args) > 0:\n for key in self.api_args:\n try:\n value = self.api_args[key]\n except TypeError:\n #assert False, f\"node: {self.api_name} key: {key} bad arg: {self.api_args}\" + \\\n # f\" type: {self.api_args.__class__.__name__}\"\n print(f\"node: {self.api_name} key: {key} bad arg: {self.api_args}\" + \\\n f\" type: {self.api_args.__class__.__name__}\")\n raise TypeError\n if isinstance(value, list):\n value_string = \"[ \" + \",\".join([self._api_value_string(x) for x in value]) + \" ]\"\n else:\n if value is None:\n assert False, f\"key={key}\"\n value_string = self._api_value_string(value)\n arg_string_list.append(f\"{key}:{value_string}\")\n api_string += \"(\" + \" \".join(arg_string_list) + \")\"\n #assert False, f\"{self.api_name}: {api_string}\"\n return api_string", "def valid_args(self):\r\n for k in request.args.keys():\r\n if k not in ['api_key']:\r\n getattr(self.__class__, k)", "def api(self) -> str:", "def generateKwargsAsString(self):\n args = \"\"\n axisList = self.tabWidget.currentWidget()\n\n for axisWidget in axisList.getAxisWidgets():\n args += \"%s = %s, \" % (axisWidget.axis.id,\n axisWidget.getCurrentValuesAsStr())\n\n # Generate additional args\n args += 'squeeze = 0'\n args += \", order = '%s' \" % axisList.getAxesOrderString()\n return args", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def get_dynamic_setup_params():\n\n return {\n # Retrieve the long description from the README\n \"long_description\": read_file(\"README.md\")\n }", "def add_extra_args(self):\n pass", "def get_keys_info() -> Dict[str, List[str]]:\n args_dict = {}\n\n for api in API_DICT:\n arg_list = list(\n getattr(\n sys.modules[__name__], \"set_\" + str(api) + \"_key\"\n ).__code__.co_varnames\n )\n arg_list.remove(\"persist\")\n arg_list.remove(\"show_output\")\n args_dict[api] = arg_list\n\n return args_dict", "def pykwarg(self):\n return self._pykwarg", "def full_args():\n return setup_args()", "def _generate_keywords(self):\n _keywords = [*self._lookup_opcodes_dir.keys(), *self._registers_list.keys()]\n for key in _keywords:\n self._keywords.extend(key.split(\" \"))\n return", "def create_parameters_description():\n description = OrderedDict()\n description['GeneralArguments'] = [\n {\n 'main_argument_name': '--config-file',\n 'argument_name_options': ['--config'],\n 'parameter_name': 'config_file',\n 'help': \"\"\"A json-encoded configuration file, in which one can specify the parameters\n for all detectors in use as well as some general parameters for the whole run.\n The encoded object should therefore 
be a dictionary,\n with possible top-level keys 'GeneralArguments' (general parameters, not relevant\n to a detector class), 'SaccadeDetector', 'BlinkDetector', 'FixationDetector'\n and 'SmoothPursuitDetector'.\n\n The value for each of the present keys should in turn be a dictionary with keys\n identical to the longest argument names below, without the eye movement name prefix.\n An example (and equivalent to default parameters) configuration file is provided\n in default_parameters.conf.json and includes all possible keys.\n\n In your custom configuration file you do not have to specify any the parameter values,\n missing keys will be considered to have the default value.\n\n For default values, you can consult the respective classes' __init__ methods in\n saccade_detector.py, blink_detector.py, fixation_detector.py and sp_detector.py.\n\n\n Values given through the console interface override the ones in the config file.\"\"\",\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--input-folder',\n 'argument_name_options': ['--in'],\n 'parameter_name': 'input_folder',\n 'help': 'From where to load the gaze points data. If absent, must be present in --config-file file. '\n 'This folder is assumed to have subfolders that correspond to videos, for which recordings '\n 'were made. Each such subdirectory should contain gaze files (one file per observer).',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--gaze-file-pattern',\n 'argument_name_options': ['--pattern'],\n 'parameter_name': 'gaze_file_pattern',\n 'help': 'Will look for such files in all subdirectories of --input-folder. '\n 'For GazeCom, \\'*.arff\\' is a recommended value (or \\'*.coord\\', if dealing with original dataset files). '\n 'One can use this parameter to match some name pattern as well (not just the file extension), '\n 'for example with \\'*_needed_files_*.arff\\'. \\n'\n 'If no wildcard symbol is found in the provided string, it is assumed to be just the file name '\n 'suffix, so it will be prepended with a wildcard symbol (i.e. \".coord\" will become \"*.coord\").',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--input-data-type',\n 'argument_name_options': ['--type'],\n 'parameter_name': 'input_data_type',\n 'help': 'Type of data loader to use (if not specified, will try to detect automatically)',\n 'kwargs': {'choices': ['DSF', 'ARFF', 'labelled ARFF']}\n },\n {\n 'main_argument_name': '--verbose',\n 'argument_name_options': ['-v'],\n 'parameter_name': 'verbose',\n 'default': None,\n 'help': 'Whether to output some information about the progress of the run to STDERR',\n 'kwargs': {'action': 'store_const', 'const': True} # only like this can support the default of None\n # (not to override the config all the time\n # with a missing value)\n },\n {\n 'main_argument_name': '--movies',\n 'argument_name_options': ['-m'],\n 'parameter_name': 'movies',\n 'help': 'Which movies out of the input folder to use (might be useful for train/test split). '\n 'The gaze data is supposed to be put under respective directories in the input folder. 
'\n 'If none are given, all available ones are used.',\n 'kwargs': {'nargs': '+', 'default': None}\n },\n {\n 'main_argument_name': '--output-folder',\n 'argument_name_options': ['--out'],\n 'parameter_name': 'output_folder',\n 'help': 'Where to output the resulting labelled data (if empty, will create a new temporary directory)',\n 'kwargs': {}\n },\n ]\n\n description['SaccadeDetector'] = [\n {\n 'main_argument_name': '--tolerance',\n 'argument_name_options': ['--tol'],\n 'parameter_name': 'tolerance',\n 'help': 'The relative size of the area outside the screen that is still considered to be legal',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-onset-fast-degree-per-sec',\n 'argument_name_options': ['--threshold-onset-fast'],\n 'parameter_name': 'threshold_onset_fast_degree_per_sec',\n 'help': 'Threshold for initialization of saccade detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-onset-slow-degree-per-sec',\n 'argument_name_options': ['--threshold-onset-slow'],\n 'parameter_name': 'threshold_onset_slow_degree_per_sec',\n 'help': 'A slower threshold for saccade onset detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-offset-degree-per-sec',\n 'argument_name_options': ['--threshold-offset'],\n 'parameter_name': 'threshold_offset_degree_per_sec',\n 'help': 'Threshold for saccade offset detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--max-speed-degree-per-sec',\n 'argument_name_options': ['--max-speed'],\n 'parameter_name': 'max_speed_degree_per_sec',\n 'help': 'Maximum speed of saccadic eye movements',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--min-duration-microsec',\n 'argument_name_options': ['--min-duration'],\n 'parameter_name': 'min_duration_microsec',\n 'help': 'Minimal saccade duration threshold',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--max-duration-microsec',\n 'argument_name_options': ['--max-duration'],\n 'parameter_name': 'max_duration_microsec',\n 'help': 'Maximal saccade duration threshold',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--velocity-integral-interval-microsec',\n 'argument_name_options': ['--velocity-integral-interval'],\n 'parameter_name': 'velocity_integral_interval_microsec',\n 'help': 'Interval duration, over which to integrate velocity computation.',\n 'kwargs': {'type': float}\n },\n ]\n\n description['BlinkDetector'] = [\n {\n 'main_argument_name': '--max-distance-to-saccade-microsec',\n 'argument_name_options': ['--max-distance-to-saccade'],\n 'parameter_name': 'max_distance_to_saccade_microsec',\n 'help': 'Threshold for distance from a definite blink to a nearby saccade, which will be marked as blink '\n 'as well.',\n 'kwargs': {'type': float}\n },\n ]\n\n description['FixationDetector'] = [\n {\n 'main_argument_name': '--prefiltering-interval-spread-threshold-degrees',\n 'argument_name_options': ['--prefiltering-interval-spread-threshold'],\n 'parameter_name': 'prefiltering_interval_spread_threshold_degrees',\n 'help': 'All the intersaccadic intervals shorter than this will be deemed fixations',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--min-sp-duration-microsec',\n 'argument_name_options': ['--min-sp-duration'],\n 'parameter_name': 'min_sp_duration_microsec',\n 'help': 'Minimal duration of a potential SP candidate (fast-moving samples shorter than this threshold '\n 'are 
labelled as noise)',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--sliding-window-width-microsec',\n 'argument_name_options': ['--sliding-window-width'],\n 'parameter_name': 'sliding_window_width_microsec',\n 'help': 'Sliding window for coordinates smoothing',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--normalization-sliding-window-size-samples',\n 'argument_name_options': ['--normalization-sliding-window'],\n 'parameter_name': 'normalization_sliding_window_size_samples',\n 'help': 'A moving average sliding window size (to normalize the data)',\n 'kwargs': {'type': int}\n },\n {\n 'main_argument_name': '--speed-threshold-degrees-per-sec',\n 'argument_name_options': ['--speed-threshold'],\n 'parameter_name': 'speed_threshold_degrees_per_sec',\n 'help': 'Biggest plausible speed for a noisy fixation',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--sliding-window-criterion',\n 'argument_name_options': ['--sliding-window'],\n 'parameter_name': 'sliding_window_criterion',\n 'help': 'Defines the way we check the samples with the sliding_window_criterion threshold: '\n 'either compute the average speed in the current window, or get the spread of '\n 'the gaze points (i.e. biggest XY bounding box side), divided by the duration',\n 'kwargs': {'choices': ['speed', 'spread']}\n },\n {\n 'main_argument_name': '--intersaccadic-interval-duration-threshold-microsec',\n 'argument_name_options': ['--intersaccadic-interval-duration-threshold'],\n 'parameter_name': 'intersaccadic_interval_duration_threshold_microsec',\n 'help': 'Minimal size of the intersaccadic interval to apply the step with the moving average analysis',\n 'kwargs': {'type': float}\n },\n ]\n\n description['SmoothPursuitDetector'] = [\n # a mutually exclusive group\n [\n {\n 'main_argument_name': '--min-pts',\n 'argument_name_options': [],\n 'parameter_name': 'min_pts',\n 'soft_type': int,\n 'help': 'An integer indicating the minimum number of points required to form a core point\\'s '\n 'neighbourhood, or a string \\'num_observers\\' (meaning that the actual number of observers '\n 'for each movie will be substituted, depending on the data set provided).\\n'\n 'This option is mutually exclusive with --min-observers.',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--min-observers',\n 'argument_name_options': [],\n 'parameter_name': 'min_observers',\n # first try casting to int, then to float (since int cast will fail for a float)\n 'soft_type': [int, float],\n 'help': 'Either a floating point in [0.0; 1.0] range (indicating the share of all the present '\n 'observers per movie) or int [2; +\\inf) (indicating the absolute threshold for '\n 'observer count in the core point\\'s neighbourhood).\\n'\n 'This option is mutually exclusive with --min-pts.',\n 'kwargs': {}\n }\n ],\n {\n 'main_argument_name': '--eps-deg',\n 'argument_name_options': ['--eps'],\n 'parameter_name': 'eps_deg',\n 'help': 'Spatial Euclidean distance threshold that defines the neighbourhood in the XY-plane',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--time-slice-microsec',\n 'argument_name_options': ['--time-slice'],\n 'parameter_name': 'time_slice_microsec',\n 'help': 'Width of the time slice that defines the size of the neighbourhood on the time axis.',\n 'kwargs': {'type': float}\n },\n ]\n\n return description", "def _generate_params(self):\n return {\n 'lis_outcome_service_url': self.lis_outcome_service_url,\n 'lis_result_sourcedid': self.lis_result_sourcedid,\n 'oauth_consumer_key': 
self.key\n }", "def create_api_keys(self, **kwargs):\n\n all_params = ['api_key']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_api_keys\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'api_key' in params:\n body_params = params['api_key']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ApiKeyWithPrivileges',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def extra_args(self):\n return []", "def _core_ar_kwarg(self,**kwargs) :\n\t\tpass", "def format_arguments(self, **kwargs):\n return kwargs", "def purefa_argument_spec():\n\n return dict(\n fa_url=dict(),\n api_token=dict(no_log=True),\n )", "def get_dynamic_setup_params():\n return {\n # Retrieve the long description from the README\n # 'long_description': read_file('README.rst'),\n 'install_requires': substitute_crypto_to_req(\n read_requirements('requirements.txt'),\n ),\n # 'extras_require': read_extras(),\n }", "def handle_filters():\n params = {'api_key': API_KEY}\n for k in demisto.args():\n if demisto.getArg(k):\n params[k] = demisto.getArg(k)\n return params", "def get_args():\n return {\"id\": fields.UUID(required=True, location=\"view_args\")}", "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def get_api_key(context) -> str:\n provided_api_key = \"\"\n for key, value in context.invocation_metadata():\n if key == \"api_key\":\n provided_api_key = str(value)\n return provided_api_key\n return provided_api_key", "def custom_openapi() -> Dict:\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"The GenomicMedLab Cool Seq Tool\",\n version=__version__,\n description=\"Common Operations On Lots-of Sequences Tool.\",\n routes=app.routes\n )\n\n openapi_schema[\"info\"][\"contact\"] = {\n \"name\": \"Alex H. 
Wagner\",\n \"email\": \"Alex.Wagner@nationwidechildrens.org\",\n \"url\": \"https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab\" # noqa: E501\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema", "def get_argument_as_keywords(self):\n status = True\n arg_kv = self.get_values_for_mandatory_args()\n if len(arg_kv) != len(self.req_args_list):\n msg = 'could not execute %s without mandatory arguments' % (object)\n self.data_repository = skip_and_report_status(self.data_repository, msg)\n status = False\n arg_kv = self.get_values_for_optional_args(arg_kv)\n return arg_kv, status", "def edit_keywords(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)", "def index_args():\n return {}", "def setup(self):\r\n \r\n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:\r\n \r\n # Set the required parameters\r\n for arg in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addReqArg(arg)\r\n \r\n # Set up the valid parameters\r\n for arg in RadiusAuthRestHandler.VALID_PARAMS:\r\n if arg not in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addOptArg(arg)", "def get_kwargs(self):\n return {}", "def add_required_arguments(self, *args):\n self._add_sample_specific_arguments(True, *args)", "def generate_call_string(self):\n if(self.api_key is None):\n raise error(\"API Key is not defined\");#Should base class do this? \n \n self.call_url=self.baseurl;\n if hasattr(self,'search_str'):\n self.call_url+=self.search_str;\n if hasattr(self,'filter_field_str'):\n self.call_url=self.call_url+'&'+self.filter_field_str;\n \n #loop over the parameters dict\n for key in self.input_params:\n self.call_url+=self.input_params[key];\n \n #finally add api key. 
at this point already checked it exists\n self.call_url=self.call_url+'&'+\"api-key=\"+str(self.api_key);\n return;", "def _custom_actioner(message: ActionMessage, defined_keyword_arg, **kwargs):\n print(message.additional_fields)\n print(defined_keyword_arg)\n print(kwargs)", "def manage_params(args):\n # Socrata API\n with open(\"secret/builtby-socrata.yaml\", 'r') as f:\n try:\n socrata_api_credentials = yaml.load(f)\n except yaml.YAMLError as exc:\n print(exc)\n\n socrata_app_token = socrata_api_credentials['app_token']\n\n # base params\n params = {\n '$$app_token': socrata_app_token\n }\n # remove null attributes\n args = {k: v for k, v in args.items() if v is not None}\n # add args to params\n params.update(args) # inplace\n\n return params", "def gen_args(self, obj, pa_names = False):\n\n pal, kwal = get_class_total_args(type(obj))\n\n try:\n get_val = type(obj).__get_init_arg_val__\n except AttributeError:\n get_val = getattr\n\n for pa in pal:\n v = get_val(obj, pa)\n self.gen_field((pa + \" = \") if pa_names else \"\")\n self.pprint(v)\n\n for kwa, default in kwal.items():\n try:\n v = get_val(obj, kwa)\n except AttributeError:\n # If value cannot be obtained, skip the argument generation\n continue\n\n # generate only arguments with non-default values\n if (v is default) or (v == default):\n continue\n\n self.gen_field(kwa + \" = \")\n self.pprint(v)", "def generate_arg_and_kwags():\n def gen_func(\n #df: DataSource,\n option: List[list],\n style: List[dict]\n )->List[Tuple[list, dict]]:\n\n if len(option) != len(style):\n raise SystemError(\"option and style must be same size list.\")\n\n arg_and_kwarg = []\n for o, s in zip(option, style):\n arg = [*o]\n kwargs = s\n arg_and_kwarg.append((arg, kwargs))\n return arg_and_kwarg\n return gen_func", "def define_parameters(self):", "def _add_pos_args(self, *args):\n arg_array = self._params.setdefault(\"positional_parameters\", [])\n # couchbase++ wants all args JSONified\n json_args = [json.dumps(arg) for arg in args]\n arg_array.extend(json_args)", "def add_kwargs():\n pass", "def _collect_repr_args(self, poargs, kwargs):", "def make_args(self, args):\n result_str = \"?\"\n for k, v in args.iteritems():\n result_str = result_str + k + \"=\" + v + \"&\"\n return result_str", "def Args(parser):\n flags.AddHcxActivationKeyArgToParser(parser)\n base.ASYNC_FLAG.AddToParser(parser)\n base.ASYNC_FLAG.SetDefault(parser, True)\n parser.display_info.AddFormat('yaml')", "def add_args(self):\n raise NotImplementedError", "def _set_named_args(self, **kv):\n # named_params = {}\n # for k in kv:\n # named_params[\"${0}\".format(k)] = json.dumps(kv[k])\n # couchbase++ wants all args JSONified\n named_params = {f'${k}': json.dumps(v) for k, v in kv.items()}\n\n self._params[\"named_parameters\"] = named_params\n return self", "def format_args(self, **kwargs: Any) -> str:\n return \"\"", "def _setup_api_properties(self):\n self.implicit_api_logical_id = GeneratedLogicalId.implicit_http_api()\n self.implicit_api_condition = \"ServerlessHttpApiCondition\"\n self.api_event_type = \"HttpApi\"\n self.api_type = SamResourceType.HttpApi.value\n self.api_id_property = \"ApiId\"\n self.editor = OpenApiEditor", "def get_cli_arguments(self):\n pass", "def _docs_params(**kwds):\n\n def dec(obj):\n obj.__orig_doc__ = obj.__doc__\n obj.__doc__ = dedent(obj.__doc__).format_map(kwds)\n return obj\n\n return dec", "def _make_args(self, args, defaults=[], vararg=None, kwonlyargs=[],\n kw_defaults=[], kwarg=None):\n # On Python 2 convert vararg and kwarg to raw 
name, raise error using\n # lineno stored on the node and lexer from self.\n # On Python 3.3 extract name and annotation\n # After should be straight forward\n raise NotImplementedError()", "def fill_args(cls, toolchain, parser):\n pass # pass must be overloaded (if required)", "def base_arguments(self):\n raise NotImplementedError()", "def help(self, keyword):\n if (keyword == 'all'):\n string = ('%-20s%-20s%-20s%s\\n' % ('Keyword', 'Type', 'Default', 'Comment'))\n for key, value in self.allowed_keys.items():\n string += ('%-20s%-20s%-20s%s\\n' % (key, str(value[0]), str(value[1]), value[2]))\n print string", "def main(pArgs):\n\n # Options and args... \n \n longoptions=[\"help\", \"usage\", \"endpoint=\", \"interface-type=\", \"verbose=\", \\\n \"recursive\", \"dbs-conf=\", \"show-prod\", \"show-caf\", \\\n \"only-subscribed\", \"only-custodial\"]", "def api():\n\treturn \"The API call\"", "def keyword_only(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if len(args) > 0:\n raise TypeError(\"Method %s only takes keyword arguments.\" % func.__name__)\n return func(**kwargs)\n notice = \".. Note:: This method requires all argument be specified by keyword.\\n\"\n wrapper.__doc__ = notice + wrapper.__doc__\n return wrapper", "def definearguments(self, customparser):\n if not customparser:\n return\n customparser.add_option(\n '--url',\n dest='url',\n help=\"Use the provided iLO URL to login.\",\n default=None,\n )\n customparser.add_option(\n '-u',\n '--user',\n dest='user',\n help=\"If you are not logged in yet, including this flag along\"\\\n \" with the password and URL flags can be used to log into a\"\\\n \" server in the same command.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-p',\n '--password',\n dest='password',\n help=\"\"\"Use the provided iLO password to log in.\"\"\",\n default=None,\n )\n customparser.add_option(\n '-e',\n '--enc',\n dest='encode',\n action='store_true',\n help=SUPPRESS_HELP,\n default=False,\n )", "def set_api_access_keys(**kwargs):\n API_BASE_PARAMS['key'] = kwargs['key']", "def args_str(self):", "def build_method(method_name, description, parameters, api_path, http_method, summary, return_type):\n allow_per_page = False\n parameters = check_for_pre_attachment_param(parameters)\n arg_list = get_parameters(parameters)\n param_descriptions = get_parameter_descriptions(parameters)\n payload = build_payload(parameters)\n enums = check_for_enums(parameters)\n\n \"\"\"\n If the method returns an array, allow the per_page parameter for paging\n \"\"\"\n if return_type == 'array' or (method_name.startswith(\"list_\") and http_method == \"GET\"):\n arg_list.append('per_page=None')\n param_descriptions.append(':param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE')\n param_descriptions.append(':type per_page: integer or None')\n payload.append('\\'per_page\\': per_page,')\n allow_per_page = True\n\n arg_list.append('**request_kwargs')\n\n \"\"\"\n Create the method signature\n \"\"\"\n\n content = line_format('def ' + method_name + '(request_ctx, ' + ', '.join(arg_list) + '):', NONE)\n content += line_format('\"\"\"', FOUR)\n\n \"\"\"\n Create the method description text from the description in the meta api\n \"\"\"\n regex = re.compile(r'\\{api\\:(\\w+)\\#(\\w+).*?\\}')\n for line in description.splitlines(True):\n rst_line = regex.sub(format_api_string, line)\n content += line_format(rst_line.rstrip(), FOUR)\n\n \"\"\"\n list out the method paramters\n \"\"\"\n content += line_format('', 
NONE)\n content += line_format(':param request_ctx: The request context', EIGHT)\n content += line_format(':type request_ctx: :class:RequestContext', EIGHT)\n for param in param_descriptions:\n content += line_format(param, EIGHT)\n content += line_format(':return: '+summary, EIGHT)\n content += line_format(':rtype: requests.Response (with ' + return_type + ' data)', EIGHT)\n content += line_format('', NONE)\n content += line_format('\"\"\"', FOUR)\n content += line_format('', NONE)\n\n \"\"\"\n Add the per_page check\n \"\"\"\n if allow_per_page:\n content += line_format('if per_page is None:', FOUR)\n content += line_format('per_page = request_ctx.per_page', EIGHT)\n\n \"\"\"\n Add any enums if they exist.\n \"\"\"\n for enum in enums:\n content += line_format(enum, FOUR)\n\n \"\"\"\n Add the api path\n \"\"\"\n path_formatted = 'path = \\'' + api_path + '\\''\n content += line_format(path_formatted, FOUR)\n\n \"\"\"\n Add a payload if one exists\n \"\"\"\n payload_string = ''\n if payload:\n content += line_format('payload = {', FOUR)\n for item in payload:\n content += line_format(item, EIGHT)\n content += line_format('}', FOUR)\n payload_string = ', payload=payload'\n\n content += line_format('url = request_ctx.base_api_url + path.format(' + ', '.join(get_path_parameters(parameters)) + ')', FOUR)\n content += line_format(\n 'response = client.'+http_method.lower()+'(request_ctx, url' + payload_string + ', **request_kwargs)', FOUR)\n\n content += line_format('', NONE)\n content += line_format('return response', FOUR)\n content += line_format('', NONE)\n content += line_format('', NONE)\n return content", "def get_api_keys(self, **kwargs):\n\n all_params = ['page', 'per_page', '_from', 'to', 'sort_dir', 'sort_field', 'filters']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_api_keys\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page' in params:\n query_params['_page'] = params['page']\n if 'per_page' in params:\n query_params['_perPage'] = params['per_page']\n if '_from' in params:\n query_params['_from'] = params['_from']\n if 'to' in params:\n query_params['_to'] = params['to']\n if 'sort_dir' in params:\n query_params['_sortDir'] = params['sort_dir']\n if 'sort_field' in params:\n query_params['_sortField'] = params['sort_field']\n if 'filters' in params:\n query_params['_filters'] = params['filters']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[ApiKey]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def define_parameters(self):\n self.add_argument('--prefix', dest='prefix', type=str, optional=False,\n help='prefix for file names')\n 
self.add_argument('--sleepLength',\n dest = 'sleepLength',\n type = str,\n optional = True,\n help ='time to sleep before performing plugin action',\n default = '0')", "def toargs(context, schema, data):\n data = dict(data)\n args = {}\n for name, field in schema.namesAndDescriptions(True):\n field = field.bind(context)\n n = name\n if n.endswith('_') and iskeyword(n[:-1]):\n n = n[:-1]\n\n s = data.get(n, data)\n if s is not data:\n s = str(s)\n del data[n]\n\n try:\n args[str(name)] = field.from_unicode(s)\n except ValidationError as v:\n reraise(ConfigurationError('Invalid value for', n, str(v)),\n None, sys.exc_info()[2])\n elif field.required:\n # if the default is valid, we can use that:\n default = field.default\n try:\n field.validate(default)\n except ValidationError:\n raise ConfigurationError('Missing parameter:', n)\n args[str(name)] = default\n\n if data:\n # we had data left over\n try:\n keyword_arguments = schema.getTaggedValue('keyword_arguments')\n except KeyError:\n keyword_arguments = False\n if not keyword_arguments:\n raise ConfigurationError('Unrecognized parameters:', *data)\n\n for name in data:\n args[str(name)] = data[name]\n\n return args", "def setup_args(cls) -> ParlaiParser:\n # we want to later deprecate this for add_cmdline_args", "def _kwargs(self):\n dict = {\"name\":self.name}\n return dict", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'version': 'str',\n 'tagline': 'str',\n 'keywords': 'str',\n 'short_description': 'str',\n 'usage_information': 'str',\n 'long_description': 'str',\n 'license_model_description': 'str',\n 'system_requirements': 'str',\n 'time_released': 'datetime',\n 'release_notes': 'str',\n 'categories': 'list[str]',\n 'publisher': 'Publisher',\n 'languages': 'list[Item]',\n 'screenshots': 'list[Screenshot]',\n 'videos': 'list[NamedLink]',\n 'support_contacts': 'list[SupportContact]',\n 'support_links': 'list[NamedLink]',\n 'documentation_links': 'list[DocumentationLink]',\n 'icon': 'UploadData',\n 'banner': 'UploadData',\n 'regions': 'list[Region]',\n 'package_type': 'str',\n 'default_package_version': 'str',\n 'links': 'list[Link]',\n 'is_featured': 'bool'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'version': 'version',\n 'tagline': 'tagline',\n 'keywords': 'keywords',\n 'short_description': 'shortDescription',\n 'usage_information': 'usageInformation',\n 'long_description': 'longDescription',\n 'license_model_description': 'licenseModelDescription',\n 'system_requirements': 'systemRequirements',\n 'time_released': 'timeReleased',\n 'release_notes': 'releaseNotes',\n 'categories': 'categories',\n 'publisher': 'publisher',\n 'languages': 'languages',\n 'screenshots': 'screenshots',\n 'videos': 'videos',\n 'support_contacts': 'supportContacts',\n 'support_links': 'supportLinks',\n 'documentation_links': 'documentationLinks',\n 'icon': 'icon',\n 'banner': 'banner',\n 'regions': 'regions',\n 'package_type': 'packageType',\n 'default_package_version': 'defaultPackageVersion',\n 'links': 'links',\n 'is_featured': 'isFeatured'\n }\n\n self._id = None\n self._name = None\n self._version = None\n self._tagline = None\n self._keywords = None\n self._short_description = None\n self._usage_information = None\n self._long_description = None\n self._license_model_description = None\n self._system_requirements = None\n self._time_released = None\n self._release_notes = None\n self._categories = None\n self._publisher = None\n self._languages = None\n self._screenshots = None\n 
self._videos = None\n self._support_contacts = None\n self._support_links = None\n self._documentation_links = None\n self._icon = None\n self._banner = None\n self._regions = None\n self._package_type = None\n self._default_package_version = None\n self._links = None\n self._is_featured = None", "def ReviewServiceArgs(cls, container = '', library = 'Standard', dialogname = ''):\n return container, library, dialogname, ScriptForge.componentcontext", "def add_kwargs_arg(parser):\n parser.add_argument('--arg', '-a', type=__kwargs_arg, metavar='K=V', action='append',\n dest='kwargs', default=[],\n help='any special keyword arguments to pass to the method, formated as '\n 'key=value with value being a valid Python literal or one of the special '\n 'values nan, inf, -inf, N4, N8, N8_DIST, N6, N18, N18_DIST, N26, N26_DIST')", "def getArguments(self):\n ApiCli.getArguments(self)\n\n if self.args.alarm_name is not None:\n self.alarm_name = self.args.alarm_name\n\n if self.args.metric_name is not None:\n self.metric_name = self.args.metric_name\n\n if self.args.aggregate is not None:\n self.aggregate = self.args.aggregate\n\n if self.args.operation is not None:\n self.operation = self.args.operation\n\n if self.args.threshold is not None:\n self.threshold = self.args.threshold\n\n if self.args.interval is not None:\n self.interval = self.args.interval\n\n if self.args.host_group_id is not None:\n self.host_group_id = self.args.host_group_id\n\n if self.args.actions is not None:\n self.actions = self.args.actions\n\n if self.args.note is not None:\n self.note = self.args.note\n\n if self.args.per_host_notify is not None:\n self.per_host_notify = self.args.per_host_notify\n\n if self.args.is_disabled is not None:\n self.is_disabled = self.args.is_disabled\n\n payload = {}\n\n # Create trigger predicate dictionary\n predicate = {}\n\n if self.aggregate is not None:\n predicate['agg'] = self.aggregate\n\n if self.operation is not None:\n predicate['op'] = self.operation\n\n if self.threshold is not None:\n predicate['val'] = self.threshold\n\n if 'agg' in predicate or 'op' in predicate or 'val' in predicate:\n payload['triggerPredicate'] = predicate\n\n # Create payload dictionary\n if self.alarm_name:\n payload['name'] = self.alarm_name\n\n if self.host_group_id is not None:\n payload['hostgroupId'] = self.host_group_id\n\n if self.interval is not None:\n payload['interval'] = self.intervals[self.interval]\n\n if self.metric_name is not None:\n payload['metricName'] = self.metric_name\n\n if self.note is not None:\n payload['note'] = self.note\n\n if self.actions is not None:\n payload['actions'] = self.actions\n\n if self.per_host_notify is not None:\n payload['perHostNotify'] = True if self.per_host_notify == 'yes' else False\n\n if self.is_disabled is not None:\n payload['isDisabled'] = True if self.is_disabled == 'yes' else False\n\n self.data = json.dumps(payload, sort_keys=True)\n self.headers = {'Content-Type': 'application/json'}", "def DeveloperAPI(*args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return DeveloperAPI()(args[0])\n\n def wrap(obj):\n _append_doc(obj, message='DeveloperAPI: This API may change across minor Ludwig releases.')\n _mark_annotated(obj)\n return obj\n return wrap", "def list_keywords(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def purefb_argument_spec():\n\n return dict(\n fb_url=dict(),\n api_token=dict(no_log=True),\n )", "def as_api_parameters(self):\n return {\n 'return_token': 
self.token,\n 'return_url': self.url,\n 'client_http_user_agent': self.user_agent,\n 'client_http_accept': self.accept,\n 'remote_site': self.remote_site,\n }", "def __init__(**params):", "def get_documentation(self, *args, **dargs):\n pass", "def process_api_declaration(self, resources, resource, context):\n pass", "def __call__(self, *args, **kwargs) -> Dict[str, Any]:\n pass", "def generate_api_key(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method generate_api_key\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys/_generate'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ApiKey',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_json_argument_list():\n list_of_arguments_to_get = [\"finish_time\", \"segmentation_training_samples\", \"patch_count_per_image\", \"learning_rate\", \"batch_k\",\n \"batch_p\", \"flip_augment\", \"standardize\", \"margin\", \"metric\"]\n\n return list_of_arguments_to_get", "def Args(parser):\n\n base_classes.RegionalDescriber.Args(parser)\n base_classes.AddFieldsFlag(parser, 'targetVpnGateways')", "def eseries_host_argument_spec():\n argument_spec = basic_auth_argument_spec()\n argument_spec.update(dict(\n api_username=dict(type='str', required=True),\n api_password=dict(type='str', required=True, no_log=True),\n api_url=dict(type='str', required=True),\n ssid=dict(type='str', required=True),\n validate_certs=dict(type='bool', required=False, default=True),\n ))\n return argument_spec", "def params(self, **kwargs):\n return kwargs", "def _set_named_args(self, **kv):\n for k in kv:\n self._body['${0}'.format(k)] = kv[k]\n return self", "def generate_tokens(self, apiview, function_id, *, add_line_marker: bool, prefix: str = \"\"):\n # Add arg name\n self.id = function_id\n if add_line_marker:\n self.id = f\"{function_id}.param({self.argname})\"\n apiview.add_line_marker(self.id)\n\n apiview.add_text(f\"{prefix}{self.argname}\")\n # add arg type\n if self.argtype:\n apiview.add_punctuation(\":\", False, True)\n apiview.add_type(self.argtype, self.id)\n\n # add arg default value\n default = self.default\n if default is not None:\n apiview.add_punctuation(\"=\", True, True)\n if isinstance(default, str) and default not in SPECIAL_DEFAULT_VALUES:\n apiview.add_string_literal(default)\n else:\n if isinstance(default, astroid.node_classes.Name):\n value = default.name\n elif hasattr(default, \"as_string\"):\n value = default.as_string()\n elif inspect.isclass(default):\n value = get_qualified_name(default, apiview.namespace)\n else:\n value = str(default)\n apiview.add_literal(value)", "def 
generate_pretty_key(*args, **kwargs):\n return '\\n'.join((\n \"\",\n \"\\tindex: {}\".format(str(kwargs.get('index'))),\n \"\\tdoc_type: {}\".format(str(kwargs.get('doc_type'))),\n \"\\tbody: {}\".format(str(RecursivelySortedDict(kwargs.get('body')))),\n \"\\tquery: {}\".format(str(RecursivelySortedDict(kwargs.get('query')))),\n ))", "def test_020_kwargs(self):\n caller = self.get_caller([KwargsTaskOverride])\n self.assertEqual([\"A\", \"B\"], caller(\"A\", \"B\"))", "def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. \"\n # \"Zip format\",\n # required=False)", "def __init__(self, **kwargs):\n esp_name = self.esp_name\n self.api_key = get_anymail_setting(\n \"api_key\",\n esp_name=esp_name,\n kwargs=kwargs,\n allow_bare=True,\n )\n api_url = get_anymail_setting(\n \"api_url\",\n esp_name=esp_name,\n kwargs=kwargs,\n default=\"https://api.brevo.com/v3/\",\n )\n if not api_url.endswith(\"/\"):\n api_url += \"/\"\n super().__init__(api_url, **kwargs)", "def __add_arguments__(cls, parser: ArgumentParser) -> None:\n\n parser.add_argument(\n \"-d\",\n \"--data_dict_guid\",\n required=True,\n type=str,\n help=(\n \"The indexd Globally Unique Identifier (GUID) for the data dictionary.\"\n ),\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n required=True,\n type=str,\n help=(\n \"Path to write out the JSON response with file_name and dictionary_url.\"\n ),\n )", "def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']", "def build(keys: List[str]):\n api = API()\n api.build(*keys)", "def parse_args() -> argparse.Namespace:\n\n parser = argparse.ArgumentParser(\n description=\"THE FOLLOWING SCRIPT SHOWS SNAPSHOT OPERATIONS USING REST API.\", )\n parser.add_argument(\n \"-c\", \"--cluster\", required=True, help=\"API server IP:port details\")\n parser.add_argument(\n \"-u\",\n \"--api_user\",\n default=\"admin\",\n help=\"API Username\")\n parser.add_argument(\"-p\", \"--api_pass\", help=\"API Password\")\n parsed_args = parser.parse_args()\n\n # collect the password without echo if not already provided\n if not parsed_args.api_pass:\n parsed_args.api_pass = getpass()\n\n return parsed_args", "def definearguments(self, customparser):\n if not customparser:\n return\n\n add_login_arguments_group(customparser)\n\n customparser.add_argument(\n '--serviceaccount',\n dest='serviceacc',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to created account \"\\\n \"to be a service account.\",\n default=False\n )\n customparser.add_argument(\n '--addprivs',\n dest='optprivs',\n nargs='*',\n action=_AccountParse,\n type=str,\n help=\"Optionally include this flag if you wish to specify \"\\\n \"which privileges you want added to the iLO account. This overrides the default of \"\\\n \"duplicating privileges of the currently logged in account on the new account. Pick \"\\\n \"privileges from the privilege list in the above help text. 
EX: --addprivs=1,2,4\",\n default=None\n )\n customparser.add_argument(\n '--removeprivs',\n dest='optprivs',\n nargs='*',\n action=_AccountParse,\n type=str,\n help=\"Optionally include this flag if you wish to specify \"\\\n \"which privileges you want removed from the iLO account. This overrides the default of\"\\\n \" duplicating privileges of the currently logged in account on the new account. Pick \"\\\n \"privileges from the privilege list in the above help text. EX: --removeprivs=1,2,4\",\n default=None\n )\n customparser.add_argument(\n '--role',\n dest='role',\n choices=['Administrator', 'ReadOnly', 'Operator'],\n help=\"Optionally include this flag if you would like to specify Privileges by role. \"\\\n \"Valid choices are: Administrator, ReadOnly, Operator\",\n default=None\n )\n customparser.add_argument(\n '-j',\n '--json',\n dest='json',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to change the\"\\\n \" displayed output to JSON format. Preserving the JSON data\"\\\n \" structure makes the information easier to parse.\",\n default=False\n )", "def init_cloud_api(self, args=None):\n pass", "def generateKwArgs(self, axisList=None):\n if axisList is None:\n axisList = self.tabWidget.currentWidget()\n\n kwargs = {} \n for axisWidget in axisList.getAxisWidgets():\n kwargs[axisWidget.axis.id] = axisWidget.getCurrentValues()\n\n # Generate additional args\n kwargs['squeeze'] = 0\n kwargs['order'] = axisList.getAxesOrderString()\n\n return kwargs", "def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )" ]
[ "0.6347063", "0.6342163", "0.6047952", "0.60061", "0.5908773", "0.590851", "0.58989066", "0.58448356", "0.58410525", "0.58357745", "0.5807142", "0.57746965", "0.57710725", "0.5671101", "0.5665809", "0.56421447", "0.5637025", "0.5618054", "0.55946404", "0.5592014", "0.5560202", "0.55435866", "0.55219644", "0.5513859", "0.55076927", "0.5471917", "0.5468453", "0.54666245", "0.54599166", "0.5444413", "0.54098856", "0.54020035", "0.53864455", "0.53802913", "0.5366934", "0.5363213", "0.5363128", "0.5362455", "0.5349671", "0.5344664", "0.5342888", "0.53296113", "0.5319646", "0.53111124", "0.5305683", "0.530556", "0.5298574", "0.52954966", "0.5295396", "0.52950495", "0.528783", "0.52767193", "0.52699655", "0.52658284", "0.52645344", "0.5263485", "0.52562755", "0.5254318", "0.5254126", "0.5243462", "0.524164", "0.52368176", "0.5232426", "0.52270466", "0.52158517", "0.5210464", "0.52093345", "0.52089053", "0.52083707", "0.520799", "0.5207857", "0.5206446", "0.5206218", "0.52048296", "0.52017766", "0.5201018", "0.5200405", "0.5200218", "0.5199606", "0.5196969", "0.5185607", "0.5183269", "0.51798016", "0.51741755", "0.5165121", "0.5162968", "0.5161967", "0.5158466", "0.51557755", "0.5151786", "0.51453304", "0.51452804", "0.5144242", "0.51425505", "0.5141428", "0.51367986", "0.5135297", "0.5132318", "0.5131926", "0.5131048" ]
0.52038634
74
Fits the model for the given targets.
def fit(self, X, Y):
    # + 1 because we want to also have a free term (bias) that is not influenced by the training values necessarily.
    self._weights = np.zeros(1 + X.shape[1])
    Y = [0 if y == -1 else 1 for y in Y]
    self.epochs = 0
    fitted = False
    while not fitted:
        errors = 0
        self.epochs += 1
        # We now parse the training data set
        for entry, target in zip(X, Y):
            classification_error = target - self.predict(entry)
            if classification_error:
                self.updates += 1
                # we compute now with how much we should adjust the weights
                weights_update = self.learning_rate * classification_error
                # Adjust the weights based on the error (+/- 1 or 0) and the training entry
                self._weights[1:] += weights_update * entry
                self._weights[0] += weights_update
            errors += np.where(classification_error == 0, 0, 1)
        fitted = errors == 0
        self._errors.append(errors)
    return self._weights
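This retrieved `fit` method is shown out of its class context: it relies on an enclosing perceptron-style object that provides `learning_rate`, `predict`, `updates`, `epochs` and `_errors`. The sketch below is only an illustration of that assumed scaffolding; the class name, constructor defaults and the unit-step `predict` are assumptions and are not part of the retrieved document.

import numpy as np

class Perceptron:
    def __init__(self, learning_rate=0.1):
        self.learning_rate = learning_rate  # step size used inside fit()
        self._weights = None                # _weights[0] acts as the bias term
        self._errors = []                   # misclassification count per epoch
        self.updates = 0                    # number of weight updates performed
        self.epochs = 0

    def net_input(self, entry):
        # weighted sum of the inputs plus the bias term
        return np.dot(entry, self._weights[1:]) + self._weights[0]

    def predict(self, entry):
        # assumed unit-step activation, consistent with the {0, 1} targets
        # that fit() produces before training
        return np.where(self.net_input(entry) >= 0.0, 1, 0)

With a class of this shape, `Perceptron(0.1).fit(X, Y)` keeps iterating over the training set until a full epoch passes with zero misclassifications, so the loop only terminates when the data are linearly separable.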
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit(self, features, targets):\n self.model_features = features\n self.model_targets= targets", "def fit(self, input_data, targets):\n self.ensemble_model_.fit(input_data, targets)", "def set_targets(self, targets: List[float]):\n self.targets = targets", "def set_targets(self, targets: List[List[float]]):\n\n if self.preload:\n for i in range(len(self.data_ram)):\n for j in range(len(targets[i])):\n self.data_ram[i][QM9.U0][j] = targets[i][j]\n\n else:\n for i in range(len(self.data)):\n for j in range(len(targets[i])):\n self.data[i][QM9.U0][j] = targets[i][j]", "def set_targets(self, targets: List[List[float]]):\n assert len(self.data) == len(targets)\n for i in range(len(self.data)):\n self.data[i].set_targets(targets[i])", "def forward(self, images, targets):\n # send all variables to selected device\n images = images.to(self.device)\n masks = targets.to(self.device)\n # compute loss\n outputs = self.net(images)\n orig_size = self.image_dataset.orig_size\n if outputs.size()[-2:] != orig_size:\n # resize predictions back to the original size\n outputs = nn.functional.interpolate(outputs, size=orig_size, mode='bilinear', align_corners=True)\n loss = self.criterion(outputs, masks)\n return loss, outputs", "def do_new(self, args):\n model_name = questionary.text(\"Target name:\").ask()\n model_name = model_name.replace(\" \", \"\")\n\n available_frameworks = list(CFState.get_instance().loaded_frameworks.keys())\n framework_choice = questionary.select(\"Which framework?\", choices=available_frameworks).ask()\n\n if \"textattack\" in framework_choice:\n framework = \"TextTarget\"\n elif \"art\" in framework_choice:\n framework = \"ArtTarget\"\n else:\n raise ValueError(\"invalid framework\")\n\n if framework == \"TextTarget\":\n model_data_type = \"text\"\n elif framework == \"ArtTarget\":\n model_data_type = questionary.select(\"What data type?\", choices=[\"numpy\", \"image\"]).ask()\n else:\n raise ValueError(\"invalid framework\")\n\n if model_name not in os.listdir(config.targets_path):\n try:\n os.mkdir(f\"{config.targets_path}/{model_name}\")\n open(f\"{config.targets_path}/{model_name}/__init__.py\", \"w\").close()\n with open(f\"{config.targets_path}/{model_name}/{model_name}.py\", \"w\") as f:\n f.write(\n f\"\"\"\n\n# Generated by counterfit #\n\nfrom counterfit.core.targets import {framework}\n\nclass {model_name.capitalize()}({framework}):\n model_name = \"{model_name.lower()}\"\n model_data_type = \"{model_data_type}\"\n model_endpoint = \"\"\n model_input_shape = ()\n model_output_classes = []\n X = []\n\n def __init__(self):\n self.X = []\n\n def __call__(self, x):\n return x\n\"\"\"\n )\n\n CFState.get_instance().import_targets()\n except Exception as e:\n\n self.pwarning(f\"\\n [!] Failed to write target file: {e}.\\n\")\n\n else:\n self.pwarning(f\"\\n [!] {model_name} already exists. 
Choose a new name.\\n\")", "def partial_fit(self, features, targets, active_targets):\n for feature, target in zip(features, targets):\n self.samples.setdefault(target, []).append(feature)\n if self.budget is not None:\n self.samples[target] = self.samples[target][-self.budget:]\n self.samples = {k: self.samples[k] for k in active_targets}", "def partial_fit(self, features, targets, active_targets):\n for feature, target in zip(features, targets):\n self.samples.setdefault(target, []).append(feature)\n if self.budget is not None:\n self.samples[target] = self.samples[target][-self.budget:]\n self.samples = {k: self.samples[k] for k in active_targets}", "def train_step(images, targets):\n # Save all operations\n with tf.GradientTape() as tape:\n # Make prediction\n predictions = model(images)\n # Compute loss\n loss = tf.keras.losses.categorical_crossentropy(targets, predictions)\n # Compute gradients\n gradients = tape.gradient(loss, model.trainable_variables)\n # Update model\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))", "def fit(self, features, target):\n self.target = np.array(target)\n self.features = np.array(features)\n self.length = len(self.target)\n self.targetUS = set(np.unique(self.target))\n targetprob = []\n for i in self.targetUS:\n x = self.prob_find(i, self.target)\n targetprob.append(x)\n for z in self.targetUS:\n for q in targetprob:\n self.target_data[z] = q\n # What is T\n for i in self.features:\n self.feature(i)", "def fit(self, inp, targ):\n self.model.fit(inp, targ, epochs=1, verbose=0)", "def targets(self) -> Optional[jnp.ndarray]:\n pass", "def fit(\n self,\n base_models_predictions: np.ndarray,\n true_targets: np.ndarray,\n model_identifiers: List[Tuple[int, int, float]],\n ) -> 'SingleBest':\n return self", "def train(self, data, targets):\n\n if len(data) != len(self.inputlayer):\n msg = \"Data must have same number of elements as input layer\"\n raise ValueError(msg)\n elif len(targets) != len(self.outputlayer):\n msg = \"Targets must have same number of elements as output layer\"\n raise ValueError(msg)\n\n self._propagate(data)\n self._backpropagate(targets)", "def fit(self, boards, winners, *args, **kwargs):\n self.model.fit(boards, winners, *args, **kwargs)", "def fit(self, features, target, **kwargs):\n self.features = features\n self.target = target\n super(tpot_class, self).fit(features, target, **kwargs)", "def add(self, targets, predictions, values=None):\n if len(targets) != len(predictions):\n raise ValueError, \\\n \"Targets[%d] and predictions[%d]\" % (len(targets),\n len(predictions)) + \\\n \" have different number of samples\"\n\n if values is not None and len(targets) != len(values):\n raise ValueError, \\\n \"Targets[%d] and values[%d]\" % (len(targets),\n len(values)) + \\\n \" have different number of samples\"\n\n # enforce labels in predictions to be of the same datatype as in\n # targets, since otherwise we are getting doubles for unknown at a\n # given moment labels\n nonetype = type(None)\n for i in xrange(len(targets)):\n t1, t2 = type(targets[i]), type(predictions[i])\n # if there were no prediction made - leave None, otherwise\n # convert to appropriate type\n if t1 != t2 and t2 != nonetype:\n #warning(\"Obtained target %s and prediction %s are of \" %\n # (t1, t2) + \"different datatypes.\")\n if isinstance(predictions, tuple):\n predictions = list(predictions)\n predictions[i] = t1(predictions[i])\n\n if values is not None:\n # assure that we have a copy, or otherwise further in-place\n # modifications 
might screw things up (some classifiers share\n # values and spit out results)\n values = copy.deepcopy(values)\n\n self.__sets.append( (targets, predictions, values) )\n self._computed = False", "def output_targets(self, input_targets):\n pass", "def go_train(sources, targets, model, dictloc, max_epochs):\n\n\ttrain.trainer(targets, sources, model, \n\t\tsaveto=\"data/trainer.npz\", \n\t\tdictionary=dictloc, \n\t\tmax_epochs=max_epochs, \n\t\tsaveFreq=100, \n\t\treload_=os.path.isfile(\"data/trainer.npz\")\n\t)", "def eval_batch(self, outputs, target):\n raise NotImplementedError", "def detect(self, targets, images, verbose=0, random_detections=False, eps=1e-6):\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n modellib.log(\"Processing {} images\".format(len(images)))\n for image in images:\n modellib.log(\"image\", image)\n # CHANGE: added target to logs\n modellib.log(\"target\", np.stack(targets))\n\n # Mold inputs to format expected by the neural network\n # CHANGE: Removed moding of target -> detect expects molded target\n # TODO!\n molded_images, image_metas, windows = self.mold_inputs(images)\n # molded_targets, target_metas, target_windows = self.mold_inputs(targets)\n molded_targets = np.stack(targets)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n # CHANGE: add size assertion for target\n target_shape = molded_targets[0].shape\n for g in molded_targets[1:]:\n assert g.shape == target_shape,\\\n \"After resizing, all images must have the same size. 
Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n modellib.log(\"molded_images\", molded_images)\n# modellib.log(\"image_metas\", image_metas)\n # CHANGE: add targets to log\n modellib.log(\"molded_targets\", molded_targets)\n# modellib.log(\"target_metas\", target_metas)\n modellib.log(\"anchors\", anchors)\n # Run object detection\n # CHANGE: Use siamese detection model\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, molded_targets, anchors], verbose=0)\n if random_detections:\n # Randomly shift the detected boxes\n window_limits = utils.norm_boxes(windows, (molded_images[0].shape[:2]))[0]\n y_shifts = np.random.uniform(-detections[0,:,0] + window_limits[0], window_limits[2] - detections[0,:,2])\n x_shifts = np.random.uniform(-detections[0,:,1] + window_limits[1], window_limits[3] - detections[0,:,3])\n zeros = np.zeros(detections.shape[1])\n shifts = np.stack([y_shifts, x_shifts, y_shifts, x_shifts, zeros, zeros], axis=-1)[np.newaxis]\n detections = detections + shifts\n\n # Randomly permute confidence scores\n\n non_zero_confidences = np.where(detections[0,:,-1])[0]\n random_perm = np.random.permutation(non_zero_confidences)\n permuted_confidences = np.concatenate([detections[0,:,-1][:len(non_zero_confidences)][random_perm],\n np.zeros(detections.shape[1] - len(non_zero_confidences))])\n detections = np.concatenate([detections[:,:,:-1], permuted_confidences.reshape(1, detections.shape[1], 1)], axis=-1)\n\n # Keep the sorted order of confidence scores\n detections = detections[:, np.argsort(-detections[0,:,-1]), :]\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results", "def execute(self, targets):", "def fit(self, X, y, classes=None, sample_weight=None):\n N, L = y.shape\n self.n_targets = L\n self.__configure()\n\n for j in range(self.n_targets):\n if 'sample_weight' and 'classes' in signature(self.ensemble[j].fit).parameters:\n self.ensemble[j].fit(X, y[:, j], classes=classes, sample_weight=sample_weight)\n elif 'sample_weight' in signature(self.ensemble[j].fit).parameters:\n self.ensemble[j].fit(X, y[:, j], sample_weight=sample_weight)\n else:\n self.ensemble[j].fit(X, y[:, j])\n return self", "def get_model(seq_length, feat_names, reg_cls='reg', num_output=2, targets=('arousal', 'valence'), use_mask=True,\n fuse=2, use_multitask_loss=False, gaussian_noise=0.):\n if fuse != 2:\n raise ValueError('Only support late fusion at this time.')\n assert num_output == len(targets)\n feature_models = dict()\n input_models = dict()\n for fn in feat_names:\n lstm_units = [64, 64, 64]\n fc_units = [32, 32]\n hparams = {'lstm_units': lstm_units, 'fc_units': fc_units, 'seq_length': seq_length}\n input_models[fn], feature_models[fn] = get_unimodal(hparams, fn,\n use_mask=use_mask, gaussian_noise=gaussian_noise) # Batch size x seq_length x num_feature\n\n if len(feat_names) > 1:\n n_model = len(feat_names)\n merge_feat = 
tf.stack(list(feature_models.values()), axis=-1) # Batch size x seq_length x num_feature x n_model\n merge_wgt = FcStack(units=[n_model], inputs=merge_feat, act='tanh',\n use_norm=False) # Batch size x seq_length x num_feature x n_model\n merge_wgt - FcStack(units=[n_model], inputs=merge_wgt, act='sigmoid',\n use_norm=False) # Batch size x seq_length x num_feature x n_model\n\n merge_feat_rescale = tf.keras.layers.multiply(\n [merge_feat, merge_wgt]) # Batch size x seq_length x num_feature x n_model\n last_feat = tf.keras.backend.sum(merge_feat_rescale, axis=-1) # Batch size x seq_length x num_feature\n else:\n last_feat = list(feature_models.values())[0]\n\n # Do regression or classification\n if reg_cls == 'reg':\n # Regression\n outs = []\n for idx in range(num_output):\n # outs.append(FcStack([1], inputs=last_feat, act='linear', use_norm=False, layername=targets[idx]))\n cur_out = FcStack([1], inputs=last_feat, act='linear', use_norm=False)\n outs.append(tf.keras.layers.Activation('tanh', name=targets[idx])(cur_out))\n # if 'trustworthiness' in targets and len(targets) > 1:\n # trust_index = targets.index('trustworthiness')\n # merge_tasks = tf.concat(outs[:trust_index] + outs[trust_index+1:], axis=-1)\n # out_trust = FcStack(units=[16], inputs=merge_tasks, act='relu', use_norm=True)\n # out_trust = FcStack(units=[1], inputs=out_trust, act='linear', use_norm=False, layername='trustworthiness')\n # outs[trust_index] = out_trust\n\n else:\n # Classification\n raise ValueError('Un-support classification at this time')\n pass\n # outs = FcStack([num_output], inputs=last_feat, act='softmax', use_norm=False, layername=targets[idx])\n\n # ret_model = tf.keras.Model(inputs=list(input_models.values()), outputs=out)\n\n if len(targets) > 1:\n in_out = []\n for idx in range(num_output):\n in_out.append(tf.keras.layers.Input(shape=(seq_length,), name='{}_lb'.format(targets[idx])))\n out_mtl = utils.MultiTaskLoss(num_outputs=num_output, loss_func=utils.ConcordanceLossFunc, trainable=use_multitask_loss)(in_out + outs)\n\n muse_model = tf.keras.Model(inputs=list(input_models.values()) + in_out, outputs=outs + out_mtl)\n else:\n muse_model = tf.keras.Model(inputs=list(input_models.values()), outputs=outs)\n # muse_model = MuSeModel(inputs=list(input_models.values()), outputs=outs)\n return muse_model", "def forward(self, predictions: Dict[str, torch.Tensor],\n targets: Dict[str, torch.Tensor]) -> torch.Tensor:\n detr_outputs = {k: v for k, v in predictions.items() if k in ['logits', 'bboxes', 'masks']}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(detr_outputs, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t) for t in targets[\"labels\"])\n num_boxes = torch.as_tensor([num_boxes], dtype=torch.float,\n device=next(iter(predictions.values())).device)\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, predictions, targets, indices, num_boxes))\n\n # For auxiliary losses, repeat this process with the output of each intermediate layer.\n if 'aux_outputs' in predictions:\n for i, aux_outputs in enumerate(predictions['aux_outputs']):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n if loss == 'masks':\n # Intermediate masks losses 
are too costly to compute, we ignore them.\n continue\n kwargs = {}\n if loss == 'labels':\n # Logging is enabled only for the last layer\n kwargs = {'log': False}\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f'_{i}': v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return self.weight * sum([losses[k] * self.weight_dict[k] for k in losses\n if k in self.weight_dict.keys()])", "def forward(self, idx, targets=None):\n B, T = idx.shape\n tok_emb = self.token_embedding_table(idx) # (B,T,C)\n pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # (T,C)\n x = tok_emb + pos_emb # (B,T,C)\n x = self.blocks(x)\n logits = self.lm_head(x) # (B,T,vocab_size) \n\n if targets is None:\n loss = None\n else:\n B, T, C = logits.shape\n logits = logits.view(B*T, C)\n targets = targets.view(B*T)\n loss = F.cross_entropy(logits, targets)\n return logits, loss", "def forward(self, outputs, targets, mask_dict=None):\n outputs_without_aux = {k: v for k, v in outputs.items() if k != \"aux_outputs\"}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n if self.dn is not \"no\" and mask_dict is not None:\n output_known_lbs_bboxes,num_tgt,single_pad,scalar = self.prep_for_dn(mask_dict)\n exc_idx = []\n for i in range(len(targets)):\n if len(targets[i]['labels']) > 0:\n t = torch.arange(0, len(targets[i]['labels'])).long().cuda()\n t = t.unsqueeze(0).repeat(scalar, 1)\n tgt_idx = t.flatten()\n output_idx = (torch.tensor(range(scalar)) * single_pad).long().cuda().unsqueeze(1) + t\n output_idx = output_idx.flatten()\n else:\n output_idx = tgt_idx = torch.tensor([]).long().cuda()\n exc_idx.append((output_idx, tgt_idx))\n indices = self.matcher(outputs_without_aux, targets)\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_masks = sum(len(t[\"labels\"]) for t in targets)\n num_masks = torch.as_tensor(\n [num_masks], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_masks)\n num_masks = torch.clamp(num_masks / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_masks))\n\n if self.dn != \"no\" and mask_dict is not None:\n l_dict={}\n for loss in self.dn_losses:\n l_dict.update(self.get_loss(loss, output_known_lbs_bboxes, targets, exc_idx, num_masks*scalar))\n l_dict = {k + f'_dn': v for k, v in l_dict.items()}\n losses.update(l_dict)\n elif self.dn != \"no\":\n l_dict = dict()\n l_dict['loss_bbox_dn'] = torch.as_tensor(0.).to('cuda')\n l_dict['loss_giou_dn'] = torch.as_tensor(0.).to('cuda')\n l_dict['loss_ce_dn'] = torch.as_tensor(0.).to('cuda')\n if self.dn == \"seg\":\n l_dict['loss_mask_dn'] = torch.as_tensor(0.).to('cuda')\n l_dict['loss_dice_dn'] = torch.as_tensor(0.).to('cuda')\n losses.update(l_dict)\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_masks)\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n if 'interm_outputs' in outputs:\n start = 0\n else:\n start = 1\n if i>=start:\n if self.dn != \"no\" and mask_dict is not 
None:\n out_=output_known_lbs_bboxes['aux_outputs'][i]\n l_dict = {}\n for loss in self.dn_losses:\n l_dict.update(\n self.get_loss(loss, out_, targets, exc_idx, num_masks * scalar))\n l_dict = {k + f'_dn_{i}': v for k, v in l_dict.items()}\n losses.update(l_dict)\n elif self.dn != \"no\":\n l_dict = dict()\n l_dict[f'loss_bbox_dn_{i}'] = torch.as_tensor(0.).to('cuda')\n l_dict[f'loss_giou_dn_{i}'] = torch.as_tensor(0.).to('cuda')\n l_dict[f'loss_ce_dn_{i}'] = torch.as_tensor(0.).to('cuda')\n if self.dn == \"seg\":\n l_dict[f'loss_mask_dn_{i}'] = torch.as_tensor(0.).to('cuda')\n l_dict[f'loss_dice_dn_{i}'] = torch.as_tensor(0.).to('cuda')\n losses.update(l_dict)\n # interm_outputs loss\n if 'interm_outputs' in outputs:\n interm_outputs = outputs['interm_outputs']\n indices = self.matcher(interm_outputs, targets)\n for loss in self.losses:\n l_dict = self.get_loss(loss, interm_outputs, targets, indices, num_masks)\n l_dict = {k + f'_interm': v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses", "def update_targets(self, indexes: List[int], new_targets: np.ndarray):\n if self.train:\n self.train_nat[indexes, :] = new_targets\n else:\n self.test_nat[indexes, :] = new_targets", "def fit(self, inputs,targets,namda,ifintercept=0): \n targets=np.array(targets) \n inputs=np.array(inputs) \n if inputs.ndim < 2:\n inputs = np.reshape(inputs, (-1,len(inputs)))\n if targets.ndim < 2:\n targets = np.reshape(targets, (-1,len(targets)))\n # self.update(inputs,ifrestart) \n #self.state=discard(self.state)\n #targets=discard(targets)\n #self.bias=np.ones((1,np.shape(targets)[-1]))\n #self.allstate=np.vstack((self.bias,self.state))\n self.coefs=np.dot(solve_2(self.allstate.T,namda,ifintercept),targets.T)", "def forward(self, outputs, targets):\n\n outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs' and k != 'enc_outputs'}\n\n indices = self.matcher['fbox'](outputs_without_aux, targets)\n\n\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / comm.get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if 'aux_outputs' in outputs:\n # 0 , 1, 2, 3, 4\n for i, aux_outputs in enumerate(outputs['aux_outputs']):\n\n\n if i <= self.v_match:\n indices = self.matcher['vbox'](aux_outputs, targets)\n else:\n indices = self.matcher['fbox'](aux_outputs, targets)\n\n for loss in self.losses:\n if i <= self.v_match and loss == 'boxes':\n loss = 'vboxes'\n\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes)\n l_dict = {k + f'_{i}': v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n\n return losses", "def fit(self, features, targets):\n w = np.array([1, 1, 1])\n old_w = np.array([42, 42, 42])\n itercount = 0\n while itercount < self.max_iterations:#not np.array_equal(w, old_w) or \n for itc, example in enumerate(features):\n this_target = targets[itc]\n one_example = np.insert(example, 0, 1)\n prediction = 1 if w.dot(one_example)*this_target >= 0 else -1\n if prediction < 0:\n old_w = w\n w = w + one_example * this_target\n itercount += 1\n print(\"learned weights are\", w)\n self.w = w", "def 
train(self, x_data, y_data):\n for model in self.list_of_models:\n model.fit(x_data, y_data)\n self.trained_models.append(model)", "def fit(self, data: np.array, labels: np.array):\n self.model.fit(squeeze_keep_batch(data), squeeze_keep_batch(labels))", "def add(\n self,\n predictions: List[Union[str, int, bool]],\n targets: List[Union[str, int, bool]],\n scores: List[float],\n ):\n tgt_type = type_of_target(targets)\n if tgt_type not in (\"binary\", \"multiclass\"):\n raise NotImplementedError(\"target type not supported yet\")\n\n if not isinstance(targets, list):\n targets = [targets]\n if not isinstance(predictions, list):\n predictions = [predictions]\n\n if scores is None:\n scores = [1.0 for _ in range(len(targets))]\n\n if len(targets) != len(predictions):\n raise ValueError(\"both targets and predictions need to have the same length\")\n\n targets_indx = encode_to_integers(targets, self.labels)\n prediction_indx = encode_to_integers(predictions, self.labels)\n\n for ind in range(len(predictions)):\n self.confusion_matrix[prediction_indx[ind], targets_indx[ind]].track(scores[ind])", "def forward(self, outputs, targets):\n outputs_without_aux = {k: v for k, v in outputs.items() if k != \"aux_outputs\"}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for\n # normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor(\n [num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n\n # In case of auxiliary losses, we repeat this process with the output of each\n # intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n kwargs = {}\n if loss in (\"labels\", \"labels_balanced\"):\n # Logging is enabled only for the last layer\n kwargs = {\"log\": False}\n l_dict = self.get_loss(\n loss, aux_outputs, targets, indices, num_boxes, **kwargs\n )\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses", "def __call__(self, inputs, targets):\n self._verify_data(inputs, targets)\n height, width = inputs.shape[2], inputs.shape[3]\n\n if self._num_classes is None:\n self._num_classes = self.network(inputs).shape[1]\n\n # Due to the unsupported Op of slice assignment, we use numpy array here\n targets = self._unify_targets(inputs, targets)\n\n attr_np = np.zeros(shape=(inputs.shape[0], targets.shape[1], height, width))\n\n cal_times = math.ceil(self._num_masks / self._perturbation_per_eval)\n\n for idx, data in enumerate(inputs):\n bg_data = data * 0 + self._base_value\n data = op.reshape(data, (1, -1, height, width))\n for j in range(cal_times):\n bs = min(self._num_masks - j * self._perturbation_per_eval,\n self._perturbation_per_eval)\n\n masks = self._generate_masks(data, bs)\n\n weights = masks * data + (1 - masks) * bg_data\n weights = self._activation_fn(self.network(weights))\n while len(weights.shape) > 2:\n weights = op.mean(weights, axis=2)\n\n weights = np.expand_dims(np.expand_dims(weights.asnumpy()[:, 
targets[idx]], 2), 3)\n\n attr_np[idx] += np.sum(weights * masks.asnumpy(), axis=0)\n\n attr_np = attr_np / self._num_masks\n\n return op.Tensor(attr_np, dtype=inputs.dtype)", "def predict_from(self, inputs, to_layers):", "def prepare_gen(self, targets):\r\n pass", "def fit(self, data):\n for v in self.features + self.targets:\n v._fit(data)", "def build(self, targets: list[str], env: dict[str, str] | None = None) -> None:\n self.out_dir.mkdir(parents=True, exist_ok=True)\n self.soong_ui([\"--make-mode\", \"--soong-only\"] + targets, env=env)", "def forward_train(self, imgs, target, target_weight, img_metas, **kwargs):\n # imgs (list[Fxtorch.Tensor[NxCximgHximgW]]): multiple input frames\n assert imgs[0].size(0) == len(img_metas)\n num_frames = len(imgs)\n frame_weight = img_metas[0]['frame_weight']\n\n assert num_frames == len(frame_weight), f'The number of frames ' \\\n f'({num_frames}) and the length of weights for each frame ' \\\n f'({len(frame_weight)}) must match'\n\n if self.concat_tensors:\n features = [self.backbone(torch.cat(imgs, 0))]\n else:\n features = [self.backbone(img) for img in imgs]\n\n if self.with_neck:\n features = self.neck(features, frame_weight=frame_weight)\n\n if self.with_keypoint:\n output = self.keypoint_head(features)\n\n # if return loss\n losses = dict()\n if self.with_keypoint:\n keypoint_losses = self.keypoint_head.get_loss(\n output, target, target_weight)\n losses.update(keypoint_losses)\n keypoint_accuracy = self.keypoint_head.get_accuracy(\n output, target, target_weight)\n losses.update(keypoint_accuracy)\n\n return losses", "def fit_model(self):\n logger.info('Fitting model')\n if self.traj_dict is None:\n self.traj_dict = self.get_traj_dict()\n self.model.fit(self.traj_dict.values())", "def train(self, inputs_list, targets_list):\n # convert inputs list to 2d array\n inputs = cupy.array(inputs_list, ndmin=2).T\n targets = cupy.array(targets_list, ndmin=2).T\n\n # calculate signals into hidden layer\n hidden_inputs = cupy.dot(self.wih, inputs)\n # calculate the signals emerging from hidden layer\n hidden_outputs = self.activation_function(hidden_inputs)\n\n # calcute signals into final output layer\n final_inputs = cupy.dot(self.who, hidden_outputs)\n # calculate signals into final output layer\n final_outputs = self.activation_function(final_inputs)\n\n # output layer error is the (target - actual)\n output_errors = targets - final_outputs\n # hidden layer error is the output_errors, split by weights, recombined at hidden nodes\n hidden_errors = cupy.dot(self.who.T, output_errors)\n\n # update the weights for the links between the hidden and output layers\n # S' = S(1 - S)\n # dE / Dw = K * e * o * (1 - o)\n self.who += self.lr * cupy.dot((output_errors * final_outputs * (\n 1.0 - final_outputs)), cupy.transpose(hidden_outputs))\n self.wih += self.lr * \\\n cupy.dot((hidden_errors * hidden_outputs *\n (1.0 - hidden_outputs)), cupy.transpose(inputs))\n\n pass", "def forward(self, inputs: Tensor, targets: Tensor, **kwargs) -> Tensor:\n return NotImplemented", "def forward(self, outputs, targets):\n outputs_without_aux = {k: v for k, v in outputs.items() if k != \"aux_outputs\"}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor(\n [num_boxes], dtype=torch.float, 
device=next(iter(outputs.values())).device\n )\n\n if comm.get_world_size() > 1:\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / comm.get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n\n # In case of auxiliary losses, we repeat this process with the output of\n # each intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n if loss == \"masks\":\n # Intermediate masks losses are too costly to compute, we ignore them.\n continue\n kwargs = {}\n if loss == \"labels\":\n # Logging is enabled only for the last layer\n kwargs = {\"log\": False}\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses", "def target_factory(targets, user_args):\n finished = []\n if user_args.config_file is not None or user_args.cli_apikeys is not None:\n api_keys = get_config_from_file(user_args)\n else:\n api_keys = None\n init_targets_len = len(targets)\n\n for counter, t in enumerate(targets):\n c.info_news(\"Target factory started for {target}\".format(target=t))\n time.sleep(1 ) #tototo\n current_target = target(t)\n if not user_args.skip_defaults:\n current_target.get_hibp()\n current_target.get_hunterio_public()\n if api_keys is not None:\n c.info_news(\"Factory is calling API keys\")\n if \"hunterio\" in api_keys:\n current_target.get_hunterio_private(api_keys[\"hunterio\"])\n # If chase option. Check we're not chasing added target\n if user_args.chase_limit and counter < init_targets_len:\n chase_limiter = 1\n for i in range(len(current_target.data)):\n if (\n len(current_target.data[i]) >= 2 # Has header & data\n and \"HUNTER_RELATED\" in current_target.data[i][0]\n and chase_limiter <= user_args.chase_limit\n ):\n c.good_news(\n \"Adding {new_target} using HunterIO chase\".format(\n new_target=current_target.data[i][1]\n )\n )\n targets.append(current_target.data[i][1])\n chase_limiter += 1\n\n if \"snusbase_token\" in api_keys:\n current_target.get_snusbase(\n api_keys[\"snusbase_url\"], api_keys[\"snusbase_token\"]\n )\n if \"leak-lookup_priv\" in api_keys:\n current_target.get_leaklookup_priv(api_keys[\"leak-lookup_priv\"])\n if \"leak-lookup_pub\" in api_keys:\n print(\"tototo\")\n current_target.get_leaklookup_pub(api_keys[\"leak-lookup_pub\"])\n if \"weleakinfo_endpoint\" in api_keys and \"weleakinfo_key\" in api_keys:\n from .helpers import weleakinfo_get_auth_token\n\n token = weleakinfo_get_auth_token(\n api_keys[\"weleakinfo_endpoint\"], api_keys[\"weleakinfo_key\"]\n )\n current_target.get_weleakinfo(token)\n\n finished.append(current_target)\n return finished", "def build_model_mobilenet(num_classes):", "def step(self, inputs=None, targets=None):\n if not self.training:\n self.train_mode()\n\n outputs, loss = self.forward(\n inputs=inputs,\n targets=targets\n )\n\n self.update(\n loss=loss,\n inputs=inputs,\n targets=targets,\n outputs=outputs\n )\n\n return outputs, loss", "def train (self, data, targets, epochs = 100, etaDataLayer = 0.2, etaHiddenLayer = 0.2, hiddenLayers = 1, hiddenNeurons = 200, dataWeightsLimit = 0.05, hiddenWeightsLimit = 0.5, regression = True, backPropagate = False, verbose = False):\n\t\tself.data[\"input\"] \t\t= 
numpy.array(data)\n\t\tself.data[\"rows\"] \t\t= len(data)\n\t\tself.data[\"cols\"] \t\t= len(data[0])\n\t\tself.targets \t\t\t= targets\n\t\tself.classCount \t\t= len(set(targets))\n\t\tself.epochs \t\t\t= epochs\n\t\tself.etaDataLayer \t\t= float(etaDataLayer)\n\t\tself.etaHiddenLayer \t= float(etaHiddenLayer)\n\t\tself.hiddenLayers \t\t= hiddenLayers\n\t\tself.hiddenNeurons \t\t= hiddenNeurons\n\t\tself.dataWeightsLimit \t= float(dataWeightsLimit)\n\t\tself.hiddenWeightsLimit\t= float(hiddenWeightsLimit)\n\t\tself.regression \t\t= regression\n\t\tself.backPropagate \t\t= backPropagate\n\t\tself.verbose \t\t\t= verbose\n\t\tself.report \t\t\t= {}\n\t\tself.report[\"total\"] \t= self.epochs * len(data)\n\t\tself.dataWeights \t\t= self.dataWeightsLimit * numpy.random.random_sample((self.data[\"cols\"], self.hiddenNeurons))\n\t\tself.hiddenWeights \t\t= self.hiddenWeightsLimit * numpy.random.random_sample((self.hiddenNeurons + 1, 1))\n\t\tprint \"========================================\"\n\t\tprint \"--- Training . . . ---------------------\"\n\t\tprint \"========================================\"\n\t\tfor epoch in range(self.epochs):\n\t\t\thits \t\t\t\t= 0\n\t\t\tmisses \t\t\t\t= 0\n\t\t\tprocessed \t\t\t= 0\n\t\t\tdistances \t\t\t= []\n\t\t\tpredictedClass \t\t= None\n\t\t\tsampleIndices \t\t= sorted(range(len(self.data[\"input\"])), key = lambda k: random.random())\n\t\t\tif self.verbose == True:\n\t\t\t\tprint \"----------------------------------------\"\n\t\t\t\tprint \"--- Epoch\", epoch\n\t\t\t\tprint \"----------------------------------------\"\n\t\t\tfor sampleIndex in sampleIndices:\n\t\t\t\tprint self.dataWeights\n\t\t\t\tsample \t\t\t= self.data[\"input\"][sampleIndex]\n\t\t\t\ttarget \t\t\t= self.targets[sampleIndex]\n\t\t\t\tif self.verbose == True:\n\t\t\t\t\tprint \"--- Feeding Forward . . . --------------\"\n\t\t\t\t\tprint \" Sample\", sampleIndex + 1, \"of Epoch\", epoch + 1\n\t\t\t\t# Forward Propagation\n\t\t\t\ta \t\t\t\t= 1.0 / (1.0 + numpy.exp(- numpy.dot(sample, self.dataWeights)))\n\t\t\t\tb \t\t\t\t= numpy.concatenate([[1], a])\n\t\t\t\toutput \t\t\t= 1.0 / (1.0 + numpy.exp(- numpy.dot(b, self.hiddenWeights)))[0]\n\t\t\t\t# Metric Computation & Communication\n\t\t\t\tif self.regression == False:\n\t\t\t\t\terror \t\t\t= 0.5 * (((target / (self.classCount - 1)) - output) ** 2)\n\t\t\t\t\tdistance \t\t= abs(target - (output * (self.classCount - 1)))\n\t\t\t\t\tpredictedClass \t= round(self.classCount * output) - 1\n\t\t\t\t\tif predictedClass == target:\n\t\t\t\t\t\thits += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tmisses += 1\n\t\t\t\t\tif self.verbose == True:\n\t\t\t\t\t\tprint \" Annotated Class: \\t\", target\n\t\t\t\t\t\tprint \" Computed Class: \\t\", predictedClass\n\t\t\t\t\t\tprint \" Computed SSE: \\t\\t%0.4f\" % error\n\t\t\t\t\t\tprint \" Raw Distance: \\t\\t%0.4f\" % distance\n\t\t\t\t\t\tif predictedClass == target:\n\t\t\t\t\t\t\tprint \" Prediction Status: \\tHit! :)\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint \" Prediction Status: \\tOops! 
:(\"\n\n\t\t\t\telse:\n\t\t\t\t\terror \t\t\t= 0.5 * ((target - output) ** 2)\n\t\t\t\t\tdistance \t\t= abs(target - output)\n\t\t\t\t\tif self.verbose == True:\n\t\t\t\t\t\tprint \" Annotated Value: \\t\", target\n\t\t\t\t\t\tprint \" Computed Value: \\t%0.4f\" % output\n\t\t\t\t\t\tprint \" Computed SSE: \\t\\t%0.4f\" % error\n\t\t\t\t\t\tprint \" Raw Distance: \\t\\t%0.4f\" % distance\n\t\t\t\tprocessed += 1\n\t\t\t\tdistances.append(distance)\n\t\t\t\t# Back Propagation\n\t\t\t\tif self.verbose == True:\n\t\t\t\t\tprint \"--- Back Propagating . . . -------------\"\n\t\t\t\tdeltaDataWeights \t= ((target / self.classCount - 1) - output) * output * (1 - output)\n\t\t\t\tdeltaHiddenWeights \t= numpy.delete((b * (1 - b) * self.hiddenWeights * deltaDataWeights)[0], 0.0)\n\t\t\t\tupdateHiddenWeights = etaHiddenLayer * b * deltaDataWeights\n\t\t\t\tupdatedHiddenLayer \t= b + updateHiddenWeights\n\t\t\t\tself.hiddenWeights \t= numpy.transpose(numpy.atleast_2d(updatedHiddenLayer))\n\t\t\t\tupdateDataWeights \t= etaDataLayer * numpy.outer(sample, deltaHiddenWeights)\n\t\t\t\tself.dataWeights \t= self.dataWeights + updateDataWeights\n\t\t\t\tprint self.dataWeights\n\t\t\t\tif self.verbose == True:\n\t\t\t\t\tprint \"--- Sample Processed -------------------\\n\"\n\n\t\t\t\t# updateDataWeights \t= etaDataLayer * numpy.array(sample) * deltaHiddenWeights\n\n\t\t\t\t# # Multiple Hidden Layer Routine\n\t\t\t\t# for hiddenLayer in range(hiddenLayers):\n\t\t\tif self.verbose == True:\n\t\t\t\tprint \"----------------------------------------\"\n\t\t\t\tprint \"--- Epoch\", epoch, \"Complete\"\n\t\t\t\tprint \"----------------------------------------\"\n\t\t\t\tif self.regression == False:\n\t\t\t\t\taccuracy \t\t= (hits/processed) * 100\n\t\t\t\t\tprint \" \tEpoch Hits / Total:\\t\", hits, \"/\", processed\n\t\t\t\t\tprint \" \tEpoch Hit Percent:\\t%0.2f\" % (float(hits) / processed * 100), \"\\n\"\n\t\t\t\telse:\n\t\t\t\t\tprint \"\\n\"\n\t\tprint \"========================================\"\n\t\tprint \"--- Training Complete ------------------\"\n\t\tprint \"========================================\"", "def main(feats_name, targets_name, model_name, n_boot, seed_start, output_filename, train_test_flag):\n\n #load feats and targets\n input_dict = {}\n input_dict['feats'] = 'data/%s' % (feats_name)\n input_dict['targets'] = 'data/%s' % (targets_name)\n #load the feats and targets\n df = pd.read_csv(\"%s\" % (input_dict['feats']))\n targets = pd.read_csv(\"%s\" % (input_dict['targets']))\n #drop columns not used for prediction\n drop_cols = [\"Unnamed: 0\",\"index\"]\n for dc in drop_cols:\n if dc in targets.columns:\n targets = targets.drop(dc,axis=1)\n if dc in df.columns:\n df = df.drop(dc,axis=1)\n #reduce to training or test set only if requested\n if (train_test_flag == 'train') and ('test_set' in df.columns):\n targets = targets[df['test_set'] == 0]\n df = df[df['test_set'] == 0]\n elif (train_test_flag == 'test') and ('test_set' in df.columns):\n targets = targets[df['test_set'] == 1]\n df = df[df['test_set'] == 1]\n df = df.drop('test_set', axis = 1)\n \n #broadcast the feats and targets\n df_b = sc.broadcast(df)\n targets_b = sc.broadcast(targets)\n\n #Set up the classifier. 
3fold CV for selection of regularization term.\n if model_name == 'linear':\n model = LinearRegression(fit_intercept=True,\n normalize=False,\n copy_X=True,\n n_jobs=1) \n elif model_name == 'lasso':\n model = LassoCV(alphas = [.05,.1,.2],\n normalize = False,\n fit_intercept = True,\n verbose = False,\n copy_X = False,\n n_jobs = 3)\n elif model_name == 'ridge':\n model = RidgeCV(alphas = [.00001,.0001,.001,.01,.1,1,10,100,1000,10000],\n normalize = False,\n fit_intercept = True,\n verbose = 1,\n cv = 3)\n else:\n raise ValueError('model_name not recognized.')\n \n #Create an RDD that specifies prng seed to use\n samp_list = [(n,) for n in np.arange(seed_start, seed_start+n_boot)]\n samp_rdd = sc.parallelize(samp_list,n_boot) #create RDD with one partition for each row (second arg is number of partitions)\n #Create a function that takes a tuple as input and returns \n def func(tup):\n \"\"\"\n Takes as input a tuple containing an integer. The integer specifies the random seed that will be used to \n randomly sample, with replacement, observations from the feats set provided. The model is fitted to the \n sampled feats. Resulting best fit parameters, along with some other summary statistics and information are\n provided as input in a JSON string that will be written to the output file when all jobs are completed.\n \n Parameters\n ----------\n tup, rdd\n - series of tuples with different integer values defining the RNG seed to be used to sample observations\n \n Returns\n ----------\n tup[0], int\n - the seed that was used\n json.dumps(results_dict), str\n - dict in json format with the following keys:\n - alpha, the regularization term providing the best fit according to 3 fold cross-validation\n - random_state, the initial state used for fitting\n - training_feats, the name of the training_feats csv file\n - training_targets, the name of the target variable csv file\n - cv, the type of cross-validation used\n - sklearn_version, which version of sklearn was used\n - mse_min, the mean squared error for the test set on each fold\n - r2, the r-squared value (% var explained)\n - coef, parameter vector\n - intercept, intercept parameter\n - column_names, feature name corresponding to each parameter in the parameter vector\n \"\"\"\n #take a random sample with replacement\n np.random.seed(seed=tup[0]) #set the seed\n n_obs = np.shape(df_b.value)[0] #number of observations determines sample size\n samp = list(np.random.randint(0,high=n_obs,size=n_obs)) #draw the random sample with replacement\n #fit the model\n tic = time.time()\n results = model.fit(df_b.value.iloc[samp,:],np.ravel(targets_b.value.iloc[samp]))\n toc = tic - time.time()\n #save the results in a dict\n results_dict = {}\n results_dict['alpha'] = results.alpha_\n results_dict['random_state'] = results.random_state\n results_dict['training_feats'] = input_dict['feats']\n results_dict['training_targets'] = input_dict['targets']\n results_dict['cv'] = results.cv\n results_dict['sklearn_version'] = sklearn.__version__\n results_dict['mse_min'] = results.mse_path_.min()\n results_dict['r2'] = results.score(df_b.value.iloc[samp,:],np.ravel(targets_b.value.iloc[samp]))\n results_dict['coef'] = list(results.coef_)\n results_dict['intercept'] = results.intercept_\n results_dict['column_names'] = [i for i in df_b.value.columns]\n results_dict['fit_time'] = toc\n #convert results dict to json and save in tuple\n return(json.dumps(results_dict))\n\n #fit model in parallel\n results = samp_rdd.map(lambda p: func(p))\n #save to text file\n 
results.saveAsTextFile(output_filename)\n #stop the SparkContext.\n if not local_mode:\n sc.stop()", "def train(self, features, labels):\n self._clf.fit(features, labels)", "def train(self, features, labels):\n self._clf.fit(features, labels)", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def classify_batch(args, model, targets, targets_seqlen, targets_mask, tweets, tweets_seqlen, tweets_mask):\n assert len(tweets) == len(targets)\n #python assert断言是声明其布尔值必须为真的判定,如果发生异常就说明表达示为假。可以理解assert断言语句为raise-if-not,用来测试表示式,其返回值为假,就会触发异常。\n\n model.eval()\n \"\"\"特别注意的是需要用 model.eval(),让model变成测试模式,这主要是对dropout和batch normalization的操作在训练和测试的时候是不一样的\"\"\"\n ''' Prepare data and prediction准备数据和预测'''\n batch_size = len(targets)\n from main_batch import var_batch\n targets_, targets_seqlen_, targets_mask_, tweets_, tweets_seqlen_, tweets_mask_ = \\\n var_batch(args, batch_size,\n targets, targets_seqlen, targets_mask,\n tweets, tweets_seqlen, tweets_mask)\n\n probs, _ = model((tweets_, tweets_seqlen_, tweets_mask_),\n (targets_, targets_seqlen_, targets_mask_))\n\n pred_weight, pred = torch.max(probs, dim=1)\n\n if args.cuda:\n pred = pred.view(-1).cpu().data.numpy()\n pred_weights = pred_weight.view(-1).cpu().data.numpy()\n else:\n pred = pred.view(-1).data.numpy()\n pred_weights = pred_weight.view(-1).data.numpy()\n\n return pred, pred_weights", "def train(self, features, actions, rewards):\n self.model.fit(x=features, y=actions, sample_weight=rewards,\n verbose=False)", "def _metadata_update_targets(targets):\n affidavit = _create_affidavit()\n firmwares = db.firmware.get_all()\n for target in targets:\n firmwares_filtered = []\n for f in firmwares:\n if f.target == 'private':\n continue\n if f.target != target:\n continue\n firmwares_filtered.append(f)\n if target == 'stable':\n _generate_metadata_kind('firmware.xml.gz',\n firmwares_filtered,\n affidavit=affidavit)\n elif target == 'testing':\n _generate_metadata_kind('firmware-testing.xml.gz',\n firmwares_filtered,\n affidavit=affidavit)", "def fit(\n self,\n X: Optional[np.ndarray],\n y: Optional[Union[np.ndarray, np.array, pd.Series]],\n ):\n self.epochs_trained = 0\n # if design_matrix/targets None assume dataloaders were initialized elsewhere\n if X is not None:\n if type(y) is pd.Series:\n self.initialize_dataloaders(X, np.array(y))\n else:\n assert type(y) in (np.ndarray, np.array)\n self.initialize_dataloaders(X, y)\n self.network.load_state_dict(self.network_initial_state_dict)\n self.optimizer.load_state_dict(self.optimizer_initial_state_dict)\n if self.scheduler:\n self.scheduler.load_state_dict(self.scheduler_initial_state_dict)\n self._train()", "def predict(self, scenes, tmp_dir):\n self.backend.load_model(tmp_dir)\n\n for scene in scenes:\n with scene.activate():\n labels = self.predict_scene(scene, tmp_dir)\n label_store = scene.prediction_label_store\n label_store.save(labels)\n\n if self.config.debug and self.config.predict_debug_uri:\n self.save_debug_predict_image(\n scene, self.config.predict_debug_uri)", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def train(self, src, labels): # real signature unknown; restored from __doc__\n pass", "def train(self, inputs_list, targets_list):\n # convert inputs list to 2d array\n inputs = numpy.array(inputs_list, ndmin=2).T\n targets = numpy.array(targets_list, ndmin=2).T\n\n # calculate signals into hidden layer\n hidden_inputs = numpy.dot(self.wih, inputs)\n # calculate the signals emerging from hidden layer\n 
hidden_outputs = self.activation_function(hidden_inputs)\n\n # calcute signals into final output layer\n final_inputs = numpy.dot(self.who, hidden_outputs)\n # calculate signals into final output layer\n final_outputs = self.activation_function(final_inputs)\n\n # output layer error is the (target - actual)\n output_errors = targets - final_outputs\n # hidden layer error is the output_errors, split by weights, recombined at hidden nodes\n hidden_errors = numpy.dot(self.who.T, output_errors)\n\n # update the weights for the links between the hidden and output layers\n # S' = S(1 - S)\n # dE / Dw = K * e * o * (1 - o)\n self.who += self.lr * numpy.dot((output_errors * final_outputs * (\n 1.0 - final_outputs)), numpy.transpose(hidden_outputs))\n self.wih += self.lr * \\\n numpy.dot((hidden_errors * hidden_outputs *\n (1.0 - hidden_outputs)), numpy.transpose(inputs))\n\n pass", "def update_model(self, boards, outcomes, verbose=0):\n\t\t# Compile the model\n\t\tself.model.compile(\n\t\t\toptimizer='adam',\n\t\t\tloss='mse'\n\t\t)\n\n\t\t# Train the model\n\t\tmf = self.model.fit(boards, outcomes, verbose=verbose)\n\n\t\t# Crush the graph, maybe that is what is messing with RAM?\n\t\ttf.keras.backend.clear_session()", "def forward(self, inputs: Tensor, targets: Tensor = None, batch_seen: int = None) -> Tensor:\n # print(inputs.shape,targets.shape)\n b, n_hist, n, input_dim = inputs.shape\n inputs = inputs.transpose(0, 1).reshape(self.n_hist, b, -1)\n inputs = self.encoder_linear(inputs)\n\n if targets is not None:\n targets = self.encoder_linear(targets.transpose(0, 1).reshape(self.n_pred, b, -1))\n h, c = self.encoding(inputs)\n outputs = self.decoding((h, c), targets, self._compute_sampling_threshold(batch_seen))\n outputs = outputs.transpose(0, 1) #b, n_pred, hidden_size\n return self.decoder_linear(outputs).reshape(b, self.n_pred, n, -1)", "def fit(self, features, targets):\r\n \r\n \r\n # if no\r\n # run information gain on each row\r\n # see which attribute has the largest information gain\r\n # split on this attribute\r\n \r\n \r\n self._check_input(features)\r\n\r\n # Creating the root\r\n self.tree = Tree()\r\n \r\n all_features_original = np.asarray(self.attribute_names)\r\n \r\n self._extend_tree(features, targets, all_features_original, all_features_original, self.tree)", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def forward(self, input_ids, attention_mask=None, decoder_input_ids=None, lm_labels=None):\n return self.model(\n input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, lm_labels=lm_labels,\n )", "def fit(self, views: Iterable[np.ndarray], y=None, **kwargs):\n return self", "def __call__(self, models, x, y, z=None, xbinsize=None, ybinsize=None, err=None, bkg=None, bkg_scale=1, **kwargs):\n\n tie_list = []\n try:\n n_inputs = models[0].n_inputs\n except TypeError:\n n_inputs = models.n_inputs\n\n self._data = Dataset(n_inputs, x, y, z, xbinsize, ybinsize, err, bkg, bkg_scale)\n\n if self._data.ndata > 1:\n\n if len(models) == 1:\n self._fitmodel = ConvertedModel([models.copy() for _ in xrange(self._data.ndata)], tie_list)\n # Copy the model so each data set has the same model!\n elif len(models) == self._data.ndata:\n self._fitmodel = ConvertedModel(models, tie_list)\n else:\n raise Exception(\"Don't know how to handle 
multiple models \"\n \"unless there is one foreach dataset\")\n else:\n if len(models) > 1:\n self._data.make_simfit(len(models))\n self._fitmodel = ConvertedModel(models, tie_list)\n else:\n self._fitmodel = ConvertedModel(models)\n\n self._fitter = Fit(self._data.data, self._fitmodel.sherpa_model, self._stat_method, self._opt_method, self._est_method, **kwargs)\n self.fit_info = self._fitter.fit()\n\n return self._fitmodel.get_astropy_model()", "def __batch_to_torch(self, mids, targets):\n mids = Variable(torch.LongTensor(mids))\n targets = Variable(torch.FloatTensor(targets))\n \n if self.cuda:\n mids, targets = mids.cuda(), targets.cuda()\n \n return mids, targets", "def run_training(\n self, model_data: RasaModelData, label_ids: Optional[np.ndarray] = None\n ) -> None:\n if not self.finetune_mode:\n # This means the model wasn't loaded from a\n # previously trained model and hence needs\n # to be instantiated.\n self.model = self.model_class()(\n model_data.get_signature(),\n self.config,\n isinstance(self.featurizer, MaxHistoryTrackerFeaturizer),\n self._label_data,\n self._entity_tag_specs,\n )\n self.model.compile(\n optimizer=tf.keras.optimizers.Adam(self.config[LEARNING_RATE])\n )\n (\n data_generator,\n validation_data_generator,\n ) = rasa.utils.train_utils.create_data_generators(\n model_data,\n self.config[BATCH_SIZES],\n self.config[EPOCHS],\n self.config[BATCH_STRATEGY],\n self.config[EVAL_NUM_EXAMPLES],\n self.config[RANDOM_SEED],\n )\n callbacks = rasa.utils.train_utils.create_common_callbacks(\n self.config[EPOCHS],\n self.config[TENSORBOARD_LOG_DIR],\n self.config[TENSORBOARD_LOG_LEVEL],\n self.tmp_checkpoint_dir,\n )\n\n if self.model is None:\n raise ModelNotFound(\"No model was detected prior to training.\")\n\n self.model.fit(\n data_generator,\n epochs=self.config[EPOCHS],\n validation_data=validation_data_generator,\n validation_freq=self.config[EVAL_NUM_EPOCHS],\n callbacks=callbacks,\n verbose=False,\n shuffle=False, # we use custom shuffle inside data generator\n )", "def generate(self, models, sample, **kwargs):\n net_input = sample['net_input']\n\n def batch_for_softmax(dec_out, target):\n # assumes decoder_out[0] is the only thing needed (may not be correct for future models!)\n first, rest = dec_out[0], dec_out[1:]\n bsz, tsz, dim = first.shape\n if bsz * tsz < self.softmax_batch:\n yield dec_out, target, True\n else:\n flat = first.contiguous().view(1, -1, dim)\n flat_tgt = target.contiguous().view(flat.shape[:-1])\n s = 0\n while s < flat.size(1):\n e = s + self.softmax_batch\n yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False\n s = e\n\n def gather_target_probs(probs, target):\n probs = probs.gather(\n dim=2,\n index=target.unsqueeze(-1),\n )\n return probs\n\n orig_target = sample['target']\n\n # compute scores for each model in the ensemble\n avg_probs = None\n avg_attn = None\n for model in models:\n model.eval()\n decoder_out = model(**net_input)\n attn = decoder_out[1]\n if type(attn) is dict:\n attn = attn.get('attn', None)\n\n batched = batch_for_softmax(decoder_out, orig_target)\n probs, idx = None, 0\n for bd, tgt, is_single in batched:\n sample['target'] = tgt\n curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data\n if is_single:\n probs = gather_target_probs(curr_prob, orig_target)\n else:\n if probs is None:\n probs = curr_prob.new(orig_target.numel())\n step = curr_prob.size(0) * curr_prob.size(1)\n end = step + idx\n tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), 
tgt)\n probs[idx:end] = tgt_probs.view(-1)\n idx = end\n sample['target'] = orig_target\n\n probs = probs.view(sample['target'].shape)\n\n if avg_probs is None:\n avg_probs = probs\n else:\n avg_probs.add_(probs)\n if attn is not None and torch.is_tensor(attn):\n attn = attn.data\n if avg_attn is None:\n avg_attn = attn\n else:\n avg_attn.add_(attn)\n if len(models) > 1:\n avg_probs.div_(len(models))\n avg_probs.log_()\n if avg_attn is not None:\n avg_attn.div_(len(models))\n\n bsz = avg_probs.size(0)\n hypos = []\n start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz\n for i in range(bsz):\n # remove padding from ref\n ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \\\n if sample['target'] is not None else None\n tgt_len = ref.numel()\n avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len]\n score_i = avg_probs_i.sum() / tgt_len\n if avg_attn is not None:\n avg_attn_i = avg_attn[i]\n alignment = utils.extract_hard_alignment(\n avg_attn_i,\n sample['net_input']['src_tokens'][i],\n sample['target'][i],\n self.pad,\n self.eos,\n )\n else:\n avg_attn_i = alignment = None\n hypos.append([{\n 'tokens': ref,\n 'score': score_i,\n 'attention': avg_attn_i,\n 'alignment': alignment,\n 'positional_scores': avg_probs_i,\n }])\n return hypos", "def __call__(self, images, targets):\n pass", "def predict_missing_values(self, data, targets, features):\n for target in targets:\n cols = features + [target]\n train_fit_mask = pd.notnull(\n data.loc[self.train_index, target])\n # train_df = data.loc[:, cols].dropna()\n train_fill_mask = pd.isnull(data.loc[self.train_index, target])\n hyper_params_model = lm.LassoCV(normalize=True, copy_X=True, n_jobs=-1).fit(\n data.loc[train_fit_mask, features], data.loc[train_fit_mask, target])\n model = lm.Lasso(alpha=hyper_params_model.alpha_,\n copy_X=True, normalize=True)\n model.fit(data.loc[train_fit_mask, features],\n data.loc[train_fit_mask, target])\n data.loc[train_fill_mask, target] = model.predict(\n data.loc[train_fill_mask, features])\n if str(self.test_index) != 'None':\n if pd.isnull(self.data.loc[self.test_index, target]).any():\n test_fill_mask = pd.isnull(\n self.data.loc[self.test_index, target])\n print self.test.loc[test_fill_mask, features]\n self.data.loc[test_fill_mask, target] = model.predict(\n self.data.loc[test_fill_mask, features])\n return data", "def update_targets(self):\n self.actor.update_target_network()\n self.critic.update_target_network()", "def targets_placeholder(self):", "def fit(self, inputs, labels=None):\n msg = 'The function fit() is not available in the class ' \\\n '`EnsembleDetector`.'\n LOGGER.error(TAG, msg)\n raise NotImplementedError(msg)", "def reset(targets):", "def predict(self, boards, **kwargs):\n return self.model.predict(boards, **kwargs)", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def train(self, features, labels):\n pass", "def train_models(self, clf, silent, feature_names=None, target_names=None, live=False):\n X_train, X_test, y_train, y_test = self.X_train, self.X_test, self.y_train, self.y_test\n t0 = time()\n clf.fit(X_train, y_train)\n train_time = time() - t0\n pred = clf.predict(X_test)\n test_time = time() - t0\n accuracy = metrics.accuracy_score(y_test, pred)\n fbeta = metrics.fbeta_score(y_test, pred,1,labels=self.dataset['label'].unique(),average='weighted')\n name = clf.name[0]\n if False:\n score_stats = 
f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(score_stats)\n\n if self.best_score_ledger[name][0] < accuracy:\n last = self.best_score_ledger[name][0]\n print(name)\n self.best_score_ledger[name] = [accuracy,fbeta]\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(self.stemmer, ' ', self.transform)\n print(score_stats)\n\n if accuracy > self.best_models[name] and last != 0.0 and self.tuning_depth in ['normal','maximal']:\n new_model,score = self.hyperparameter_tuning(name,clf)\n if score > accuracy:\n self.best_score_ledger[name][0] = score\n clf = new_model\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n\n\n\n if not silent:\n if hasattr(clf, 'coef_'):\n print(\"dimensionality: %d\" % clf.coef_.shape[1])\n print(\"density: %f\" % density(clf.coef_))\n\n if True and feature_names is not None:\n print(\"top 10 keywords per class:\")\n for i, label in enumerate(target_names):\n top10 = np.argsort(clf.coef_[i])[-10:]\n print(trim(\"%s: %s\" % (label, \" \".join(feature_names[top10]))))\n print()\n\n if True:\n print(\"classification report:\")\n print(metrics.classification_report(y_test, pred,\n target_names=target_names))\n\n if True:\n print(\"confusion matrix:\")\n print(metrics.confusion_matrix(y_test, pred))\n # if no model exists for the current settings, create one by default. Prevents issues if models are deleted.\n elif not os.path.exists(\n os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}')):\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n clf_descr = str(clf).split('(')[0]\n return clf_descr, accuracy, train_time, test_time", "def add_targets(self, targets: List[\"ConfigUnit\"]) -> None:\n for target in targets:\n self.add_target(target)", "def _train(self,\n Xs: Array,\n Ys: Array,\n metric: Callable = None,\n **kwargs):\n self.model.fit(Xs, Ys, **kwargs)\n return None", "def forward(self, inputs):\n if len(inputs) < self.config.train.batch_size:\n x = torch.zeros(self.config.train.batch_size, dtype=torch.long, requires_grad=False)\n x[:len(inputs)] = inputs\n inputs = x\n print(\"Padded:\", inputs)\n # print(\"input idx shp:\", inputs.shape)\n # print(\"embeddings shp:\", self.embeddings.shape)\n input_tokens = torch.LongTensor(self.embeddings[inputs, :])\n input_mask = torch.LongTensor(self.masks[inputs, :])\n # print(\"input tokens shp:\", input_tokens.shape)\n logits = self.model(input_tokens, attention_mask=input_mask)[0]\n # print(logits)\n # print(\"Check https://mc.ai/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic/ for potential training required stuff\")\n return logits", "def __call__(self, target_labels: List[Tensor], fg_probs: Tensor):\n raise NotImplementedError", "def __call__(\n self,\n batch: PredictionBatch,\n target: Target,\n scores: torch.FloatTensor,\n ) -> None:\n raise NotImplementedError", "def fit(self, model_name, **model_params):\n model = self.model_dict[model_name]\n model.set_params(**model_params)\n self.model = model.fit(\n self.data.loc[:, self.selected_features_], self.data.loc[:, self.target_name])", "def fit(self, X, Y):\n if self.model is None:\n print(\"%s.fit: implement me\" % (self.__class__.__name__))", "def train(self, X, y):\n self.model.fit(X, y)", "def output_targets(self, input_targets):\n return input_targets", "def __init__(self, models, tgt_dict, eos_penalty=0.0, max_iter=2, 
max_ratio=2, decoding_format=None, retain_dropout=False, adaptive=True):\n super().__init__()\n self.models = models\n self.bos = tgt_dict.bos()\n self.pad = tgt_dict.pad()\n self.unk = tgt_dict.unk()\n self.eos = tgt_dict.eos()\n self.vocab_size = len(tgt_dict)\n self.eos_penalty = eos_penalty\n self.max_iter = max_iter\n self.max_ratio = max_ratio\n self.decoding_format = decoding_format\n self.retain_dropout = retain_dropout\n self.adaptive = adaptive\n for i, model in enumerate(self.models):\n model.prepare_for_onnx_export_()\n model.eval()\n if hasattr(model, 'get_student_model'):\n model = model.get_student_model()\n self.models[i] = model\n self._modules[f'model_{i}'] = model", "def _finalize_targets(target_values, binarize_target, num_classes):\n\n target_values[target_values == target_val_utils.DEAD_STORM_INTEGER] = 0\n\n if binarize_target:\n target_values = (target_values == num_classes - 1).astype(int)\n num_classes_to_predict = 2\n else:\n num_classes_to_predict = num_classes + 0\n\n if num_classes_to_predict == 2:\n print('Fraction of {0:d} examples in positive class: {1:.3f}'.format(\n len(target_values), numpy.mean(target_values)\n ))\n return target_values\n\n target_matrix = keras.utils.to_categorical(\n target_values, num_classes_to_predict)\n\n class_fractions = numpy.mean(target_matrix, axis=0)\n print('Fraction of {0:d} examples in each class: {1:s}\\n'.format(\n len(target_values), str(class_fractions)\n ))\n\n return target_matrix", "def predict_target(\n model: Model,\n *,\n # exactly one of them is None\n head: Union[None, int, str] = None,\n relation: Union[None, int, str] = None,\n tail: Union[None, int, str] = None,\n #\n triples_factory: Optional[TriplesFactory] = None,\n targets: Union[None, torch.LongTensor, Sequence[Union[int, str]]] = None,\n mode: Optional[InductiveMode] = None,\n) -> Predictions:\n # note: the models' predict method takes care of setting the model to evaluation mode\n\n # get input & target\n target, batch, other_col_ids = _get_input_batch(factory=triples_factory, head=head, relation=relation, tail=tail)\n\n # get label-to-id mapping and prediction targets\n labels, ids, targets = _get_targets(\n ids=targets, triples_factory=triples_factory, device=model.device, entity=relation is not None\n )\n\n # get scores\n scores = model.predict(batch, full_batch=False, mode=mode, ids=targets, target=target).squeeze(dim=0)\n if ids is None:\n ids = range(len(scores))\n\n # note: maybe we want to expose these scores, too?\n if target == LABEL_RELATION and model.use_inverse_triples:\n ids_t = torch.as_tensor(ids)\n non_inv_mask = ~model.relation_inverter.is_inverse(ids_t)\n ids = ids_t[non_inv_mask].tolist()\n scores = scores[non_inv_mask]\n\n # create raw dataframe\n data = {f\"{target}_id\": ids, \"score\": scores.tolist()}\n if labels is not None:\n data[f\"{target}_label\"] = labels\n df = pandas.DataFrame(data=data).sort_values(\"score\", ascending=False)\n return TargetPredictions(df=df, factory=triples_factory, target=target, other_columns_fixed_ids=other_col_ids)", "def fit(self, X, Y):\n n_labels = Y.shape[1]\n for idx in range(n_labels):\n y = Y[:, idx]\n predictor = SVR()\n predictor.fit(X, y)\n self.predictors.append(predictor)", "def fit(model, data, test_ids, exp_name, datasets):\n if model.model_type == 'torch':\n size = len(data[0])\n else:\n size = data[0].shape[0]\n \n train_ids = [i for i in range(size) if i not in test_ids]\n scaler = pka_scaler(data[1][train_ids])\n if model.data_type == 'descriptors':\n desc_scaler = 
StandardScaler()\n desc_scaler.fit(data[0][train_ids])\n data[0] = desc_scaler.transform(data[0])\n \n trained_model = train(model, train_ids, data, scaler, datasets)\n results = test(model, trained_model, test_ids, data, scaler)\n model.experiments.append({'name':exp_name,'model':trained_model, 'results':results, 'scaler':scaler})\n return results", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def fit(self, Xs, y=None):\n Xs = check_Xs(Xs)\n n_features_ = [X.shape[1] for X in Xs]\n if len(set(n_features_)) > 1:\n raise ValueError(\n \"The number of features in each dataset should be the same.\"\n )\n self.n_feature_ = n_features_[0]\n self.n_views_ = len(n_features_)\n return self" ]
[ "0.7428789", "0.6653737", "0.61757916", "0.60686624", "0.60502565", "0.5901099", "0.58539677", "0.5839297", "0.5839297", "0.5835604", "0.58182526", "0.5763666", "0.575089", "0.57495534", "0.5730885", "0.5730016", "0.57109356", "0.56859463", "0.5679586", "0.5662925", "0.56607246", "0.56597567", "0.56542903", "0.56509334", "0.5623809", "0.5584048", "0.5580634", "0.55795664", "0.5568812", "0.55653673", "0.55565107", "0.55356187", "0.5516433", "0.5502804", "0.5483132", "0.54757756", "0.5464175", "0.54325944", "0.54235923", "0.542249", "0.5415465", "0.54109997", "0.5403991", "0.54038304", "0.5396654", "0.53901756", "0.53890795", "0.5364333", "0.5356238", "0.5335933", "0.5324163", "0.53231263", "0.53231263", "0.53226817", "0.531603", "0.5315185", "0.5314203", "0.53118", "0.5309015", "0.53001755", "0.52991533", "0.52928597", "0.5291692", "0.5287404", "0.52832085", "0.52802974", "0.52802974", "0.52802974", "0.52757907", "0.5274697", "0.52631396", "0.5263025", "0.52598965", "0.52571684", "0.5256597", "0.52542573", "0.5252914", "0.5229434", "0.522849", "0.52262336", "0.5223457", "0.5216175", "0.521553", "0.5211995", "0.52003723", "0.5199976", "0.5194546", "0.5193894", "0.5172554", "0.51724696", "0.5172272", "0.5165937", "0.51646733", "0.5162591", "0.51558465", "0.51406956", "0.51383895", "0.5136976", "0.5135706", "0.5132156", "0.5131469" ]
0.0
-1
Do something when the button is pressed
def gambar_plot(self):
    # read the contents of the Line Edit in the X column and
    # store it in the variable self.nilai_x,
    # converting it to a number at the same time
    try:
        # print the value of X
        # check whether this is the first plot or not
        if self.pertamaPlot:
            self.x = float(self.input_x.text())
            self.y = float(self.input_y.text())
            self.input_x.setEnabled(False)
            self.input_y.setEnabled(False)
            self.input_az.setEnabled(True)
            self.input_jarak.setEnabled(True)
            self.reset.setEnabled(True)
            self.check_garis.setEnabled(True)
            # check whether the point layer has already been created
            if self.pertamaPoint:
                # create the point layer
                self.layerTitik = self.buat_layer("Plot Titik","Point")
                self.measureDialog.show()
                self.pertamaPoint = False
            self.buat_titik()
            self.pertamaPlot = False
        else:
            # check whether the line layer has already been created
            if self.pertamaLine:
                # check whether the user has ticked the line checkbox
                if self.check_garis.isChecked():
                    # create the line layer
                    self.layerGaris = self.buat_layer("Plot Garis","LineString")
                    self.pertamaLine = False
            self.hitung_azimuth_jarak()
    except Exception as e:
        print(e)
        iface.messageBar().pushMessage("Error","anda salah memasukkan input", level=Qgis.Warning,duration=3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mezclar_bolsa(self):", "def agregar_bolsa(self, letra, cantidad):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def main():\r\n\r\n print(\"Berikut adalah daftar naga yang tersedia.\")\r\n for naga in daftar_naga:\r\n naga.hp_sementara = naga.hp_maks\r\n print(naga)\r\n\r\n indeks_naga: int = int(input(\"Tolong masukkan indeks dari naga pilihan Anda: \"))\r\n while indeks_naga < 0 or indeks_naga >= len(daftar_naga):\r\n indeks_naga = int(input(\"Maaf, input Anda tidak sah! Tolong masukkan indeks dari naga pilihan Anda: \"))\r\n\r\n naga_pilihan: Naga = daftar_naga[indeks_naga]\r\n naga_musuh: Naga = daftar_naga[random.randint(0, len(daftar_naga) - 1)]\r\n print(naga_pilihan)\r\n print(naga_musuh)\r\n giliran: int = 0 # nilai semula\r\n while naga_pilihan.hp_sementara >= 0 and naga_musuh.hp_sementara >= 0:\r\n giliran += 1\r\n # Giliran Anda adalah ketika nilai 'giliran' itu ganjil dan giliran musuh adalah ketika nilai 'giliran'\r\n # itu genap\r\n if giliran % 2 == 1:\r\n print(naga_pilihan.serang(naga_musuh))\r\n else:\r\n print(naga_musuh.serang(naga_pilihan))\r\n\r\n if naga_musuh.hp_sementara < 0:\r\n print(\"Anda menang!!!\")\r\n break\r\n if naga_pilihan.hp_sementara < 0:\r\n print(\"Anda kalah!!!\")\r\n break\r\n\r\n print(\"Tekan Y untuk ya.\")\r\n print(\"Tekan tombol apapun yang lainnya untuk tidak.\")\r\n tanya: str = input(\"Apakah Anda mau bertarung lagi? \")\r\n if tanya == \"Y\":\r\n main()\r\n else:\r\n sys.exit()", "def cliquer_sur_unité(self):", "def reemplaza_tildes(palabra):", "def pohyb(seznam_tahu, seznam_ovoce, tah,radky, sloupce):\n\n x= seznam_tahu [len(seznam_tahu)-1][0] # [x,y] souradnice noveho tahu\n y= seznam_tahu [len(seznam_tahu)-1][1]\n\n if tah == \"s\": #sever\n y -= 1\n elif tah == \"j\": #jih\n y += 1\n elif tah == \"v\": #vychod\n x += 1\n elif tah == \"z\": #zapad\n x -= 1\n else:\n print(\"Zadal jsi spatne pismeno.\" )\n return()\n\n if x<0 or x>sloupce-1 or y<0 or y>radky-1: #tah mimo pole\n print(\"Tah neni mozny, je mimo hraci pole. Opakuj tah.\")\n elif [x,y] in seznam_tahu: #jiz obsazene policko hadem\n print(\"Tah neni mozny, had uz na nem je. 
Opakuj tah.\")\n elif [x,y] in seznam_ovoce: #policko s ovocem, vola se funkce snez\n snez (seznam_ovoce, seznam_tahu,[x,y],radky, sloupce)\n else:\n seznam_tahu.append([x,y]) #tah na volne policko, prida se tah a odebere posledni bod\n seznam_tahu.pop(0)", "def ustal_kon(self, f):\n kon= Kon.objects.using(settings.DBS(self.firma)).filter(id= f.nip_nabywcy)\n if kon:\n return kon[0]\n \n kon= Kon()\n \n # Numer dla zagranicznego\n nr_kon= Kon.objects.using(settings.DBS(self.firma)).exclude(nr_kon__startswith= 'Z').aggregate(Max('nr_kon'))\n kon.nr_kon= '{:05d}'.format(int(nr_kon['nr_kon__max'].strip())+1)\n\n if '/' in f.nazwa_nabywcy:\n kon.skrot, kon.nazwa= f.nazwa_nabywcy.split('/')\n else:\n kon.nazwa= f.nazwa_nabywcy\n \n kon.id= f.nip_nabywcy\n kon.idtyp= 'NIPUE' if re.match('[A-Z][A-Z]', f.nip_nabywcy) else 'NIP'\n kon.ulica, kon.kod, kon.miejsc= self.adres_kon(f.adres_nabywcy)\n \n kon.kraj= f.nip_nabywcy[:2] if re.match('[A-Z][A-Z]', f.nip_nabywcy) else 'PL'\n \n kon.id_obcy= f.id # zapamiętanie skąd się zwiął (faktura)\n \n kon.skrot= su(kon.skrot)\n kon.nazwa= su(kon.nazwa)\n kon.miejsc= su(kon.miejsc)\n kon.ulica= su(kon.ulica)\n \n kon.kiedy= datetime.date.today() # data utworzenia\n kon.data_us= kon.kiedy\n if f.termin_platnosci and f.data_wystawienia:\n kon.term_zap= (f.termin_platnosci - f.data_wystawienia).days\n \n kon.save(using= settings.DBS(self.firma))\n \n return kon", "def verteileKarten(anzahlSpieler):\n pass", "def initialize_bolsa(self,nivel):", "def buat_titik(self):\n # memberi geometri pada fitur baru\n # Memberi fitur titik\n feature = QgsFeature()\n feature.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(self.x, self.y)))\n self.measureDialog.insertParams(self.idTitik,self.x,self.y)\n # menambahkan fitur pada layer\n self.layerTitik.dataProvider().addFeatures([feature])\n self.layerTitik.updateExtents()\n\n self.layerTitik.startEditing()\n self.layerTitik.changeAttributeValue(self.idTitik,0,self.idTitik)\n self.layerTitik.changeAttributeValue(self.idTitik,1,self.x )\n self.layerTitik.changeAttributeValue(self.idTitik,2,self.y )\n self.layerTitik.commitChanges()\n self.idTitik = self.idTitik+1\n\n self.iface.actionZoomToLayer().trigger()", "def __init__(self,nama,NIM,kota,us):\r\n self.nama = nama\r\n self.NIM = NIM\r\n self.kotatinggal =kota\r\n self.uangsaku = us", "def __init__(self,nama,um,kota,us) :\r\n self.nama = nama\r\n self.umur = um\r\n self.kotaTinggal = kota\r\n self.uangSaku = us", "def kluisOpenen():\r\n kluisDict = dictionary()\r\n beginSchermTopTitel['text'] = ''\r\n beginSchermTerug.grid(pady=3, padx=(10, 10), sticky='w', row=1)\r\n\r\n for kluis in kluisDict:\r\n try:\r\n if kluisDict[kluis] is not None and int(beginSchermEntry.get()) in kluisDict[kluis]: # kluis zoeken in\r\n # dictionary\r\n beginSchermTitel['text'] = 'Kluis nummer ' + str(kluis) + ' is geopend'\r\n beginSchermEntry.delete(0, END)\r\n return\r\n except ValueError:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return\r\n beginSchermTitel['text'] = 'Dit OV nummer is onbekend'\r\n return", "def bloqueio_2(tab,jog):\r\n jog*=-1\r\n return vitoria_1(tab,jog)", "def __init__(self):\n self.modelo = [\"A\", \"sucio\", \"sucio\",\"sucio\", \"sucio\",\"sucio\", \"sucio\"]", "def sprawdz(lista):\n # do_usuniecia - lista zawierajaca indeksy pol ktore zostana usuniete z glownej listy\n do_usuniecia = []\n # petla przechodzaca po wartosciach\n for i in range(len(lista) / 2):\n # j - indeks wartosci dla poszczgolnego panstwa\n j = 2 * i + 1\n # k - indeks pod ktorym nie ma 
wartosci\n k = 0\n # sprawdzanie ktore elementy sa bez wartosci oraz dodawanie ich do listy do usuniecia\n for el in lista[j]:\n if el is None:\n # zastosowanie unikalnosci indeksow\n if not k in do_usuniecia:\n do_usuniecia.append(k)\n\n k += 1\n # sortowanie listy z indeksami do usuniecia w sposob rosnacy\n do_usuniecia.sort()\n # nowalista - lista zawierajaca statystyki dostepne dla wszystkich panstw odpowiednio [Lata],[Wartosc]\n nowalista = []\n for i in range(len(lista)):\n # wartosc - lista zawierajaca poszczegolne dane z glownej listy\n wartosc = []\n # dodawanie wartosci, ktore sa dostepne dla wszystkich panstw do tabeli wartosc\n for j in range(len(lista[i])):\n # zastosowanie unikalnosci indeksow dla ktorych nie ma wartosci\n if not j in do_usuniecia:\n wartosc.append(lista[i][j])\n # dodawanie listy zawierajacej wynik dla poszczegolnych danych\n nowalista.append(wartosc)\n\n return nowalista", "def __init__(self,nama,NIM,kota,us):\r\n self.nama = nama\r\n self.NIM = NIM\r\n self.kotaTinggal = kota\r\n self.uangsaku = us", "def change_glavniLabel(self, ulaz):\r\n mapa = ulaz['opis']\r\n mjerenjeId = ulaz['mjerenjeId']\r\n datum = ulaz['datum']\r\n postaja = mapa['postajaNaziv']\r\n komponenta = mapa['komponentaNaziv']\r\n formula = mapa['komponentaFormula']\r\n# mjernaJedinica = mapa['komponentaMjernaJedinica']\r\n# opis = '{0}, {1}( {2} ) [{3}]. Datum : {4} . mjerenjeId:{5}'.format(postaja, komponenta, formula, mjernaJedinica, datum, mjerenjeId)\r\n opis = '{0}, {1}( {2} ). Datum : {3} . mjerenjeId:{4}'.format(postaja, komponenta, formula, datum, mjerenjeId)\r\n self.glavniLabel.setText(opis)\r\n logging.info('glavniLabel promjenjen, value = {0}'.format(opis))", "def moi(self):\n\n pass", "def getInfoDrum(self, l):\n for i in range(0, len(l)):\n for j in range(len(l[i].info.oameni)):\n l[i].info.oameni[j].timp_asteptat = 0\n l[i].info.oameni[j].timp_mers = 0\n l[i].info.oameni[j].traseu = None\n \n for i in range(1, len(l)):\n for j in range(len(l[i].info.oameni)):\n index_prev = l[i-1].info.getOmIndex(l[i].info.oameni[j].name)\n if l[i].info.oameni[j].state != l[i-1].info.oameni[index_prev].state:\n if l[i-1].info.oameni[index_prev].state == \"waiting\" and l[i].info.oameni[j].state == \"travelling\":\n l[i].info.oameni[j].timp_asteptat = l[i-1].info.oameni[index_prev].timp_asteptat + l[i].info.time - l[i-1].info.time\n l[i].info.oameni[j].timp_mers = l[i-1].info.oameni[index_prev].timp_mers\n elif l[i-1].info.oameni[index_prev].state == \"travelling\" and l[i].info.oameni[j].state == \"waiting\":\n l[i].info.oameni[j].timp_mers = l[i-1].info.oameni[index_prev].timp_mers + l[i].info.time - l[i-1].info.time\n l[i].info.oameni[j].timp_asteptat = l[i-1].info.oameni[index_prev].timp_asteptat\n else:\n if l[i].info.oameni[j].state == \"travelling\":\n l[i].info.oameni[j].timp_mers = l[i-1].info.oameni[index_prev].timp_mers + l[i].info.time - l[i-1].info.time\n l[i].info.oameni[j].timp_asteptat = l[i-1].info.oameni[index_prev].timp_asteptat\n elif l[i].info.oameni[j].state == \"waiting\":\n l[i].info.oameni[j].timp_asteptat = l[i-1].info.oameni[index_prev].timp_asteptat + l[i].info.time - l[i-1].info.time\n l[i].info.oameni[j].timp_mers = l[i-1].info.oameni[index_prev].timp_mers\n\n for i in range(1, len(l)):\n if l[i].info.event.tip == \"boarding\":\n temp_traseu = [l[i].info.event.om.current_loc]\n j = i\n while j < len(l) and (l[j].info.event.tip != \"unboarding\" or l[j].info.event.om.name != l[i].info.event.om.name):\n j += 1\n if j >= len(l):\n return None\n unboarding_loc = 
l[j].info.event.om.current_loc\n direction = l[i].info.event.autobuz.direction_forward\n index_boarding = l[i].info.event.autobuz.getIndexLoc(temp_traseu[0])\n index_unboarding = l[i].info.event.autobuz.getIndexLoc(unboarding_loc)\n if direction:\n for x in range(index_boarding+1, index_unboarding+1):\n temp_traseu.append(l[i].info.event.autobuz.destinations[x])\n else:\n for x in range(index_boarding-1, index_unboarding-1, -1):\n temp_traseu.append(l[i].info.event.autobuz.destinations[x])\n l[i].info.oameni[l[i].info.getOmIndex(l[i].info.event.om.name)].traseu = temp_traseu\n for x in range(i, j):\n index = l[x].info.getOmIndex(l[i].info.event.om.name)\n assert index != None\n l[x].info.oameni[index].traseu = temp_traseu\n\n return l", "def tabelaOrareve():\n \n linja = 1\n kpm = \"3\"\n\n #print(f\"linja {oraret['linja1']} mberrin ne {kpm} minuta\")\n print(f\"Oraret per linjen 1 :\\n {oraret['linja1']}\\n, {oraret['linja2']}\\n, {oraret['linja3']}\\n\")", "def cargar_bolsa(self,lista):\n self.bolsa = lista", "def merkkaa_miina(kentta):\n while True:\n print(\"Voit merkata tyhjän paikan x:llä tai poistaa merkkauksen syöttämällä merkatun paikan koordinaatit uudestaan.\")\n print(\"Merkataan ruutu x:llä\")\n merkattava_ruutu = input(\"- Syötä koordinaatit välilyönnillä erotettuna: \").split()\n print(\"------------------------------------------------\")\n if len(merkattava_ruutu) == 0:\n print(\">>> Syötä koordinaatit kokonaislukuina välilyönnillä erotettuna toisistaan!\")\n tulosta_kentta(kentta, miinat)\n continue\n elif merkattava_ruutu[0] == \"q\":\n return \"q\"\n elif len(merkattava_ruutu) != 2:\n print(\">>> Syötä kaksi koordinaattia kokonaislukuina välilyönnillä erotettuna toisistaan!\")\n tulosta_kentta(kentta, miinat)\n continue\n try:\n miinan_leveys = int(merkattava_ruutu[0])\n miinan_korkeus = int(merkattava_ruutu[1])\n if miinan_leveys >= len(kentta[0]) or miinan_korkeus >= len(kentta) or miinan_leveys < 0 or miinan_korkeus <0:\n print(\">>> Syöttämäsi koordinaatit ovat kentän ulkopuolella. 
Yritä uudestaan.\")\n tulosta_kentta(kentta, miinat)\n continue\n except ValueError:\n print(\">>> Anna koordinaatit kokonaislukuina!\")\n tulosta_kentta(kentta, miinat)\n else:\n if kentta[miinan_korkeus][miinan_leveys] == \"-\":\n kentta[miinan_korkeus][miinan_leveys] = \"x\"\n tulosta_kentta(kentta, miinat)\n elif kentta[miinan_korkeus][miinan_leveys] == \"x\":\n kentta[miinan_korkeus][miinan_leveys] = \"-\"\n tulosta_kentta(kentta, miinat)\n else:\n print(\">>> Et voi merkata avattua ruutua!\")\n tulosta_kentta(kentta, miinat)\n return miinan_leveys, miinan_korkeus", "def batalhar(i, j):\n if (i.medabot.ataque > j.medabot.defesa or j.medabot.ataque > i.medabot.defesa) and i.medabot.ataque - j.medabot.defesa != j.medabot.ataque - i.medabot.defesa:\n if i.medabot.ataque - j.medabot.defesa > j.medabot.ataque - i.medabot.defesa:\n troca_peca(i, j)\n return i\n else:\n troca_peca(j, i)\n return j\n elif i.habilidade != j.habilidade:\n if i.habilidade > j.habilidade:\n troca_peca(i, j)\n return i\n else:\n troca_peca(j, i)\n return j\n else:\n if i.ID < j.ID:\n troca_peca(i, j)\n return i\n else:\n troca_peca(j, i)\n return j", "def strategia_pocitaca(pole):\n dict_pole = dict(enumerate(list_pole))\n cislo_policka = False\n\n for k, v in dict_pole.items():\n print(type(k), v)\n k1_idx = int(k)\n k2_idx = False\n k3_idx = False\n k0_idx = False\n if v == \"O\":\n print(\"O found\")\n k1_idx = int(k)\n k2_idx = k + 1\n k3_idx = k + 2\n k0_idx = k - 1\n if k <=18:\n if dict_pole[k2_idx] == \"O\" and dict_pole[k3_idx] == \"-\":\n if k3_idx not in mozne_policka1:\n mozne_policka1.append(k3_idx)\n \n elif dict_pole[k3_idx] == \"O\" and dict_pole[k2_idx] == \"-\":\n if k2_idx not in mozne_policka1:\n mozne_policka1.append(k2_idx)\n\n elif dict_pole[k2_idx] == \"-\":\n if k2_idx not in mozne_policka2:\n mozne_policka2.append(k2_idx)\n\n elif dict_pole[k3_idx] == \"-\":\n if k3_idx not in mozne_policka2:\n mozne_policka2.append(k3_idx)\n\n elif dict_pole[k0_idx] == \"-\":\n if k0_idx not in mozne_policka2:\n mozne_policka2.append(k0_idx)\n\n else: \n cislo_policka = randint(0,19)\n else:\n cislo_policka = False\n print(cislo_policka, mozne_policka1, mozne_policka2)\n for x in range(0, 20):\n try:\n mozne_policka1.remove(20)\n mozne_policka1.remove(21)\n mozne_policka2.remove(20)\n mozne_policka2.remove(21)\n except:\n ValueError\n return cislo_policka, mozne_policka1, mozne_policka2", "def change_glavniLabel(self, ulaz):\r\n mapa = ulaz['opis']\r\n mjerenjeId = ulaz['mjerenjeId']\r\n postaja = mapa['postajaNaziv']\r\n komponenta = mapa['komponentaNaziv']\r\n formula = mapa['komponentaFormula']\r\n mjernaJedinica = mapa['komponentaMjernaJedinica']\r\n opis = '{0}, {1}( {2} ) [{3}]. mjerenjeId:{4}'.format(postaja, komponenta, formula, mjernaJedinica, mjerenjeId)\r\n self.glavniLabel.setText(opis)", "def pokazPrzedmiot(self,przedmiot:str)->None:\n try:\n print(self.przedmioty[przedmiot])\n except KeyError:\n print(\"Nie ma takiego przedmiotu\")", "def change_glavniLabel(self, ulaz):\r\n mapa = ulaz['opis']\r\n mjerenjeId = ulaz['mjerenjeId']\r\n datum = ulaz['datum']\r\n postaja = mapa['postajaNaziv']\r\n komponenta = mapa['komponentaNaziv']\r\n formula = mapa['komponentaFormula']\r\n mjernaJedinica = mapa['komponentaMjernaJedinica']\r\n opis = '{0}, {1}( {2} ) [{3}]. Datum : {4} . 
mjerenjeId:{5}'.format(postaja, komponenta, formula, mjernaJedinica, datum, mjerenjeId)\r\n self.glavniLabel.setText(opis)\r\n msg = 'glavniLabel promjenjen, value = {0}'.format(opis)\r\n logging.info(msg)", "def sth():", "def exo2():", "def podziel(self):\n def fraktal(dlugosc, alpha, poziom):\n \"\"\"Metoda wyznaczajaca fraktal.\n\n Metoda ta przyjmuje dlugosc, kat oraz poziom drzewa.\n Na bazie podanych parametrow wylicza fraktal z podanych w zadaniu wzorow.\n Zwraca liste zawierajaca punkX oraz punktY fraktalu.\n \"\"\"\n#obliczanie punktow punktu Abis dla kazdego poziomu galezi\n x = float(self.p2[0] + self.dlugosc * cos(alpha))\n y = float(self.p2[1] + self.dlugosc * sin(alpha))\n return [round(x), round(y)]\n\n#petla przechodzaca po wszystkich poziomach drzewa\n while self.tmp <= self.poziom:\n#obliczanie grubosci, dlugosci galezi oraz kolorowanie jej\n self.grubosc = float((2 * self.grubosc + 1) / 3)\n self.dlugosc = float((2 * self.dlugosc) / 3)\n self.kolor += 6\n\n #sprawdzenie czy kolor nie wyszedl po za skale maksymalnej wartosci\n if self.kolor > 255:\n self.kolor = 255\n\n#rozbicie obliczen na poziom 1 i wyzej\n#Abis jest to punkt prawy dla kazdej galezi\n#B jest to punkt srodkowy dla kazdej galezi\n#C jest to punkt srodkowy dla kazdej galezi\n\n#obliczenia dla pierwszego poziomu\n if self.tmp < 2:\n#obliczenie fraktalu, prawa galaz dla kazdej galezi\n#podstawienie obliczonych wartosci z punktu Abis do pozostalych wedlug podanych wzorow\n Abis = fraktal(self.dlugosc, self.alpha, self.poziom)\n B = [round(self.p2[0]), round(Abis[1])]\n C = [round(-Abis[0] + 2 * self.p2[0]), round(Abis[1])]\n\n#zwiekszenie poziomu drzewa o jeden\n self.tmp += 1\n\n#tutaj nastepuje zwrocenie obiektow typu Branch z nowo obliczonymi wartosciami\n return [Branch(self.p2, Abis, self.dlugosc, self.grubosc, self.kolor, self.alpha, self.tmp),\n Branch(self.p2, B, self.dlugosc, self.grubosc, self.kolor, self.alpha, self.tmp),\n Branch(self.p2, C, self.dlugosc, self.grubosc, self.kolor, self.alpha, self.tmp)]\n#obliczenia poziomow wyzej niz pierwszy\n else:\n#obliczanie kata dla punktu prawego\n self.zetprim = randint(-1, 1) * randint(1, self.s)\n self.beta = self.alpha + self.zetprim\n\n#obliczanie kata dla punktu srodkowego\n self.zetbis = randint(-1, 1) * randint(1, self.s)\n self.gamma = self.alpha + self.zetbis\n\n#obliczanie kata dla punktu lewego\n self.zetter = randint(-1, 1) * randint(1, self.s)\n self.teta = self.alpha + self.zetter\n\n#obliczenie fraktalu, prawa galaz dla kazdej galezi\n#podstawienie obliczonych wartosci z punktu Abis do pozostalych wedlug podanych wzorow\n Abis = fraktal(self.dlugosc, self.beta, self.poziom)\n B = [round(self.p2[0]), round(Abis[1])]\n C = [round(-Abis[0] + 2 * self.p2[0]), round(Abis[1])]\n\n#zwiekszenie poziomu drzewa o jeden\n self.tmp += 1\n\n#tutaj nastepuje zwrocenie obiektow typu Branch z nowo obliczonymi wartosciami\n return [Branch(self.p2, Abis, self.dlugosc, self.grubosc, self.kolor, self.beta, self.tmp),\n Branch(self.p2, B, self.dlugosc, self.grubosc, self.kolor, self.gamma, self.tmp),\n Branch(self.p2, C, self.dlugosc, self.grubosc, self.kolor, self.teta, self.tmp)]", "def mutasi(self, keturunan):\n for i in range(self.panjangKromosom):\n if random.uniform(0, 1) < self.probMutasi:\n # membalik nilai bit nya\n keturunan[0][i] = 1 - keturunan[0][i]\n keturunan[1][i] = 1 - keturunan[1][i]\n return keturunan", "def MINET(self):", "def loeschen(self):\r\n loeschen=self.REQUEST['loeschen']\r\n tit=''\r\n i=0\r\n j=0\r\n index=[]\r\n cursor=[]\r\n for 
x in self.objectValues('Image'):\r\n if str(x.id())[0:6] not in index:\r\n index.append(str(x.id())[0:6]) \r\n cursor.append([str(x.id())[0:6],str(x.title),[str(x.id())]])\r\n if str(x.id())[0:6]==loeschen:\r\n tit=str(x.title)\r\n j=i\r\n i=i+1\r\n else:\r\n cursor[-1][2].append(str(x.id()))\r\n #for val in cursor[j][2]:\r\n #self._delOb(self, id=str(val))\r\n #delet=delet+str(val)+' '\r\n self.manage_delObjects(ids=cursor[j][2])\r\n return tit+' gel&ouml;scht !'", "def pick_up(self):", "def prebaci_dan_nazad(self):\r\n value = int(self.brojDanaCombo.currentText()) #integer broj dana\r\n self.emit(QtCore.SIGNAL('promjeni_datum(PyQt_PyObject)'), -value)\r\n msg = 'request pomak {0} dana unazad'.format(value)\r\n logging.info(msg)", "def get_lengte(self):", "def kluisAanvragen():\r\n kluisDict = dictionary()\r\n beginSchermTopTitel['text'] = ''\r\n beginSchermTerug.grid(pady=3, padx=(10, 10), sticky='w', row=1)\r\n\r\n try:\r\n if len(beginSchermEntry.get()) == 16:\r\n for getal in kluisDict:\r\n if kluisDict[getal] is not None and kluisDict[getal][1] == int(beginSchermEntry.get()):\r\n beginSchermTitel['text'] = 'Je hebt al een kluis: nummer ' + str(getal)\r\n return\r\n\r\n with open('FietsStalling.txt', 'r+') as readFile:\r\n for kluis in kluisDict:\r\n if kluisDict[kluis] is None: # kluis toewijzen\r\n beginSchermTitel['text'] = 'Kluis nummer ' + str(kluis)\r\n kluisDict[kluis] = (time.strftime('%d-%m-%Y %H:%M'),\r\n int(beginSchermEntry.get())) # value wordt tijd en OV\r\n readFile.truncate(0)\r\n readFile.seek(0)\r\n for item in kluisDict: # bestand updaten (nieuwe kluis toevoegen)\r\n if kluisDict[item] is not None:\r\n readFile.write(str(item) + '; ' + ''.join(str(kluisDict[item])).strip('{}()\\'\\'')\r\n .replace('\\'', '') + '\\n')\r\n beginSchermEntry.delete(0, END)\r\n return\r\n beginSchermTitel['text'] = 'Geen kluizen vrij'\r\n return\r\n else:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return\r\n except ValueError:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def zapisi_pot(pot):", "def ikkuna(nimi, x_data, y_data, syote, funktio):\n nimi = ik.luo_ali_ikkuna(\"Spektri\")\n kirjasto[nimi] = nimi\n piirtoalue, kuvaaja = ik.luo_kuvaaja(nimi, valitse_datapiste, 1000, 650)\n kirjasto[\"kuvaaja\"] = kuvaaja\n lisaa = kuvaaja.add_subplot()\n lisaa.plot(x_data, y_data)\n lisaa.set_xlabel('Energia')\n lisaa.set_ylabel('Intensiteetti')\n piirtoalue.draw()\n ik.luo_nappi(nimi, syote, funktio)\n ik.luo_nappi(nimi, \"Tallenna\", tallentaja)\n kirjasto[\"pisteet\"] = []\n ik.kaynnista()", "def __str__(self):\n return self.idBaixasPagar", "def get_label():\r\n\r\n user = check_auth(request.headers, __name__)\r\n if user != True:\r\n return user\r\n user = authorize.get(request.headers.get('UserToken'))\r\n\r\n vozvrat = {}\r\n try:\r\n database = Database(config)\r\n except TypeError:\r\n vozvrat[\"messageError\"] = \"Нет подключения к БД\"\r\n return jsonify(vozvrat)\r\n\r\n vozvrat = []\r\n\r\n fields = [\r\n \"u.firstname\",\r\n \"u.lastname\",\r\n \"up.id\",\r\n \"up.name\",\r\n \"up.photo\",\r\n \"up.type\",\r\n \"up.method\",\r\n \"up.sale\",\r\n \"up.price\",\r\n \"c.name\",\r\n \"up.weight\",\r\n \"u2.name\",\r\n \"fp.id\",\r\n \"a.country\",\r\n \"a.city\",\r\n \"a.address\",\r\n \"a.lat\",\r\n \"a.lng\"\r\n ]\r\n\r\n query = sql.SQL(\"SELECT {} FROM users u \\\r\n RIGHT JOIN users_product up on u.id = up.user_id\\\r\n LEFT JOIN 
units u2 on up.unit_id = u2.id\\\r\n LEFT JOIN currencys c on up.currency_id = c.id\\\r\n LEFT JOIN favorit_products fp on u.id = fp.user_id\\\r\n LEFT JOIN address a on up.address_id = a.id\").format(\r\n sql.SQL(\",\").join(sql.Identifier(\r\n i.split('.')[0], i.split('.')[1]) for i in fields)\r\n )\r\n execute = database.select_data(query)\r\n if type(execute) != list:\r\n return execute\r\n\r\n data_append = {}\r\n for row in execute:\r\n for i in range(len(fields)):\r\n value = row[i]\r\n\r\n if fields[i] == \"up.id\":\r\n fields[i] = \"up.users_product_id\"\r\n if fields[i] == \"c.name\":\r\n fields[i] = \"c.currency\"\r\n if fields[i] == \"u2.name\":\r\n fields[i] = \"u2.unit\"\r\n if fields[i] == \"fp.id\":\r\n fields[i] = \"fp.is_favorit\"\r\n value = True if value != None else False\r\n\r\n data_append[fields[i].split('.')[1]] = value\r\n vozvrat.append(data_append)\r\n\r\n return jsonify(vozvrat)", "def piku():\n pass", "def kluisInfoTg(ovnummer):\r\n kluisDict = dictionary()\r\n\r\n for kluis in kluisDict:\r\n try:\r\n if kluisDict[kluis] is not None and ovnummer in kluisDict[kluis]: # kluis zoeken in dictionary\r\n huidigeKosten = 'De huidige kosten zijn €' + str(prijs(kluisDict[kluis][0]))\r\n return huidigeKosten\r\n except ValueError:\r\n huidigeKosten = 'Geen geldige invoer'\r\n return huidigeKosten", "def choix_mode_jeu(L):\n while True:\n efface_tout()\n texte(185,20,'SNAKE','black','nw',\"Purisa\",50)\n rectangle(560,360,599,399,'black','#A9A9A9')\n rectangle(570,363,590,396,'#8D3500','#C74B00',3)\n cercle(585,380,3,'#C69803','#FFE100')\n rectangle(200,160,400,200,'black','#A9A9A9')\n rectangle(200,220,400,260,'black','#A9A9A9')\n rectangle(200,280,400,320,'black','#A9A9A9')\n rectangle(200,340,400,380,'black','#A9A9A9')\n texte(232,161,'Classique')\n texte(227,221,'Labyrinthe')\n texte(222,281,'Multiplayer')\n texte(240,341,'Multi-lab')\n mise_a_jour()\n x,y,z = attente_clic()\n if 560<=x<=600:\n if 360<=y<=400:\n break\n elif 200<=x<=400:\n if 160 <= y <= 200:\n touches = assigne_touches(1)\n ferme_fenetre()\n Points = classique(L,touches[0])\n ferme_fenetre()\n cree_fenetre(600,400)\n elif 220 <= y <= 260:\n touches = assigne_touches(1)\n ferme_fenetre()\n Points = labyrinthe(L,touches[0])\n ferme_fenetre()\n cree_fenetre(600,400)\n elif 280 <= y <= 320:\n L[7] = nombre_de_joueurs(L[7])\n if L[7] != 0:\n touches = assigne_touches(L[7])\n ferme_fenetre()\n Points = multijoueurs(L,touches)\n ferme_fenetre()\n cree_fenetre(600,400)\n elif 340 <= y <= 380:\n L[7] = nombre_de_joueurs(L[7])\n if L[7] != 0:\n touches = assigne_touches(L[7])\n ferme_fenetre()\n Points = multijoueurs_labyrinthe(L,touches)\n ferme_fenetre()\n cree_fenetre(600,400)", "def bloqueio_de_bifurcacao_4(tab,jog): \r\n if len(bifurcacao_3(tab,-1*jog)) == 1 :\r\n return bifurcacao_3(tab,-1*jog)[0]\r\n else:\r\n for i in range(1,4):\r\n if obter_coluna(tab,i).count(jog)==1:\r\n col = obter_coluna(tab,i)\r\n for j in range(3):\r\n if col[j]==0:\r\n pos1=3*j+i\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n \r\n if obter_linha(tab,i).count(jog)==1:\r\n linha = obter_linha(tab,i)\r\n for j in range(3):\r\n if linha[j]==0:\r\n pos1=j+1+3*(i-1)\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n \r\n if i < 3 and obter_diagonal(tab,i).count(jog)==1:\r\n diagonal = obter_diagonal(tab,i)\r\n for j in range(3):\r\n if i==1:\r\n if diagonal[j]==0:\r\n pos1=4*j+i\r\n newtab = marcar_posicao(tab, 
jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n else:\r\n if diagonal[j]==0:\r\n pos1=7-2*j\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1", "def degibber(self):", "def __init__(self):\n self.enfila= 0\n self.fila = []", "def lineaarinen():\n x = []\n y = []\n if not kirjasto[\"korjaus\"]:\n try:\n for erottaja in kirjasto[\"lineaariset_arvot\"]:\n x_arvo, y_arvo = erottaja\n x.append(x_arvo)\n y.append(y_arvo)\n kirjasto[\"lineaariset_arvot\"] = []\n kirjasto[\"pisteet\"] = []\n if x and x[0] != x[1] and y[0] != y[1]:\n kk = (y[1]-y[0])/(x[1]-x[0])\n intensiteetti_korjaus = []\n for j in kirjasto[\"kineettiset_energiat\"]:\n y_korjaava = kk * (j - x[0]) + y[0]\n intensiteetti_korjaus.append(y_korjaava)\n for k, l in enumerate(kirjasto[\"intensiteetit\"]):\n korjaus = l - intensiteetti_korjaus[k]\n kirjasto[\"korjaus\"].append(korjaus)\n else:\n ik.avaa_viesti_ikkuna(\"Error\", \"Korjauspisteiden valinnassa tapahtui virhe\")\n return\n except IndexError:\n ik.avaa_viesti_ikkuna(\"Error\", \"Korjauspisteitä ei ole valittu\")\n else:\n ikkuna(\"korjattu_spektri\", kirjasto[\"kineettiset_energiat\"], kirjasto[\"korjaus\"], \"Integroi\", integrointi)\n else:\n ikkuna(\"korjattu_spektri\", kirjasto[\"kineettiset_energiat\"], kirjasto[\"korjaus\"], \"Integroi\", integrointi)", "def canto_vazio_7(tab, jog):\r\n for x in [1,3,7,9]:\r\n if eh_posicao_livre(tab,x):\r\n return x", "def es_satisfecho_por(self, candidata):", "def kluisInfo():\r\n kluisDict = dictionary()\r\n beginSchermTerug.grid(pady=3, padx=(10, 10), sticky='w', row=1)\r\n\r\n for kluis in kluisDict:\r\n try:\r\n if kluisDict[kluis] is not None and int(beginSchermEntry.get()) in kluisDict[kluis]: # kluis zoeken in\r\n # dictionary\r\n beginSchermTopTitel['text'] = fietsStalTijd(kluisDict[kluis][1]) # functie fietsStalTijd op kluis\r\n # aanroepen\r\n beginSchermTitel['text'] = 'De huidige kosten zijn €' + str(prijs(kluisDict[kluis][0]))\r\n beginSchermEntry.delete(0, END)\r\n return\r\n except ValueError:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return\r\n beginSchermTitel['text'] = 'Dit OV nummer is onbekend'\r\n return", "def falcon():", "def successeurs(self,etat):\n pass", "def uvozi(self, encoding=\"UTF-8\"):\n insert = self.stroskovno_mesto.dodajanje(stevilo=1)\n super().uvozi(encoding=encoding, insert=insert)", "def eksport(lista):\n # wynik - lista zawierajaca wynik koncowy dzialania funkcji(lata i wartosci dla poszczegolnych panstw)\n wynik = []\n for panstwo in lista:\n # rok - lista zawierajaca lata\n # wartosc - lista zawierajaca wartosci dla lat\n rok = []\n wartosc = []\n for element in panstwo:\n # sprawdzenie czy klucz posiada odpowiednia wartosc\n if element[1].get('key') == \"NE.EXP.GNFS.CD\":\n # dodanie roku do listy\n rok.append(int(element[2].text))\n # rozpatrywanie przypadku w ktorym wartosc jest None\n if element[3].text is None:\n wartosc.append(element[3].text)\n else:\n wartosc.append(float(element[3].text))\n # dodawanie list dla poszczegolnych panstw do listy wynikowej\n wynik.append(rok)\n wynik.append(wartosc)\n\n return wynik", "def update_kanda(self, kanda):\n\t\tself.subvarga = ''\n\t\tself.subvargaNum = 1\n\t\tself.varga = ''\n\t\tself.vargaNum = 1\n\t\tself.kanda = kanda\n\t\tself.kandaNum += 1", "def test_get_insumo(self):", "def inscricao(self):\n\n return True", "def otra_partida():\r\n\r\n for jugador_1 in juego.get_jugadores():\r\n\r\n \"\"\"Va Iterando sobre todos los jugadores disponibles 
de uno en uno\"\"\"\r\n color.utilizarVerde()\r\n print(\"\\nEmpieza: {}\".format(jugador_1.get_jugado_nombre()))\r\n num1, num2 = juego.get_rangos()\r\n print(\"Rango de {} al {}\".format(num1, num2))\r\n jugador_1.set_jugador_intentos(4)\r\n intentos = jugador_1.get_jugador_intentos()\r\n numero_oculto = randint(num1, num2)\r\n puntos = jugador_1.get_jugador_puntuacion_total()\r\n\r\n while intentos >= 0:\r\n\r\n \"\"\"Empieza la partida con 4 Intentos hasta que se quede en 0\"\"\"\r\n\r\n try:\r\n color.utilizarVerde()\r\n numero = int(input(\"\\nIntroduce un Número: \"))\r\n color.utilizarAmarillo()\r\n print(\"Intetos Restantes: {}\".format(intentos - 1))\r\n if numero_oculto < numero and intentos != 1:\r\n color.utilizarRojoClarito()\r\n print(\"\\nEs Demasiado Grande\\n\")\r\n intentos -= 1\r\n jugador_1.set_jugador_intentos(intentos)\r\n\r\n elif numero_oculto > numero and intentos != 1:\r\n color.utilizarRojoClarito()\r\n print(\"\\nEs Demasiado Pequeño\\n\")\r\n intentos -= 1\r\n jugador_1.set_jugador_intentos(intentos)\r\n\r\n elif numero == numero_oculto:\r\n color.utilizarAzul()\r\n print(\"\\nHas Ganado en el {0} intento\".format(intentos))\r\n puntos += 1\r\n jugador_1.set_jugador_puntuacion_total(puntos)\r\n input(\"Pulse enter para continuar...\")\r\n break\r\n\r\n elif numero != numero_oculto and intentos == 1:\r\n color.utilizarVerdeClarito()\r\n print(\"\\nEl número era {} :(\\n\".format(numero_oculto))\r\n color.utilizarRojo()\r\n print(format(\"GAME OVER\", \"-^75\"))\r\n input(\"Pulse enter para continuar...\")\r\n break\r\n\r\n except (ValueError, NameError):\r\n print(\"\\nError: Tiene que ser un número ...\")", "def __len__(self):\n return self.aktualni_delka", "def lateral_vazio_8(tab, jog):\r\n for x in [2,4,6,8]:\r\n if eh_posicao_livre(tab,x):\r\n return x", "def get_janaka(self, chakra_list):\n if self.janaka_melam is None:\n self.janaka = None\n else:\n for c in chakra_list:\n if c.melam == self.janaka_melam:\n self.janaka = c.raga\n break", "def Lluiteu(self) -> IResultList:\n\n if len(self._Lluitadors) != 2:\n print(\"ERROR. Falten lluitadors\")\n exit\n\n elQuePica = randint(0, 1)\n\n while self._Lluitadors[0].es_Ko() == False and self._Lluitadors[1].es_Ko() == False:\n elQueRep = (elQuePica+1) % 2\n proteccio = self._Lluitadors[elQueRep].get_Lluitador().Protegeix()\n pica = self._Lluitadors[elQuePica].get_Lluitador().Pica()\n\n if pica in proteccio:\n self._Lluitadors[elQueRep].treu_vida()\n print(\n f'{self._Lluitadors[elQueRep].get_nom()} rep un cop al {pica.name} de {self._Lluitadors[elQuePica].get_nom()}')\n else:\n print(\n f'{self._Lluitadors[elQueRep].get_nom()} atura el cop al {pica.name} de {self._Lluitadors[elQuePica].get_nom()}')\n elQuePica = elQueRep\n\n guanyador = next(x for x in self._Lluitadors if x.es_Ko() == False)\n perdedor = next(i for i in self._Lluitadors if i.es_Ko() == True)\n\n comentariLocutor = \"\"\n\n if (guanyador.get_vida() - perdedor.get_vida()) > 5:\n comentariLocutor = \"Quina pallissa!!\"\n\n print(f\"{perdedor.get_nom()} cau a terra!\")\n print(f\"VICTÒRIA DE {guanyador.get_nom()}!!! 
{comentariLocutor}\")\n\n return self._Lluitadors", "def Kennzahlentable(conn, new_data, table):\n print(table.columns.keys())\n if 'boersenbewertung' not in new_data: return 0\n for boerse in new_data['boersenbewertung']:\n if 'kennzahlen' not in new_data['boersenbewertung'][boerse]: continue\n featkeys = list(new_data['boersenbewertung'][boerse]['kennzahlen'].keys())\n if \"Beschäftigte\" in featkeys: featkeys.remove(\"Beschäftigte\")\n addid = []\n addid.append(0)\n for id, feat in enumerate(featkeys):\n for idx, block in enumerate(new_data['boersenbewertung'][boerse]['kennzahlen'][feat]):\n del_entry(new_data['compare'], ['boersenbewertung', boerse,'kennzahlen'], [feat])\n entry_check(block, ['jahr'])\n entry_check(new_data['boersenbewertung'][boerse], ['waehrungsinfo', 'notizen_kennzahlen'])\n del_entry(new_data['compare'], ['boersenbewertung', boerse], ['waehrungsinfo', 'notizen_kennzahlen'])\n waehrungsinfo = \"\"\n keys = list(block.keys())\n try:\n keys.remove('jahr')\n except Exception:\n pass\n unit, currency = \"\", \"\"\n comment = \"\"\n if isinstance(new_data['boersenbewertung'][boerse]['notizen_kennzahlen'], list):\n if \"in\" in new_data['boersenbewertung'][boerse]['notizen_kennzahlen'][-1]:\n currency = new_data['boersenbewertung'][boerse]['notizen_kennzahlen'][-1].split(\"in\")[-1].replace(\n \")\", \"\").strip()\n for idxx, entry in enumerate(keys):\n if isinstance(block[entry],str):\n block[entry] = {entry: block[entry]}\n for idxxx, name in enumerate(block[entry]):\n if 'waehrungsinfo' in new_data['boersenbewertung'][boerse]:\n for infolist in new_data['boersenbewertung'][boerse]['waehrungsinfo']:\n if infolist['name'] == feat:\n for info in infolist['eintraege']:\n if info[\"name\"] == name:\n waehrungsinfo = info[\"waehrung\"]\n if isinstance(waehrungsinfo,str):\n cuinfo = get_currencyinfo([\"(\"+waehrungsinfo+\")\"])\n else:\n cuinfo = get_currencyinfo(waehrungsinfo)\n if cuinfo:\n if len(keys) > 1 or len(block[entry]) > len(keys):\n if len(cuinfo) == 1:\n unit = cuinfo[0]['unit']\n currency = cuinfo[0]['currency']\n else:\n unit = cuinfo[idxx]['unit']\n currency = cuinfo[idxx]['currency']\n else:\n unit = cuinfo[idxx]['unit']\n currency = cuinfo[idxx]['currency']\n currency = currency.replace(\"in \",\"\").strip()\n year = block['jahr'].replace(\"\\xa0\", \" \")\n year = year.split(\" \")[0]\n if \"*\" in year:\n year = year.split(\"*\")[0]\n comment = replace_geminfo(block['jahr'], new_data['boersenbewertung'][boerse],\n 'notizen')\n entryinfo = \"\"\n pprtname = name\n if \"(\" in pprtname:\n pprtname = pprtname.split(\"(\")[0].strip()\n if \"gesamt\" in name.lower():\n entryinfo = \" \" + cuinfo[0][\"text\"]\n conn.execute(table.insert(), [\n {'unternehmenId': new_data['unternehmenId'],\n 'Kennzahlenposition': pprtname+entryinfo,\n 'Jahr': year,\n 'Einheit': unit,\n 'W\\xe4hrung': currency,\n 'Betrag': block[entry][name].replace(' ', \"\").replace(\" \", \"\"),\n 'Bemerkung': comment,\n 'Rang': idxxx + 1,\n }])\n return 0", "def PoziomUkonczony(levelObj, gameStateObj):\n for cel in levelObj['cele']:\n\n if cel not in gameStateObj['skrzynki']:\n # Znaleziono niepokryty cel\n return False\n\n return True", "def dodajPrzedmiot(self, przedmiot: Przedmiot):\n self.przedmioty[przedmiot.nazwa]=przedmiot", "def bajar_pluma(self):\n self.pluma = self.pluma.cambiar_estado(True)", "def accueil():\r\n global etat\r\n background(0,128,128)\r\n image(tireur,0,0) \r\n rectMode(CORNERS)\r\n fill(100)\r\n rect(0,60,250,120)\r\n rect(500,60,750,120)\r\n fill(0)\r\n 
textSize(30)\r\n text(\"PVP\",95,100) \r\n text(\"ORDINATEUR\",520,100) \r\n if (mousePressed) and mouseX<250 and mouseX>0 and mouseY<120 and mouseY>60: #si on clique sur le rectangle jouer\r\n etat=1 #on passe en mode jeu\r\n if (mousePressed) and mouseX<750 and mouseX>500 and mouseY<120 and mouseY>60: \r\n etat=2", "def __init__(self):\n self.nombre_roues = 4\n self.nombre_fauteils = 1\n self.moteur = False\n self.volant = True", "def piskvorky(pole):\n\n print('Ahoj. Toto je hra 1D piskvorky. Pocitac hra so symbolmi \\'O\\', ty hras so symbolmi \\'X\\'.') \n while \"-\" in pole:\n if vyhodnot(pole) == False:\n break\n else:\n pole = tah_hraca(pole, cislo_policka)\n if vyhodnot(pole) == False:\n break\n else:\n pole = tah_pocitaca(pole, cislo_policka, mozne_policka1, mozne_policka2)\n if \"-\" not in pole:\n vyhodnot(pole)\n return print(\"Dakujem za hru.\")", "def __init__(self, jugador):\n\n # Se llama al metodo del padre constructor.\n Level.__init__(self, jugador)\n\n #Cargamos la imagen de fondo.\n sprite_sheet_pantalla = SpriteSheet(\"imagenes/fondoactualizado.png\")\n \n # Carga de todos los sprite de la imagen hacia la derecha.\n imagen_1 = sprite_sheet_pantalla.obtener_imagen(0,1788, 896,894)\n self.fondo = imagen_1\n \n self.fondo.set_colorkey(constantes.BLANCO)\n self.limite_derecho = 740\n self.limite_izquierdo = 88\n self.limite_superior = -10\n self.limite_inferior = 686\n self.cambio_nivel_x = 396\n self.cambio_nivel_y = -16\n \n self.fondo.set_colorkey(constantes.BLANCO)\n self.limite_nivel = -2500\n\n # Lista con los bloques de plataformas, indicando la ubicacion x,y y el tipo \n nivel = [ [platforma.STONE, 250, 740],\n [platforma.STONE, 250, 680],\n [platforma.STONE, 250, 620],\n [platforma.STONE, 250, 560],\n [platforma.STONE, 250, 500],\n [platforma.STONE, 250, 440],\n [platforma.STONE, 250, 380],\n [platforma.STONE, 250, 320],\n [platforma.STONE, 310, 320],\n [platforma.STONE, 370, 320],\n [platforma.STONE, 430, 320],\n [platforma.STONE, 490, 320],\n [platforma.STONE, 550, 320],\n [platforma.STONE, 610, 320],\n [platforma.STONE, 610, 380],\n [platforma.STONE, 610, 440],\n [platforma.STONE, 610, 500],\n [platforma.STONE, 610, 560],\n [platforma.STONE, 610, 620],]\n \n #puntos\n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 150\n puntos.rect.y = 700\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos) \n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 320\n puntos.rect.y = 390\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 366\n puntos.rect.y = 390\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 412\n puntos.rect.y = 390\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 458\n puntos.rect.y = 390\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 504\n puntos.rect.y = 390\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 560\n puntos.rect.y = 390\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 320\n puntos.rect.y = 467\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n 
puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 366\n puntos.rect.y = 467\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 412\n puntos.rect.y = 467\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 458\n puntos.rect.y = 467\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 504\n puntos.rect.y = 467\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 560\n puntos.rect.y = 467\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 320\n puntos.rect.y = 544\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 366\n puntos.rect.y = 544\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 412\n puntos.rect.y = 544\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 458\n puntos.rect.y = 544\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 504\n puntos.rect.y = 544\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 560\n puntos.rect.y = 544\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 320\n puntos.rect.y = 621\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 366\n puntos.rect.y = 621\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 412\n puntos.rect.y = 621\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 458\n puntos.rect.y = 621\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 504\n puntos.rect.y = 621\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 560\n puntos.rect.y = 621\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n \n \n \n\n # Se busca en la lista anterior creada y se le agregan las plataformas al nivel.\n for plataforma in nivel:\n bloque = platforma.Plataforma(plataforma[0])\n bloque.rect.x = plataforma[1]\n bloque.rect.y = plataforma[2]\n bloque.jugador = self.jugador\n self.lista_plataformas.add(bloque)", "def tah_pocitaca(pole, cislo_policka, mozne_policka1, mozne_policka2):\n symbol = \"O\"\n strategia_pocitaca(pole)\n if mozne_policka1:\n cislo_policka = choice(mozne_policka1)\n elif mozne_policka2:\n cislo_policka = choice(mozne_policka2)\n else:\n cislo_policka = cislo_policka\n \n if cislo_policka == False:\n cislo_policka = randint(0,20)\n return tah_pocitaca(pole, cislo_policka, mozne_policka1, mozne_policka2)\n elif list_pole[cislo_policka] == 
\"-\":\n print(f\"\\nPocitac si vybral policko: {cislo_policka}. Tah pocitaca:\")\n pole = tah(pole, cislo_policka, symbol) \n return pole\n else:\n return tah_pocitaca(pole, cislo_policka, mozne_policka1, mozne_policka2)", "def ver_lb(request,pk):\n proyecto=Proyecto.objects.get(id_proyecto=pk)\n lineaB=LineaBase.objects.filter(proyecto=proyecto)\n Lb=iLB_item.objects.filter(lb=lineaB,item__actual=True)\n\n context={\n 'lb':Lb,\n 'lineaB':lineaB,\n 'proyectos':proyecto,\n }\n return render(request, 'items/ver_lb.html', context)", "def acquisizioneParametri(self):\n\n messaggio =''\n\n try: \n self.__rete = slugify(self.ui.nomeRete.text())\n # controllo se la lunghezza del nome inserito sia > di 5 caratteri\n if(len(self.__rete) < 5 or len(self.__rete) > 30):\n\n messaggio = 'err: inserimento Nome'\n raise NameError\n \n # controllo che il nome scelto sia univoco\n isPresent = self.__NNNameCheck()\n if(isPresent):\n messaggio = 'err: nome già utilizzato'\n raise NameError\n\n # controlli su numero layer e numero nodi che siano >= 1\n # e che siano rispettivamente <= 20 e <= 50\n self.__layer = int(self.ui.nLayer.text())\n if(self.__layer < 1):\n messaggio = 'err: numero layer < 1'\n raise ValueError\n elif(self.__layer >= 20):\n messaggio = 'err: numero layer > 20'\n raise ValueError\n\n self.__nodi = int(self.ui.nNodi.text())\n if(self.__nodi < 1):\n messaggio = 'err: numero nodi < 1'\n raise ValueError\n if(self.__nodi >= 50):\n messaggio = 'err: numero nodi > 50'\n raise ValueError\n\n # salvataggio della funzione scelta\n self.__funzione = self.ui.funzione.currentText()\n \n # controllo che la percentuale di Vs sia < 25%\n # e che la percentuale di Ts sia > 75%\n if(self.__percentuale < 25):\n messaggio = 'err: suddivisione'\n raise ValueError\n if (self.__percentuale > 75):\n messaggio = 'err: suddivisione'\n raise ValueError\n\n # controllo che sia stato scelto effettivamente un dataset\n if(len(self.__dataSet) == 0):\n messaggio = 'err: dataSet errato'\n raise NameError\n\n # setto il tasto caricamento di una rete non cliccabile\n self.ui.but_caricaRete.setEnabled(False)\n\n # cambio nome del tasto convalida\n self.ui.but_convalida.setText('confermato')\n self.ui.comunicazione.setText('')\n #abilito salvataggio\n self.ui.but_salva.setEnabled(True)\n\n # settandola a True permetto che il training venga effettuato\n # dato che i dati inseriti sono validi\n self.__convalida = True\n return True\n except:\n # in caso di eccezzioni faccio comparire il messaggio\n self.ui.comunicazione.setText(messaggio)\n return False", "def setdata(self):\n self.shortCheck.setChecked(True)\n if self.rustr[-2] == \"и\" or self.rustr[-2] == \"о\": \n self.rumascEdit.setText(self.stem)\n self.rufemEdit.setText(self.stem + self.morphs.adjshortsoft[2])\n self.runuetEdit.setText(self.stem + self.morphs.adjshortsoft[3])\n self.ruplurEdit.setText(self.stem + self.morphs.adjshortsoft[4])\n else:\n self.rumascEdit.setText(self.stem)\n self.rufemEdit.setText(self.stem + self.morphs.adjshorthard[2])\n self.runuetEdit.setText(self.stem + self.morphs.adjshorthard[3])\n self.ruplurEdit.setText(self.stem + self.morphs.adjshorthard[4])", "def encontrar_mejor_sitio(ubicaciones):\n\ttorretas_adicionales = ubicaciones.count(\"t\") # obtengo cuantas t (torretas adicionales) hay en ubicaciones\n\tif torretas_adicionales == 0:\n\t\tprint \"Posicion: 5\"\n\tif torretas_adicionales == 4:\n\t\tprint \"Posicion: \",\n\t\tfor i in range(len(ubicaciones)):\n\t\t\tif ubicaciones[i] == \"o\":\n\t\t\t\tprint i,", "def 
afficher(self):\n bordRect = (self.pos[0]-5, self.pos[1]-5, self.dim[0]+5, self.dim[1]+5)\n Fond = pygame.draw.rect(self.ecran.surface, self.ecran.couleur, bordRect, 0) # Efface le precedant text\n\n rang = 0\n verif = \"\"\n compteur = 0\n self.lignes = []\n if self.txt == \"\": self.txt = \" \"\n \n while verif != self.txt:\n verif =\"\"\n rang += self.correction(self.txt[rang:], compteur)\n compteur += 1\n for k in self.lignes:\n verif += k.txt\n\n for compteur in range(len(self.lignes)):\n self.lignes[compteur].afficher()\n\n self.dim = (self.dim[0], self.hLigne*(compteur+1)) # +1 -> Boucle for\n \n pygame.display.flip()", "def piskvorky(pole):\n\n print('Ahoj. Toto je hra 1D piskvorky. Pocitac hra so symbolmi \\'O\\', ty hras so symbolmi \\'X\\'.') \n while \"-\" in pole:\n if vyhodnot(pole) == False:\n break\n else:\n pole = tah_hraca(pole, str_policka)\n if vyhodnot(pole) == False:\n break\n else:\n pole = tah_pocitaca(pole, symbol=\"O\")\n if \"-\" not in pole:\n vyhodnot(pole)\n return print(\"Dakujem za hru.\")", "def pyramida(zakladna, orientacia, centrovanie):\n nova_pyramida = []\n if orientacia not in [\"normalna\", 'obratena']:\n print(\"Pyramida moze byt iba [normalna] alebo [obratena]\")\n return False\n\n if centrovanie != \"center\" and centrovanie != \"vlavo\":\n print(\"Centrovanie pyramidy moze byt iba [center] alebo [vlavo]\")\n return False\n\n if centrovanie == \"center\":\n if orientacia == \"normalna\":\n\n cislo_riadka = -1\n for i in range(1, zakladna + 1, 2): #pocet hviezdiciek rastie po 2\n #print(f\"{'*' * i:^{zakladna}}\")\n cislo_riadka +=1\n riadok = []\n for j in range(cislo_riadka,zakladna//2): #vyska pyramidy = polovica zakladne\n riadok.append(\" \") #kolky riadok, tolko medzier vlavo\n for j in range(0, i):\n riadok.append(\"*\")\n for j in range(cislo_riadka,zakladna//2): # aj v pravo\n riadok.append(\" \")\n nova_pyramida.append(riadok)\n else:\n cislo_riadka = -1\n for i in range(zakladna, 0, -2): #pocet hviezdiciek\n #print(f\"{'*' * i:^{zakladna}}\")\n cislo_riadka +=1\n riadok = []\n for j in range(0,cislo_riadka):\n riadok.append(\" \")\n for j in range(0,i):\n riadok.append(\"*\")\n for j in range(0,cislo_riadka):\n riadok.append(\" \")\n nova_pyramida.append(riadok)\n else:\n if orientacia == \"normalna\":\n for i in range(zakladna):\n #print(f\"{'*' * (i + 1)}\")\n riadok = []\n for j in range(0,i):\n riadok.append(\"*\")\n nova_pyramida.append(riadok)\n else:\n for i in range(zakladna):\n riadok = []\n #print(f\"{'*' * (zakladna - i)}\")\n for j in range(zakladna, i, -1):\n riadok.append(\"*\")\n nova_pyramida.append(riadok)\n return nova_pyramida", "def Wygrana():\r\n for x in range (0, ROZMIAR_PLANSZY):\r\n for y in range (0, ROZMIAR_PLANSZY):\r\n for kierunek in (\"poziom\", \"pion\", \"skos prawy\", \"skos lewy\"):\r\n iksy, kolka = SprawdzLinie ((x, y), kierunek)\r\n if iksy == 5:\r\n return X\r\n if kolka == 5:\r\n return O\r\n return False", "def ludnosc(lista):\n # wynik - lista zawierajaca wynik koncowy dzialania funkcji(lata i wartosci dla poszczegolnych panstw)\n wynik = []\n for panstwo in lista:\n # rok - lista zawierajaca lata\n # wartosc - lista zawierajaca wartosci dla lat\n rok = []\n wartosc = []\n for element in panstwo:\n # sprawdzenie czy klucz posiada odpowiednia wartosc\n if element[1].get('key') == \"EN.POP.DNST\":\n # dodanie roku do listy\n rok.append(int(element[2].text))\n # rozpatrywanie przypadku w ktorym wartosc jest None\n if element[3].text is None:\n wartosc.append(element[3].text)\n else:\n 
wartosc.append(float(element[3].text))\n # dodawanie list dla poszczegolnych panstw do listy wynikowej\n wynik.append(rok)\n wynik.append(wartosc)\n\n return wynik", "def turysci(lista):\n # wynik - lista zawierajaca wynik koncowy dzialania funkcji(lata i wartosci dla poszczegolnych panstw)\n wynik = []\n for panstwo in lista:\n # rok - lista zawierajaca lata\n # wartosc - lista zawierajaca wartosci dla lat\n rok = []\n wartosc = []\n for element in panstwo:\n # sprawdzenie czy klucz posiada odpowiednia wartosc\n if element[1].get('key') == \"ST.INT.ARVL\":\n # dodanie roku do listy\n rok.append(int(element[2].text))\n # rozpatrywanie przypadku w ktorym wartosc jest None\n if element[3].text is None:\n wartosc.append(element[3].text)\n else:\n wartosc.append(float(element[3].text))\n # dodawanie list dla poszczegolnych panstw do listy wynikowej\n wynik.append(rok)\n wynik.append(wartosc)\n\n return wynik", "def orquestador_tlk():\n\n try:\n #**Existe archivo TLK**\n if checkFileExistance(archivo_tlk):\n #**Llamo a parser inventario tlk**\n logger.info(f'Arvhivo inventario TLK encontrado: {archivo_tlk}')\n logger.info(\"\\n>>>>>>>>>>COMIENZO PROCESAMIENTO INVENTARIO TELELINK<<<<<<<<<<<<\")\n f_parsear_inventario (archivo_tlk,archivo_tlk_dst,archivo_tlk_viejo)\n\n #**Cargo inventario tlk parseado a la BD**\n f_cargar_inv_en_BD(archivo_tlk_dst)\n\n #**Proceso BD inventario tlk**\n f_procesar_resumne_tlk_BD()\n logger.info(\">>>>>>>>>>FIN PROCESAMIENTO INVENTARIO TELELINK<<<<<<<<<<<<\\n\\n\")\n #**if fin existe archivo TLK**\n\n #**existe archivo RBS DSC**\n elif checkFileExistance(archivo_rbs_DCS):\n #**llamo a parser inventario RBS**\n logger.info(f'Arvhivo inventario RBS encontrado: {archivo_rbs_DCS}')\n logger.info(\"\\n>>>>>>>>>>COMIENZO PROCESAMIENTO INVENTARIO RBS<<<<<<<<<<<<\")\n f_parseo_inventario_RBS (archivo_rbs_DCS,archivo_rbs_DCS_dst,archivo_rbs_DCS_old)\n\n #**Cargo inventario RBS parseado a la BD**\n f_cargar_inv_RBS_en_BD(archivo_rbs_DCS_dst)\n\n #**Proceso BD inventario tl**\n logger.info(\">>>>>>>>>>FIN PROCESAMIENTO INVENTARIO RBS<<<<<<<<<<<<\\n\\n\")\n #**if fin existe archivo TLK**\n except Exception as e:\n logger.error(traceback.format_exc())", "def danh():\n # printdb('def danh')\n imgmanh = imagesearch(path_Scr + '\\\\screencap.png',\n path_Scr + '\\\\longden.png', 0.77)\n time.sleep(1)\n # mạnh bỏ qua\n if imgmanh.find() >= 1:\n click_next()\n logging.info('Next')\n giaotranh()\n else:\n click_danh()\n #######################lỗi mạng######################\n time.sleep(30)\n screencap()\n error = imagesearch(path_Scr + '\\\\screencap.png',\n path_Scr + '\\\\error.png', 0.88)\n # printdb('check lỗi ')\n if error.find() >= 1: # check lỗi mạng\n check_loi_vethanh()\n else:\n time.sleep(cTimeWar) # 40s\n # Rutlui() #opt1 check khiên --end (rỉa 1 khiên)\n DanhTiep() # opt2 check nút hồi thành --end\n # click_rutlui() #opt3 chờ cTimeWar --End (đánh theo time chỉ định)\n #####################################################", "def ohodnotL(row, col, znak, prevx, prevy, pocet_ciest, hlbka, mx): # vlavo\r\n\r\n susedia = getSusedia_ohodnot(row, col, znak)\r\n\r\n if (values[row][col] != \"W\" and col != 0):\r\n hlbka += 1\r\n\r\n if col == 0:\r\n if values[row][col] == \"W\" and hlbka != 0:\r\n hlbka -= 1\r\n dlzkyL.append(hlbka)\r\n\r\n if (col != 0 and hlbka < mx):\r\n for sused in susedia:\r\n if (sused[1] == col - 1 or (sused[1] == col and (sused[0] != prevx or sused[1] != prevy))):\r\n if sused[1] == 0:\r\n pocet_ciest += 1\r\n pocet_ciest += ohodnotL(sused[0], sused[1], 
znak, row, col, 0, hlbka, mx)\r\n if (values[sused[0]][sused[1]] == \"W\") and col == 1: # nema zmysel sem umiestnovat - radsej inde\r\n pocet_ciest = 0\r\n return pocet_ciest", "def test_2(self):\n print(\"Consumir con clave incorrecta\")\n billetera1=BilleteraElectronica(1,\"Maria\", \"Bra\", 20267824, 1234)\n billetera1.recargar(100, \"20/12/2017\", \"Comercio1\", 20267824)\n self.assertEqual(billetera1.saldo(), 100)\n billetera1.consumir(50, \"22/15/2017\", \"Comercio1\", 20267824, 123)\n self.assertEqual(billetera1.saldo(), 100)", "def kto_wygral():\n for x in range(0, ROZMIAR_PLANSZY):\n for y in range(0, ROZMIAR_PLANSZY):\n for kierunek in (\"poziom\", \"pion\", \"skos prawy\", \"skos lewy\"):\n iksy, kolka = sprawdz_linie((x, y), kierunek)\n if iksy == ile_do_wygranej:\n return X\n if kolka == ile_do_wygranej:\n return O\n return False", "def __str__(self):\n return self.idLancamentosPagar", "def piskvorky1D():\n print(HLASKA_UVOD)\n \n symbol_hrace = input('Vyber si a zadej svuj herni symbol, \"x\" nebo \"o\"?: ') # vyber herniho symbolu hrace\n while symbol_hrace not in ('x', 'o'):\n symbol_hrace = input('Spatne, zadej znovu, \"x\" nebo \"o\"?: ')\n \n if symbol_hrace == 'x': # nastaveni herniho symbolu pocitace\n symbol_pocitace = 'o'\n else:\n symbol_pocitace = 'x'\n\n herni_pole = DELKA_HERNIHO_POLE * '-'\n print(herni_pole)\n \n kolo = 1\n while True:\n for tahne in (tah_hrace, tah_pocitace):\n herni_pole = tahne(herni_pole, symbol_pocitace, symbol_hrace)\n print('{}. kolo: {}'.format(kolo, herni_pole))\n stav = vyhodnot(herni_pole) # promenna, kde je ulozeno aktualni vyhodnoceni herniho pole\n if stav in KDO_VYHRAL:\n print(KDO_VYHRAL[stav])\n return\n kolo += 1", "def pretvori(stolpci, kwargs):\n kwargs[\"oznaka\"] = stolpci.index(\"lokacija\")\n return stolpci" ]
[ "0.6971054", "0.64303", "0.6378631", "0.6378631", "0.6378631", "0.6378631", "0.6378631", "0.610913", "0.6093041", "0.6004066", "0.5950295", "0.58474815", "0.58162034", "0.5795707", "0.5697223", "0.5587125", "0.5565432", "0.5492906", "0.5483054", "0.54795176", "0.54650056", "0.54500717", "0.5418208", "0.53935975", "0.5385375", "0.5382683", "0.53706086", "0.53689665", "0.5366543", "0.53504986", "0.53462476", "0.5343286", "0.53332025", "0.5318091", "0.53176695", "0.5315608", "0.5307975", "0.5278769", "0.5274305", "0.52637786", "0.52584475", "0.52557683", "0.52535", "0.5247873", "0.5247873", "0.5247873", "0.5247873", "0.5247873", "0.5247873", "0.5242815", "0.52384293", "0.5209341", "0.5201898", "0.51833713", "0.517737", "0.5174749", "0.51741904", "0.51677084", "0.51573676", "0.51429117", "0.51404035", "0.5122397", "0.5111579", "0.510538", "0.51018214", "0.510024", "0.5099534", "0.50989944", "0.5094277", "0.5079186", "0.50774497", "0.50709367", "0.50705355", "0.50681317", "0.5062607", "0.50589526", "0.5058171", "0.5058015", "0.5055456", "0.5040466", "0.50330836", "0.5030796", "0.50282586", "0.50251734", "0.5017247", "0.5015178", "0.501364", "0.50121784", "0.500893", "0.5006161", "0.4998557", "0.4996426", "0.49950516", "0.49939692", "0.49867055", "0.49866077", "0.49824703", "0.4980962", "0.4980426", "0.4977851", "0.49678233" ]
0.0
-1
buat titik di koordinat masukan
def buat_titik(self):
    # memberi geometri pada fitur baru
    # Memberi fitur titik
    feature = QgsFeature()
    feature.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(self.x, self.y)))
    self.measureDialog.insertParams(self.idTitik, self.x, self.y)
    # menambahkan fitur pada layer
    self.layerTitik.dataProvider().addFeatures([feature])
    self.layerTitik.updateExtents()
    self.layerTitik.startEditing()
    self.layerTitik.changeAttributeValue(self.idTitik, 0, self.idTitik)
    self.layerTitik.changeAttributeValue(self.idTitik, 1, self.x)
    self.layerTitik.changeAttributeValue(self.idTitik, 2, self.y)
    self.layerTitik.commitChanges()
    self.idTitik = self.idTitik + 1
    self.iface.actionZoomToLayer().trigger()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_title():", "def getTitle(self): #$NON-NLS-1$\r", "def getTitle(self): #$NON-NLS-1$\r", "def get_title(self, obj):\n title = obj.habit.title\n return title", "def Label(self) -> str:", "def title(self, txt):\n num = len(txt)\n ticks = \"=\" * num\n print(ticks)\n print(txt)\n print(ticks)", "def conclusion_title_map(self):\n pass", "def title(self):\n return 'Fale Conosco'", "def display_name(self):", "def Title(self, separator=u' / ', first_index=0):\n if self.position is None: # when created by transmogrifier by example\n return self.getId()\n position = self.position.to_object\n if position is None: # the reference was removed\n return self.getId()\n\n position = self.get_position()\n organization = self.get_organization()\n label = self.get_label()\n if position is None and not label:\n return \"(%s)\" % organization.get_full_title(separator=separator, first_index=first_index).encode('utf8')\n # we display the position title or the label\n position_title = label or position.title\n return \"%s (%s)\" % (position_title.encode('utf8'),\n organization.get_full_title(separator=separator, first_index=first_index).encode('utf8'))", "def dc_title(self):\n return u\"{0} ({1}): {2} {3}\".format(\n self.label, self.in_assessment[0].timepoint,\n self.subjects[0].code_in_study,\n \"...\" if len(self.subjects) > 1 else \"\")", "def title_content(label=\"A title\"):\n return {'label':label}", "def title_n(self):\n self.run_command('title_n')", "def __str__(self):\n\t\treturn self.titre", "def __draw_title(self):\n title = 'SNAAAAKE'\n x_offset = (curses.COLS - len(title)) // 2\n y_offset = max(1, (curses.LINES - self.config.arena_size[1] - 2) // 4)\n self.stdscr.addstr(y_offset, x_offset, title)", "def text(self) -> None:\n label_space = tk.Label(self)\n label_space.grid(row=0)\n label_book_number = tk.Label(self, text=f'Номер книги:')\n label_book_number.grid(row=1, column=0, ipady=5)\n label_title = tk.Label(self, text='Название книги:')\n label_title.grid(row=2, column=0, padx=5)\n label_author = tk.Label(self, text='Автор:')\n label_author.grid(row=3, column=0, pady=5)\n label_genre = tk.Label(self, text='Жанр:')\n label_genre.grid(row=4, column=0)", "def title_p(self):\n self.run_command('title_p')", "def _get_title_text(self):\n return Text(\n self,\n self.settings.font_bold_filename,\n 96,\n self.settings.font_color,\n 'zuckbot',\n {'center': self.screen_rect.center},\n 0,\n -50,\n )", "def get_title(self) -> str:\n pass", "def complete_alt_title(self, obj):\n return str(obj)", "def setTitlu(self, titlu):\n self.titlu = titlu", "def set_title(self) -> None:\n if self.__option == 1:\n self._ordered_title = \"AÑADIR PERMISOS\"\n else:\n self._ordered_title = \"ELIMINAR PERMISOS\"", "def title(self):\n return self.header", "def writeTitle( self ):\n \n if self.mTitle:\n e = SVGdraw.text( self.mPageWidth / 2,\n self.mTitleFontSize ,\n self.mTitle,\n self.mTitleFontSize,\n self.mTitleFont,\n stroke = \"rgb(%i,%i,%i)\" % BLACK,\n text_anchor = \"middle\" )\n\n self.addElement(e)", "def format_title(self, data):\n return data", "def __title(self, conf):\n return conf[\"conf_json\"][0][\"title\"]", "def title(self) -> str:\n pass", "def POSCAR_title(doc):\n com_for=doc['snl']\n formu=com_for['formula']\n return formu", "def Show_Titles( self ):\r\n self.system.Change_Seq( \"Title\" )", "def get_tooltip_text(self): # real signature unknown; restored from __doc__\n return \"\"", "def title(self) -> str:\n raise NotImplementedError", "def header(self):\n self.set_font(self.police, 'B', 15)\n 
self.cell(w=0, h=10, txt=f\"CV de {self.name}\", border=1, ln=1, align='C')", "def __str__(self):\n\t\treturn self.title", "def __str__(self):\n\t\treturn self.title", "def summary_title(tile_summary):\n return f\"Slide tile_summary.slide_name Tile Summary:\"", "def title(self) -> str:\n\t\t# pylint: disable=unsubscriptable-object\n\t\treturn self.value[1]", "def title(self):\n return self['title']", "def get_detail_title(soort, edit, obj):\n naam_ev = get_names_for_type(soort)[0]\n if edit == 'new':\n return _('Nieuw(e) ') + str(naam_ev)\n try:\n title = \" \".join((naam_ev.capitalize(), obj.naam))\n except AttributeError:\n title = \" \".join((naam_ev.capitalize(), obj.nummer))\n return title", "def formatName(self):\r\n return self.title.getVal() + \" \" + self.first.getVal() + \" \" + self.last.getVal()", "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "def title(self, obj):\n return str(obj)", "def makeTitle(self):\n l1=Label(self.app, text=\"Asset Allocation Combinations\")\n l1.grid(row=0, column=0)", "def title(self, value):\n self.definition.title = value", "def label(self):\n return ''", "def __unicode__(self):\n return self.title", "def __unicode__(self):\n return self.title", "def __unicode__(self):\n return self.title", "def __unicode__(self):\n return self.title", "def get_titre(self):\n return self.titre", "def title(self):\n return asarray(title(self))", "def _get_wiki_title(self, coordinates):\n\n formated_coord = f\"{coordinates['lat']}|{coordinates['lng']}\"\n\n PARAMS = {\n \"action\": \"query\",\n \"list\": \"geosearch\",\n \"format\": \"json\",\n \"gsradius\": 1000,\n \"gscoord\": formated_coord\n }\n\n return requests.get(\n url=BASE_URL, params=PARAMS\n ).json()['query']['geosearch'][0]['title']", "def setTitle(title, height=48, justify='center', pos='above', offset=0):\n titles = title.split(\"\\n\")[:4]\n i = 1\n for line in titles:\n dislin.titlin(line, i)\n i = i + 1\n dislin.titjus(justdict[justify])\n pdict = {'above':'ABOVE', 'below':'BELOW'}\n dislin.titpos(pdict[pos])\n dislin.htitle(height)\n dislin.vkytit(offset)\n dislin.title()", "def set_title(self, title):\n\t\tpass", "def get_name(self):\n return self.soup.find('div', id = 'zh-topic-title').h1\\\n .get_text(strip = True).encode(CODE)", "def kluisInfo():\r\n kluisDict = dictionary()\r\n beginSchermTerug.grid(pady=3, padx=(10, 10), sticky='w', row=1)\r\n\r\n for kluis in kluisDict:\r\n try:\r\n if kluisDict[kluis] is not None and int(beginSchermEntry.get()) in kluisDict[kluis]: # kluis zoeken in\r\n # dictionary\r\n beginSchermTopTitel['text'] = fietsStalTijd(kluisDict[kluis][1]) # functie fietsStalTijd op kluis\r\n # aanroepen\r\n beginSchermTitel['text'] = 'De huidige kosten zijn €' + str(prijs(kluisDict[kluis][0]))\r\n beginSchermEntry.delete(0, END)\r\n return\r\n except ValueError:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return\r\n beginSchermTitel['text'] = 'Dit OV nummer is onbekend'\r\n return", "def print_title(title):\n print \"\\n\"+\"#\"*32+\"\\n# \"+title+\"\\n\"+\"#\"*32+\"\\n\"", "def helptext(self):\n return \"\"", "def test_title(self):\n self.assertEquals(\"Title\\n=====\", trans(\"== Title ==\"))\n self.assertEquals(\"Title\\n-----\", trans(\"=== Title ===\"))\n self.assertEquals(\"#### Title\", trans(\"==== Title ====\"))\n self.assertEquals(\"##### Title\", trans(\"===== Title =====\"))", "def __draw_title(self):\n if self.title is not None:\n self.fig.suptitle(\n self.title, 
y=self.settings.otherParams[\"figure.title.yposition\"])", "def __str__(self):\n return \"{title}\".format(title=self.title)", "def make_title(words):", "def __str__(self):\n return self.titulo", "def title(self, string):\n return self.bold(string)", "def _visit_title(self, elem):\n pass", "def __unicode__(self):\n return self.title", "def build_heading(win, readonly=False):\n typetext = TYPETXT[win.parent().albumtype]\n actname = win.parent().albumdata['artist']\n album = win.parent().albumdata['titel']\n if not actname or not album:\n text = f'Opvoeren nieuw {typetext}'\n else:\n wintext = win.heading.text()\n newtext = ''\n for text in ('tracks', 'opnames'):\n if wintext == text:\n newtext = f': {wintext}'\n break\n if wintext.endswith(text):\n newtext = f': {text}'\n break\n text = 'G' if readonly else 'Wijzigen g'\n text = f'{text}egevens van {typetext} {actname} - {album}{newtext}'\n return text", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def __str__(self):\n return self.title", "def set_title(self, title):\n self.title = title\n self.opf.title = title\n self.ncx.title = title", "def _title(hit: DD) -> str:\n return hit[\"_source\"][\"title\"]", "def flag_titular(self):\n return self._flag_titular", "def title(self):\n return self.definition.title", "def title(self):\n return self.run_command('title')[0]", "def title(self):\n msg = __(u\"Indicateurs portlet\")\n return self.portlet_title or msg", "def printTitle(self, data):\r\n\t#try:\r\n #\twx.CallLater(1800, lambda x: x.SetTitle(self.title), self)\r\n\t#except:\r\n\t#\treturn\r\n #self.SetTitle(data)\r\n pass", "def __str__(self):\n return \"Author: \" + self.author +\"\\nTitle: \" + self.title + \"\\nHaiku: \" + str(self.haiku) + \\\n \"\\nText: \" + self.text + \"\\n\"", "def owner_and_subtitle_helper(self):\n\n subtitle = \"<br><sup>\"\n owner = self.ui.comboBox_coders.currentText()\n if owner == \"\":\n owner = '%'\n else:\n subtitle += _(\"Coder: \") + owner + \" \"\n if self.ui.comboBox_category.currentText() != \"\":\n subtitle += _(\"Category: \") + self.ui.comboBox_category.currentText()\n return owner, subtitle", "def getTitle(pan: str) -> str:\n src = open(pan).read()\n lines = src.split(\"\\n\")\n if len(lines)==0: return \"\"\n t = mark.render(lines[0].strip(\" #\"))\n if t.startswith(\"<p>\"): t = t[3:]\n if t.endswith(\"</p>\"): t = t[:-4]\n return t", "def Title(self):\n return self.title", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def headline(self):\r\n return '%s%s %s%s' % (BLUE, self.title,\r\n NORMAL, self.link)", "def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / 
___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))" ]
[ "0.7076962", "0.67579925", "0.67579925", "0.66918576", "0.64708865", "0.64061403", "0.6356622", "0.62820756", "0.62643343", "0.62187165", "0.61947083", "0.61748713", "0.6173121", "0.6151515", "0.6145324", "0.6133842", "0.61263466", "0.60638046", "0.60589373", "0.60490286", "0.60476065", "0.60463405", "0.60268646", "0.6017415", "0.6011525", "0.60037917", "0.6000749", "0.600026", "0.5992879", "0.59867364", "0.5985428", "0.59853035", "0.598083", "0.598083", "0.5977835", "0.5973075", "0.5972749", "0.5970309", "0.59624296", "0.5951911", "0.59507304", "0.5943773", "0.5939639", "0.59393054", "0.59384304", "0.59384304", "0.59384304", "0.59384304", "0.5936713", "0.5936028", "0.59258103", "0.5890458", "0.5882042", "0.5874476", "0.5871823", "0.58658475", "0.5859561", "0.5845359", "0.58452654", "0.58423555", "0.58340687", "0.58268803", "0.5825788", "0.5825467", "0.5819794", "0.58193153", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5819099", "0.5818186", "0.5817754", "0.5817561", "0.5816586", "0.58129144", "0.5811889", "0.5805764", "0.5801638", "0.57985806", "0.5795982", "0.57952356", "0.5792009", "0.5792009", "0.57914364", "0.57877165" ]
0.0
-1
Creates an error log for a ``logging`` module ``record`` instance.
def create_from_record(self, record, **kwargs):
    for k in ('url', 'view', 'request', 'data'):
        if k not in kwargs:
            kwargs[k] = record.__dict__.get(k)
    kwargs.update({
        'logger': record.name,
        'level': record.levelno,
        'message': force_unicode(record.msg),
        'server_name': conf.NAME,
    })
    # construct the checksum with the unparsed message
    kwargs['checksum'] = construct_checksum(**kwargs)
    # save the message with included formatting
    kwargs['message'] = record.getMessage()
    # If there's no exception being processed, exc_info may be a 3-tuple of None
    # http://docs.python.org/library/sys.html#sys.exc_info
    if record.exc_info and all(record.exc_info):
        return self.create_from_exception(record.exc_info, **kwargs)
    return self.process(
        traceback=record.exc_text,
        **kwargs
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _record(self):\n record_attr = {\n 'name': 'test_record',\n 'level': 'ERROR',\n 'pathname': '/test/path',\n 'msg': 'This is a test record.',\n }\n record = logging.makeLogRecord(record_attr)\n return record", "def log_message(self, build_id, record):\n # Todo: provide \"shortcut\" methods to convert the traceback\n # (from exc_info) to a serializable object, and to clean\n # up the record object for decent serialization in the\n # database.\n pass", "def record_error(self, track_id, errtype):\n self.errlog.insert_one({\"track\": track_id, \"type\": errtype})", "def create_logger(level=logging.DEBUG, record_format=None):\n if record_format is None:\n record_format = \"[%(asctime)s][%(thread)d][%(filename)s][line: %(lineno)d][%(levelname)s] ## %(message)s\"\n\n logger = logging.getLogger(\"mylogger\")\n logger.setLevel(level)\n # 修改\n fh.setLevel(level)\n ch.setLevel(level)\n formatter = logging.Formatter(record_format)\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n return logger", "def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(\n evidence_path, test_case, log_extension\n )\n else:\n log_path = None\n self.log = Log(log_path)\n self.log = self.log.get_logger()\n return self.log", "def log_new_error(*args, **kwargs):\n logging.error(*args, **kwargs)", "def _log (cls, logger, msg, parent=None, api_object=None, level='error'):\n\n log_method = logger.error\n\n try :\n log_method = getattr (logger, level.lower())\n except :\n sys.stderr.write (\"unknown log level '%s'\" % level)\n\n log_method (\"%s: %s\" % (cls.__name__, msg))\n\n return cls (msg, parent=parent, api_object=api_object, from_log=True)", "def prepare(self, record: LogRecord):\n # The format operation gets traceback text into record.exc_text\n # (if there's exception data), and also returns the formatted\n # message. We can then use this to replace the original\n # msg + args, as these might be unpickleable. 
We also zap the\n # exc_info and exc_text attributes, as they are no longer\n # needed and, if not None, will typically not be pickleable.\n\n # Not nedded, since we use tblib\n # msg = self.format(record)\n # # bpo-35726: make copy of record to avoid affecting other handlers in the chain.\n # record = copy.copy(record)\n # record.message = msg\n # record.msg = msg\n # record.args = None\n # record.exc_info = None\n # record.exc_text = None\n return ['log_msg', record]", "def Log(self, message, record_id=None):\n message = str(message)\n record = self._CreateRecord(message, record_id)\n if self._formatter:\n self._formatter.Format(record)\n if len(record.message) > _MAX_MSG_SIZE:\n logging.error('Message must be less than (%s)', _MAX_MSG_SIZE)\n return\n self._records.appendleft(record)\n return record.id", "def set_log_level_to_error():\n logging.setLevel(default_logging.ERROR)\n\n if os.path.exists(\"gen3tests.logs\"):\n os.remove(\"gen3tests.logs\")\n logfile_handler = default_logging.FileHandler(\"gen3tests.logs\")\n logfile_handler.setFormatter(default_logging.Formatter(LOG_FORMAT))\n logging.addHandler(logfile_handler)\n yield", "def _build_make_record_function():\n prev_factory = logging.getLogRecordFactory()\n\n def make_record(*arguments, **kwargs):\n record = prev_factory(*arguments, **kwargs)\n return _synchrolog_record_factory(record)\n\n return make_record", "def _stab_log_error(self, logconf, msg):\n\t\tprint \"Error when logging %s: %s\" % (logconf.name, msg)", "def _stab_log_error(self, logconf, msg):\n print('Error when logging %s: %s' % (logconf.name, msg))", "def raise_on_error(cls, prefix=None, exc_type=None):\n exc_type = exc_type or ValueError\n def on_message(msg):\n if msg.severity >= logging.ERROR:\n text = msg.text\n if prefix:\n text = '%s%s' % (prefix, text)\n raise exc_type(text)\n return cls(on_message=on_message)", "def _createlog(self):\n\t\tif self.toemail and self.fromemail and self.smtphost:\n\t\t\t# Use the email logger as the first logger, so that when sending the email (in :meth:`EmailLogger.close`) fails, it will still be logged to the log file/stdout/stderr\n\t\t\tself._loggers.append(EmailLogger(self))\n\t\tif self.log2stderr:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stderr, self._formatlogline))\n\t\tif self.log2stdout:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stdout, self._formatlogline))\n\t\tif self.log2file:\n\t\t\t# Create the log file\n\t\t\tlogfilename = ul4c.Template(self.logfilename, \"logfilename\").renders(job=self)\n\t\t\tlogfilename = url.File(logfilename).abs()\n\t\t\tself.logfileurl = str(url.Ssh(misc.sysinfo.user_name, misc.sysinfo.host_fqdn or misc.sysinfo.host_name, logfilename.local()))\n\t\t\tskipurls = [logfilename]\n\t\t\tlogfile = logfilename.open(mode=\"w\", encoding=self.encoding, errors=self.errors)\n\t\t\tif self.loglinkname is not None:\n\t\t\t\t# Create the log link\n\t\t\t\tloglinkname = ul4c.Template(self.loglinkname, \"loglinkname\").renders(job=self)\n\t\t\t\tloglinkname = url.File(loglinkname).abs()\n\t\t\t\tskipurls.append(loglinkname)\n\t\t\t\tlogfilename = logfilename.relative(loglinkname)\n\t\t\t\ttry:\n\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\texcept OSError as exc:\n\t\t\t\t\tif exc.errno == errno.EEXIST:\n\t\t\t\t\t\tloglinkname.remove()\n\t\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise\n\t\t\tself._loggers.append(URLResourceLogger(self, logfile, skipurls, self._formatlogline))", "def myHandleError(self, record):\n if raiseExceptions:\n ei = 
sys.exc_info()\n try:\n traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)\n except IOError:\n pass # see issue 5971\n finally:\n del ei\n raise", "def myHandleError(self, record):\n if raiseExceptions:\n ei = sys.exc_info()\n try:\n traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)\n except IOError:\n pass # see issue 5971\n finally:\n del ei\n raise", "def remote_logger(cls, record):\n # Only upload when force_profile or trick_ci_env is specified.\n # i.e. FORCE_PROFILE=1 or -c aitemplate.force_profile=true or TRICK_CI_ENV=1\n # Otherwise, dummy profiling records are not useful.\n if cls.force_profile(cls) or cls.trick_ci_env(cls):\n from aitemplate.AITemplate.fb.remote_logger import AITemplateRemoteLogger\n\n try:\n AITemplateRemoteLogger.log(record)\n except Exception as e:\n _LOGGER.info(f\"remote_logger failed: {e}\")", "def error(self, msg, stderr=True):\n self.log(msg, level=self.ERROR, stderr=stderr)", "def log_error(log_str):\n logger = logging.getLogger()\n logger.error(log_str)", "def _create_logger(title, log_msg_id=\"\", log_file_suffix=\".log\"):\n\n logging.setLoggerClass(SkidlLogger)\n logger = logging.getLogger(title)\n\n # Errors & warnings always appear on the terminal.\n handler = logging.StreamHandler(sys.stderr)\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Errors and warnings are stored in a log file with the top-level script's name.\n handler = SkidlLogFileHandler(get_script_name() + log_file_suffix, mode=\"w\")\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Set logger to trigger on info, warning, and error messages.\n logger.setLevel(logging.INFO)\n\n # Augment the logger's functions to count the number of errors and warnings.\n logger.error = CountCalls(logger.error)\n logger.warning = CountCalls(logger.warning)\n\n return logger", "def _make_child_error(msg, module, name, traceback, log, log_type, context):\n return ChildError(msg, module, name, traceback, log, log_type, context)", "def emit(self, record):\n try:\n msg = self.format(record)\n log_level = record.levelno\n self.write_log(msg, log_level)\n except Exception:\n self.handleError(record)", "def __add_log(self, logType: int, message: str) -> None:\n\n if isinstance(message, BaseException):\n ex: BaseException = message\n if hasattr(ex, 'message'):\n message = ex.message\n else:\n message = ex.__str__()\n\n message += f'\\n{traceback.format_exc().__str__()}'\n\n if message is None:\n return\n\n if isinstance(message, str) and message.strip().__len__() == 0:\n return\n\n st = stack()\n caller: Traceback = getframeinfo(st[2][0])\n log = LogModel()\n log.log_level = logType\n log.filename = caller.filename\n log.function = caller.function\n log.line_number = caller.lineno\n log.message = message\n log.creation_date = datetime.now()\n\n self.__logs.append(log)", "def emit(self, record):\n try:\n msg = self.format(record)\n log_level = record.levelno\n self.write_log_buffer(msg, log_level)\n except Exception:\n self.handleError(record)", "def error(self, tag, message, exc_info=False):\n \n self.log(logging.error,tag, message, exc_info)", "def log_error(self, msg):\n self.log(msg, level=LOG_ERROR)", "def init(filename, level, logname=None):\n\n global logfilename\n\n def openFileHandler(fname):\n mkdir.parents(os.path.dirname(fname), stat.S_IRWXU)\n return 
XendRotatingFileHandler(fname, mode = 'a',\n maxBytes = MAX_BYTES,\n backupCount = BACKUP_COUNT)\n\n # Rather unintuitively, getLevelName will get the number corresponding to\n # a level name, as well as getting the name corresponding to a level\n # number. setLevel seems to take the number only though, so convert if we\n # are given a string.\n if isinstance(level, types.StringType):\n level = logging.getLevelName(level)\n\n if logname:\n logname.setLevel(level)\n else:\n log.setLevel(level)\n\n try:\n fileHandler = openFileHandler(filename)\n logfilename = filename\n except IOError:\n try:\n logfilename = tempfile.mkstemp(\"-libvirt.log\")[1]\n except IOError:\n print >>sys.stderr, ('libvirt/OnceLogging.py: Unable to open standard or temporary log file for libvirt')\n os._exit(1)\n fileHandler = openFileHandler(logfilename)\n\n fileHandler.setFormatter(logging.Formatter(LOGFILE_FORMAT, DATE_FORMAT))\n if logname:\n logname.addHandler(fileHandler)\n else:\n log.addHandler(fileHandler)\n\n stderrHandler = logging.StreamHandler()\n stderrHandler.setFormatter(logging.Formatter(STDERR_FORMAT,\n DATE_FORMAT))\n if logname:\n logname.addHandler(fileHandler)\n else:\n log.addHandler(fileHandler)", "def emit(self, record):\r\n try:\r\n level = logger.level(record.levelname).name\r\n except (ValueError, AttributeError): # pragma: no cover\r\n level = record.levelno\r\n\r\n # Find caller from where originated the logged message\r\n frame, depth = logging.currentframe(), 2\r\n while frame.f_code.co_filename == logging.__file__:\r\n frame = frame.f_back\r\n depth += 1\r\n\r\n logger.opt(lazy=True, depth=depth, exception=record.exc_info,).log(\r\n level, record.getMessage()\r\n )", "def append_record_failure():\n\t\tpass", "def __CreateLog(self, log_name, log_level=NOTSET, log_handler=FILE,\n stream=sys.stderr):\n logger = logging.getLogger(log_name)\n\n # Update log level to reflect changes. 
If a higher log level is given\n # the logger should raise it's boundary.\n if log_level < logger.level or logger.level == logging.NOTSET:\n logger.setLevel(log_level)\n\n if (log_name in self.__log_table and\n self.__log_table[log_name] == Logger.FILE_AND_CONSOLE):\n # Don't add any more handlers.\n return\n\n # Create an entry for log name.\n if log_name not in self.__log_table:\n self.__log_table[log_name] = Logger.NONE\n\n if log_handler != Logger.NONE:\n fmt = ('[%(asctime)s::%(levelname)s::' + self.__lib_sig +\n '] %(message)s')\n # Add FILE handler if needed.\n if (log_handler == Logger.FILE or\n log_handler == Logger.FILE_AND_CONSOLE and\n self.__log_table[log_name] != Logger.FILE):\n if not os.path.exists(self.__log_path):\n os.makedirs(self.__log_path)\n fh = logging.FileHandler(os.path.join(self.__log_path,\n '%s.log' % log_name))\n fh.setLevel(log_level)\n fh.setFormatter(logging.Formatter(fmt))\n logger.addHandler(fh)\n # Binary arithmetic to yield updated handler.\n self.__log_table[log_name] = self.__log_table[log_name] + Logger.FILE\n\n # Add CONSOLE handler if needed.\n if (log_handler == Logger.CONSOLE or\n log_handler == Logger.FILE_AND_CONSOLE and\n self.__log_table[log_name] != Logger.CONSOLE):\n ch = logging.StreamHandler(stream)\n ch.setLevel(log_level)\n ch.setFormatter(logging.Formatter(fmt))\n logger.addHandler(ch)\n # Binary arithmetic to yield updated handler.\n self.__log_table[log_name] = self.__log_table[log_name] + Logger.CONSOLE", "def _log_error(self, err_msg):\n if self._on_error_action == \"raise\":\n raise InvalidDatasetError(err_msg)\n else:\n logger.warning(err_msg)", "def record_error(self,article_id='',identifier='',record_id='',method='',object_caller='',field='',value='',notes='',time=strftime(\"%Y-%m-%d %H:%M:%S\",localtime())):\n\t\tconn = sqlite3.connect('errors.db')\n\t\tc = conn.cursor()\n\t\tc.execute(\"CREATE TABLE IF NOT EXISTS errortable(article_id TEXT, identifier TEXT, record_id TEXT, method TEXT, object TEXT, field TEXT, value TEXT, notes TEXT, datetimestamp TEXT)\")\n\n\t\t#if theyre defined in environment variables\n\t\tif ('article_id' and 'identifier' in os.environ):\n\t\t\t#if they werent instantiated in keyword args\n\t\t\tif (not article_id):\n\t\t\t\tarticle_id = os.environ['article_id']\n\t\t\tif (not identifier):\n\t\t\t\tidentifier = os.environ['identifier']\n\n\t\ttry:\n\t\t\tc.execute(\"INSERT INTO errortable VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)\", (article_id,identifier,record_id,method,object_caller,field,value,notes,time))\n\t\t\tconn.commit()\n\t\t\tc.close()\n\t\t\tconn.close()\n\t\t\treturn 0 \t\t#no errors\n\t\texcept sqlite3.InterfaceError as e:\n\t\t\ttry:\n\t\t\t\tc.execute(\"INSERT INTO errortable VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)\", (str(article_id),str(identifier),str(record_id),str(method),str(object_caller),str(field),str(value),str(notes),str(time)))\n\t\t\t\tconn.commit()\n\t\t\t\tc.close()\n\t\t\t\tconn.close()\n\t\t\t\treturn 0 \t\t#no errors\n\t\t\texcept Exception:\n\t\t\t\tsqlfields = ['article_id','identifier','record_id','method','object_caller','field','value','notes','time']\n\t\t\t\terror_index = re.search(r'(\\d)',str(e)).group(1)\n\t\t\t\tself.record_error(method='record_error',object_caller='DatabaseManager',notes=\"invalid entry for field: '{}'\".format(sqlfields[int(error_index)]))\n\t\t\t\treturn 1 \t\t#errors occurred and couldnt be resolved\n\t\texcept Exception as e:\n\t\t\tself.record_error(method='record_error',object_caller='DatabaseManager',notes=\"error information from sqlite3: 
'{}'\".format(e))\n\t\t\treturn 1 \t\t#another error occurred", "def _log_error(self, event, err, **kwargs):\n self.context.logger.error(\n f\"step {self.name} got error {err} when processing an event:\\n {event.body}\"\n )\n message = traceback.format_exc()\n self.context.logger.error(message)\n self.context.push_error(\n event, f\"{err}\\n{message}\", source=self.fullname, **kwargs\n )", "def ERROR_LOG(msg, *args, **kwargs):\n logger.error(msg, *args, **kwargs)", "def setup_build_log(log_filepath: Path) -> logging.Logger:\n\n assert log_filepath not in UNAVAILABLE_LOG_FILENAMES\n UNAVAILABLE_LOG_FILENAMES.append(log_filepath)\n\n formatter = logging.Formatter(\"%(message)s\")\n handler = logging.FileHandler(str(log_filepath), \"w\")\n handler.setFormatter(formatter)\n build_log = logging.getLogger(str(log_filepath))\n build_log.addHandler(handler)\n build_log.setLevel(logging.DEBUG)\n return build_log", "def logerror(self, msg):\n self.logger.error(msg)", "def init_log(log_instance):\r\n base_dir = os.path.dirname(os.path.abspath(__file__))\r\n log_dir = os.path.join(base_dir, \"logs\")\r\n if not os.path.exists(log_dir):\r\n os.makedirs(log_dir)\r\n log_file = log_instance + \"_\" + datetime.datetime.now().strftime(\"%Y-%m-%d\") + \".log\"\r\n logging_conf = {\r\n \"version\": 1,\r\n \"disable_existing_loggers\": False,\r\n \"formatters\": {\r\n \"simple\": {\r\n 'format': '%(asctime)s [%(filename)s:%(lineno)d] [%(levelname)s]- %(message)s'\r\n },\r\n 'standard': {\r\n 'format': '%(asctime)s [%(threadName)s:%(thread)d] [%(filename)s:%(lineno)d] [%(levelname)s]- %(message)s'\r\n },\r\n },\r\n\r\n \"handlers\": {\r\n \"console\": {\r\n \"class\": \"logging.StreamHandler\",\r\n \"level\": \"DEBUG\",\r\n \"formatter\": \"simple\",\r\n \"stream\": \"ext://sys.stdout\"\r\n },\r\n\r\n \"default\": {\r\n \"class\": \"logging.handlers.RotatingFileHandler\",\r\n \"level\": \"DEBUG\",\r\n \"formatter\": \"standard\",\r\n \"filename\": os.path.join(log_dir, log_file),\r\n 'mode': 'w+',\r\n \"maxBytes\": 1024 * 1024 * 5, # 5 MB\r\n \"backupCount\": 20,\r\n \"encoding\": \"utf8\"\r\n },\r\n },\r\n\r\n \"root\": {\r\n 'handlers': ['default', 'console'],\r\n 'level': \"INFO\",\r\n 'propagate': False\r\n }\r\n }\r\n\r\n logging.config.dictConfig(logging_conf)\r\n\r\n # configure application log\r\n return logging.getLogger(log_instance)", "def log_error(self, msg):\n self.logger.error(msg)", "def setLogRecordFactory(self, factory):\n self.logRecordFactory = factory", "def emit(self, record):\n try:\n if self.check_base_filename(record):\n self.build_base_filename()\n logging.FileHandler.emit(self, record)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n self.handleError(record)", "def set_up_logger(name,\n logfilename,\n log_file_level='NOTICE',\n log_stderr_level='NOTICE',\n logo=False):\n logger = logbook.Logger(name)\n if logo:\n fmt_str = '{record.message:^120}'\n logger.handlers.append(logbook.StderrHandler(level='WARNING',\n format_string=fmt_str))\n logofile = os.path.join(os.path.dirname(__file__), 'logo.txt')\n with open(logofile, 'r') as f:\n for line in f:\n logger.warn(line.strip('\\n'))\n logger.handlers = []\n\n fmt_str = ('[{record.time:%Y-%m-%d %H:%M:%S}][{record.level_name:*^11}] :'\n ' {record.message:~^45}'\n ' line {record.lineno:<3} in '\n '{record.module:<}.{record.func_name:<} ')\n\n logfilename = os.path.join(os.getenv('PANLOG', '/var/huntsman/logs'), logfilename)\n logger.handlers.append(TRFH(logfilename,\n level=log_file_level,\n mode='a+',\n 
date_format='%Y-%m-%d',\n bubble=True,\n backup_count=100,\n format_string=fmt_str))\n\n logger.handlers.append(StdH(level=log_stderr_level,\n bubble=True,\n format_string=fmt_str))\n return logger", "def error(msg, **kwargs):\n kwargs[\"logger\"] = kwargs.get(\"logger\", default_logger)\n kwargs[\"label\"] = kwargs.get(\"label\", get_caller())\n _error(msg, **kwargs)", "def emit(self, record):\n standard_log_info = ['name', 'msg', 'args', 'levelname', 'levelno', 'pathname', 'filename', 'module', 'exc_info', 'exc_text', 'stack_info', 'lineno',\n 'funcName', 'created', 'msecs', 'relativeCreated', 'thread', 'threadName', 'processName', 'process']\n for t in range(self.retries):\n try:\n info = {k: str(v) for k, v in record.__dict__.items() if not k.startswith('__') and k not in standard_log_info}\n bod = 'log={}'.format(json.dumps(info))\n self.http_client.fetch(self.addr, method='POST', body=bod, request_timeout=self.request_timeout)\n except Exception as e:\n # Other errors are possible, such as IOError.\n logger.error(str(e))\n time.sleep(self.on_fail_sleep_duration)\n else:\n break\n # http_client.close()\n return", "def log_error(stage: str, err: Exception) -> None:\n try:\n msg = 'Error during {}: {}'.format(stage, err)\n print(msg)\n sys.print_exception(err)\n\n log_size = 0\n try:\n # Get size of main log file.\n log_size = os.stat(LOG_NAME_MAIN)[6]\n except OSError:\n pass\n\n # Rotate log if size reaches limit.\n if log_size > MAX_LOG_SIZE:\n os.rename(LOG_NAME_MAIN, LOG_NAME_SECONDARY)\n\n with open(LOG_NAME_MAIN, 'a') as f:\n f.write(msg)\n f.write('\\n')\n except:\n # This function is intended to log errors and there is no\n # sense to raise another exception as nothing could handle it.\n pass", "def record_error(self, message, keys=None, type=None, **kwargs):\n keys = list(keys) if keys is not None else []\n self.errors.append(\n dict(\n message=message,\n keys=keys,\n type=type or EntityErrors.UNCATEGORIZED,\n **kwargs\n )\n )", "def prepare(self, record):\r\n # The format operation gets traceback text into record.exc_text\r\n # (if there's exception data), and also returns the formatted\r\n # message. We can then use this to replace the original\r\n # msg + args, as these might be unpickleable. 
We also zap the\r\n # exc_info and exc_text attributes, as they are no longer\r\n # needed and, if not None, will typically not be pickleable.\r\n msg = self.format(record)\r\n record.message = msg\r\n record.msg = msg\r\n record.args = None\r\n record.exc_info = None\r\n record.exc_text = None\r\n return record", "def emit(self, record: logging.LogRecord):\n if record.levelno < self.level:\n return\n if record.exc_info:\n frame_summary: FrameSummary = extract_tb(record.exc_info[2], 1)[0]\n alias = f\"{frame_summary.filename}:{frame_summary.name}:{frame_summary.lineno}\"\n else:\n alias = f\"{record.pathname}:{record.funcName}:{record.lineno}\"\n body = {\n \"message\": record.getMessage(),\n \"alias\": alias,\n \"description\": self.format(record),\n \"visible_to\": [{\"name\": self._team_name, \"type\": \"team\"}],\n \"priority\": self._level_mapping[record.levelno],\n }\n try:\n requests.post(\n \"https://api.opsgenie.com/v2/alerts\", headers={\"Authorization\": f\"GenieKey {self._api_key}\"}, json=body,\n )\n except Exception as err:\n print(f\"Exception when sending log to OpsGenie: {err}\\n\")", "def init_log(log_level=logging.DEBUG):\n now = time.time()\n ts = datetime.datetime.fromtimestamp(now).strftime('%Y%m%d')\n file_name = os.path.abspath(os.path.join(os.getcwd(), '..', 'traffic_logs', f'{ts}_traffic.log'))\n folder, _ = os.path.split(file_name)\n Path(folder).mkdir(parents=True, exist_ok=True)\n\n # create formatter and add it to the handlers\n log_format = '[%(asctime)s][%(name)s][%(levelname)s] %(message)s'\n\n logging.basicConfig(filemode='a',\n format=log_format,\n datefmt='%H:%M:%S',\n level=logging.ERROR,\n stream=sys.stdout,\n # filename=file_handler\n )\n\n formatter = logging.Formatter(log_format)\n\n # create file handler which logs even debug messages\n file_handler = logging.FileHandler(file_name)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(log_level)\n\n std_out = logging.StreamHandler(sys.stdout)\n std_out.setFormatter(formatter)\n std_out.setLevel(log_level)\n\n # This for avoiding streams to log to root's stderr, which prints in red in jupyter\n root_logger = logging.getLogger()\n for handler in root_logger.handlers:\n # continue\n root_logger.removeHandler(handler)\n\n # add the handlers to the logger\n root_logger.addHandler(file_handler)\n\n # By default the install() function installs a file_handler on the root root_logger,\n # this means that log messages from your code and log messages from the\n # libraries that you use will all show up on the terminal.\n coloredlogs.install(level=log_level, fmt=log_format, stream=sys.stdout)", "def log_exception():\n logging.basicConfig(level=logging.DEBUG)\n return logging.getLogger('exceptions_log')", "def log_error(msg, ex=None):\n if ex == None:\n exMsg = \"\"\n the_ex = Exception(msg)\n else:\n exMsg = \" \\n \" + repr(ex)\n the_ex = ex\n s = format_log(\"\\n\\n ERROR! 
%s%s\\n\\n\" % (msg,exMsg))\n print(s)\n log['error'].append({'msg':s, 'ex':ex})\n f_errors.write(s)", "def error(msg):\n return log().error(msg)", "def error(log):\n write(syslog.LOG_ERR, 'error', '{log}'.format(log=log))", "def error(msg):\n log('ERROR', msg)", "def json_record(self, message: str, record: LogRecord) -> Dict:\n record_dict = dict(record.__dict__)\n\n record_dict[\"message\"] = message\n\n additional = {\n \"timestamp\": int(record.created * 1000),\n \"severity\": record.levelname,\n \"logger.name\": record.name,\n \"logger.method_name\": record.funcName,\n \"logger.thread_name\": record.threadName,\n }\n\n record_dict = {**additional, **record_dict}\n\n # Handle exceptions, including those in the formatter itself\n exc_info = record.exc_info\n if exc_info:\n if \"error.kind\" not in record_dict and exc_info[0] is not None:\n record_dict[\"error.kind\"] = exc_info[0].__name__\n if \"error.message\" not in record_dict:\n record_dict[\"error.message\"] = str(exc_info[1])\n if \"error.stack\" not in record_dict:\n record_dict[\"error.stack\"] = self.formatException(exc_info)\n\n return record_dict", "def create_log(self, exc):\n return self.formatter.formatException(exc)", "def manage_kafka_error(msg):\n logger.error(msg.error())", "def setup_logger():\n logger = logging.getLogger('tracking_log')\n logger.setLevel(logging.INFO)\n #Where to Store needs to be identified?\n f_handler = logging.FileHandler(PROCESSED_LOGFILE, mode='a', encoding = None, delay = False)\n f_handler.setLevel(logging.INFO)\n f_format = logging.Formatter('%(asctime)s\\t%(message)s\\t%(dataset_id)s\\t%(status)s')\n f_handler.setFormatter(f_format)\n logger.addHandler(f_handler)\n return logger", "def log_error(self, format_str, *args):\r\n LOGGER.error(self._format_msg(format_str, *args))", "def log_error(self, format_str, *args):\n LOGGER.error(self._format_msg(format_str, *args))", "def error ( self , message , *args , **kwargs ) :\n return self.logger.error ( message , *args , **kwargs )", "def error(self, *args, **kwargs):\n self.msg(logging.ERROR, *args, **kwargs)", "def __init__(self, logfile=None, level=DefaultLevel, append=False,\n max_name_len=MaxModuleNameLen):\n\n # if no logfile given and there is a default logger, use it\n if logfile is None and 'DefaultObj' in globals():\n self.__dict__ = DefaultObj.__dict__\n # ignore level and max_name_len params\n return\n\n # no default logger, make up log filename if user didn't supply\n if logfile is None:\n # get caller module name, use it\n (caller_name, _) = self.caller_info()\n logfile = '%s.log' % caller_name\n\n # if no global logger yet, make this one global\n if not 'DefaultObj' in globals():\n globals()['DefaultObj'] = self\n\n # try to open log - a check for READONLY filesystem (ie, CD)\n try:\n if append:\n fd = open(logfile, 'a')\n else:\n fd = open(logfile, 'w')\n except IOError:\n # assume we have readonly filesystem\n basefile = os.path.basename(logfile)\n if sys.platform == 'win32':\n logfile = os.path.join('C:\\\\', basefile)\n else:\n logfile = os.path.join('~', basefile)\n else:\n fd.close()\n\n # try to open logfile again for real\n if append:\n self.logfd = open(logfile, 'a')\n else:\n self.logfd = open(logfile, 'w')\n\n # set attributes for this instance\n self.logfile = logfile\n self.level = level\n self.max_name_len = max_name_len\n\n # start the log with some information - date/time, level, etc.\n self.critical('='*55)\n self.critical('Log started on %s, log level=%s'\n % (datetime.datetime.now().ctime(),\n 
LevelNumToName[level]))\n self.critical('-'*55)", "def __init__(self, record=None):\n self.record = record", "def logError(self, errStr, varMsg=''):\n self.log.error(errStr, varMsg)\n self.errors.append(errStr + \" \" + varMsg)", "def _generate_log(path):\n # Create a logger and set the level.\n logger = logging.getLogger(\"Log_info\")\n # Check handler exists\n if len(logger.handlers) > 0:\n return logger # Logger already exists\n # set logger level\n logger.setLevel(logging.DEBUG)\n # Create file handler, log format and add the format to file handler\n stream_handler = logging.StreamHandler()\n file_handler = logging.FileHandler(path)\n\n # See https://docs.python.org/3/library/logging.html#logrecord-attributes\n # for log format attributes.\n log_format = \"%(levelname)s %(asctime)s %(message)s\"\n formatter = logging.Formatter(log_format)\n stream_handler.setFormatter(formatter)\n file_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n logger.addHandler(file_handler)\n\n return logger", "def setup_logger(name, log_file, format, log_mode, stream_handler):\n DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n\n handler = logging.FileHandler(log_file, mode=log_mode)\n handler.setFormatter(logging.Formatter(fmt=format, datefmt=DATE_FORMAT))\n\n logger = logging.getLogger(name)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n if stream_handler:\n logger.addHandler(logging.StreamHandler()) # stderr\n\n return logger", "def emit(self, record):\n\n trace = None\n exc = record.__dict__['exc_info']\n if exc:\n trace = traceback.format_exc(exc)\n path = request.path\n method = request.method\n ip = request.remote_addr\n slack_event = LogEntry(\n logger=record.__dict__['name'],\n level=record.__dict__['levelname'],\n trace=trace,\n message=record.__dict__['msg'],\n path=path,\n method=method,\n ip=ip,\n )\n slack_event.save()\n #hook = SlackWebhookClient(self.webhook)\n #hook.send(\n # message=slack_event.to_slack_msg(),\n # emoji=\":ghost:\",\n # username=\"battleship-server\"\n #)", "def setup_log(self, log_file):\n directory = os.path.dirname(log_file)\n if directory:\n os.makedirs(directory, exist_ok=True)\n\n logger = logging.getLogger(log_file)\n formatter = logging.Formatter(config.LOG_FORMAT)\n\n file_handler = logging.FileHandler(log_file, mode='a')\n file_handler.setFormatter(formatter)\n\n logger.setLevel(logging.INFO)\n logger.addHandler(file_handler)\n\n return logger", "def setup_logging_and_errors() -> dict:\n import errorhandler\n\n # Track if message gets logged with severity of error or greater\n # See https://stackoverflow.com/a/45446664/4651668\n error_handler = errorhandler.ErrorHandler()\n\n # Log DeprecationWarnings\n warnings.simplefilter(\"always\", DeprecationWarning)\n logging.captureWarnings(True)\n\n # Log to stderr\n logger = logging.getLogger()\n stream_handler = logging.StreamHandler(stream=sys.stderr)\n stream_handler.setFormatter(\n logging.Formatter(\"## {levelname}\\n{message}\", style=\"{\")\n )\n logger.addHandler(stream_handler)\n return {\n \"logger\": logger,\n \"error_handler\": error_handler,\n }", "def setup_logging(module=None, level=logging.INFO): # pragma: no cover\n logger = logging.getLogger(module or '')\n logger.setLevel(level)\n logging.Formatter.converter = time.gmtime\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(processName)s - %(levelname)s - %(message)s'\n )\n stream_handler = logging.StreamHandler(sys.stderr)\n stream_handler.setLevel(level)\n stream_handler.setFormatter(formatter)\n 
logger.addHandler(stream_handler)\n return logger", "def ParseRecord(self, parser_mediator, key, structure):\n if key != 'log_entry':\n raise errors.ParseError(\n 'Unable to parse record, unknown structure: {0:s}'.format(key))\n\n month_string = self._GetValueFromStructure(structure, 'month')\n\n year = self._GetValueFromStructure(structure, 'year')\n month = self.MONTHS.get(month_string)\n day = self._GetValueFromStructure(structure, 'day')\n hours = self._GetValueFromStructure(structure, 'hours')\n minutes = self._GetValueFromStructure(structure, 'minutes')\n seconds = self._GetValueFromStructure(structure, 'seconds')\n\n event_data = IOSSysdiagLogEventData()\n event_data.process_identifier = self._GetValueFromStructure(\n structure, 'process_identifier')\n event_data.severity = self._GetValueFromStructure(structure, 'severity')\n event_data.originating_call = self._GetValueFromStructure(\n structure, 'originating_call')\n event_data.body = self._GetValueFromStructure(structure, 'body')\n\n try:\n date_time = dfdatetime_time_elements.TimeElements(\n time_elements_tuple=(year, month, day, hours, minutes, seconds))\n except (TypeError, ValueError):\n parser_mediator.ProduceExtractionWarning('invalid date time value')\n return\n\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n\n parser_mediator.ProduceEventWithEventData(event, event_data)", "def log_error(err):\n print(err)", "def error(msg=None, *args, **kwargs):\n log(ERROR, msg, *args, **kwargs)", "def make_logger(model_dir: str, log_file: str = \"train.log\") -> Logger:\n logger = logging.getLogger(__name__)\n if not logger.handlers:\n logger.setLevel(level=logging.DEBUG)\n fh = logging.FileHandler(\"{}/{}\".format(model_dir, log_file))\n fh.setLevel(level=logging.DEBUG)\n logger.addHandler(fh)\n formatter = logging.Formatter(\"%(asctime)s %(message)s\")\n fh.setFormatter(formatter)\n if platform == \"linux\":\n sh = logging.StreamHandler()\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(sh)\n logger.info(\"Hello! This is Joey-NMT.\")\n return logger", "def _log_failed(cls, count):\n MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_FAILED_RECORDS, count)", "def log_error(self, fmt, *args):\r\n pass\r\n # log_error\r", "def __init__(self,\n logdir,\n mode='a',\n delete=False,\n clearmem=True):\n Logger.__init__(self)\n self.logdir = logdir\n self.mode = mode\n self.delete = delete\n self.clearmem = clearmem\n if not os.path.exists(self.logdir):\n os.mkdir(self.logdir)", "def LogError(errorMessage,**kwargs):\n if(isinstance(errorMessage,str)):\n print(errorMessage)\n __errorsTracked__.append(errorMessage)\n return None\n else:\n raise TypeError('Expected str for errorMessage. 
Got {}'.format(type(errorMessage)))", "def setup_logging(filename):\n try:\n LOG_PATH.mkdir(parents=True, exist_ok=True)\n LOG_CONFIG['handlers']['file_handler']['filename'] = LOG_PATH / filename\n logging.config.dictConfig(LOG_CONFIG)\n except OSError:\n logging.basicConfig(level=logging.ERROR)\n logging.exception('Could not initialize logging to file')\n\n sys.excepthook = log_uncaught_exceptions", "def record_failure(self, now=None) -> None:\n logging.info('Recording failure at %r', now or int(time.time()))\n self.failure_timestamp = now or int(time.time())\n self.put()", "def setup_logging(log_level=logging.INFO, log_filename=None) -> Logger:\n logger = logging.getLogger()\n\n # Set log format to dislay the logger name to hunt down verbose logging modules\n fmt = \"%(name)-25s %(levelname)-8s %(message)s\"\n\n # Use colored logging output for console\n coloredlogs.install(level=log_level, fmt=fmt, logger=logger)\n\n # Quiet some internal logs\n logging.getLogger(\"dex_ohlcv.eventscanner\").setLevel(logging.INFO)\n\n # Disable logging of JSON-RPC requests and reploes\n logging.getLogger(\"web3.RequestManager\").setLevel(logging.WARNING)\n logging.getLogger(\"web3.providers.HTTPProvider\").setLevel(logging.WARNING)\n # logging.getLogger(\"web3.RequestManager\").propagate = False\n\n # Disable all internal debug logging of requests and urllib3\n # E.g. HTTP traffic\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n\n # IPython notebook internal\n logging.getLogger(\"asyncio\").setLevel(logging.WARNING)\n\n # Datadog tracer agent\n # https://ddtrace.readthedocs.io/en/stable/basic_usage.html\n logging.getLogger(\"ddtrace\").setLevel(logging.INFO)\n\n # Flooding of OpenAPI spec debug notes on startup\n logging.getLogger(\"openapi_spec_validator\").setLevel(logging.WARNING)\n\n if log_filename:\n # Append to the log file\n handler = logging.FileHandler(log_filename, 'w+')\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n return logger", "def log_error(self, ex):\n api_error = ex.data.convert_to(ApiError)\n print(\"Error configuring {}\".format(api_error.error_message))\n print(\"{}\".format(api_error.__dict__))\n print(\"{}\".format(api_error.details))", "def logError(self, text):\n time = datetime.now().strftime(\"%H:%M:%S \")\n self.log(time + \"(ERR):\\t\", text)", "def emit(self, record):\n try:\n self.MAP[record.levelno](\"%s: %s\" % (record.name, record.msg))\n except KeyError:\n rospy.logerr(\"unknown log level %s LOG: %s: %s\" %\n (record.levelno, record.name, record.msg))", "def create_logger(\n project_name: str,\n level: str = \"INFO\",\n log_dir: str = \"/tmp/logs\",\n file_name: Optional[str] = None,\n do_print: bool = True,\n simple_logging: bool = False,\n log_to_file: bool = False,\n rich_logging: bool = False,\n time_zone: Optional[str] = None,\n):\n import __main__\n\n if file_name is None:\n try:\n file_name = ntpath.basename(__main__.__file__).split(\".\")[0]\n except:\n file_name = \"logs\"\n\n logger = logging.getLogger(file_name)\n logger.handlers.clear()\n logger.setLevel(getattr(logging, level))\n\n if time_zone:\n from pytz import timezone, utc\n def time_formatter(*args):\n # TODO: Doesnt work with rich formatter\n utc_dt = utc.localize(datetime.datetime.utcnow())\n my_tz = timezone(time_zone)\n converted = utc_dt.astimezone(my_tz)\n return converted.timetuple()\n\n logging.Formatter.converter 
= time_formatter\n\n if rich_logging:\n from rich.logging import RichHandler\n stream_format = f\"{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = RichHandler(omit_repeated_times=False)\n else:\n stream_format = f\"%(asctime)s:%(levelname)s:{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = logging.StreamHandler()\n\n file_formatter = stream_formatter = logging.Formatter(\n stream_format, \"%Y-%m-%d %H:%M:%S\"\n )\n\n if simple_logging:\n file_formatter = logging.Formatter(\"%(message)s\")\n stream_formatter = logging.Formatter(\"%(message)s\")\n\n if log_to_file:\n date = datetime.date.today()\n date = \"%s-%s-%s\" % (date.day, date.month, date.year)\n log_file_path = os.path.join(log_dir, \"%s-%s.log\" % (file_name, date))\n\n create_folder(log_dir)\n file_handler = logging.FileHandler(log_file_path)\n file_handler.setFormatter(file_formatter)\n logger.addHandler(file_handler)\n\n if do_print:\n stream_handler.setFormatter(stream_formatter)\n logger.addHandler(stream_handler)\n\n logger.propagate = False\n\n return logger", "def handle_error(msg, err_type, logger, raise_msg=None, log_msg=None):\n if raise_msg is None:\n raise_msg = msg\n if log_msg is None:\n log_msg = msg\n\n logger.error(log_msg)\n raise err_type(raise_msg)", "def set(cls, log_level, log_filename, append):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # Log to sys.stderr using log level passed through command line\n if log_level != logging.NOTSET:\n log_handler = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)-8s %(message)s\")\n log_handler.setFormatter(formatter)\n log_handler.setLevel(log_level)\n logger.addHandler(log_handler)\n\n # Log to rotating file using DEBUG log level\n log_handler = logging.handlers.RotatingFileHandler(\n log_filename, mode=\"a+\", backupCount=3\n )\n formatter = logging.Formatter(\"%(asctime)s %(levelname)-8s %(message)s\")\n log_handler.setFormatter(formatter)\n log_handler.setLevel(logging.DEBUG)\n logger.addHandler(log_handler)\n\n if not append:\n # Create a new log file on every new\n # (i.e. not scheduled) invocation\n log_handler.doRollover()", "def error(self, *args):\n self.mylog.error(*args)", "def log_error(self,msg):\r\n t = time.strftime(\"%a %d/%b/%Y %H:%M:%S\", time.gmtime())\r\n logfile = open(self.log_file, \"a\")\r\n logfile.write(\"Error at %s \"% t)\r\n logfile.write(msg)\r\n traceback.print_exc(file=logfile)\r\n logfile.write( \"---------------------\\n\")\r\n logfile.close()", "def cli_add_record(record_data):\n new_record = None\n try:\n new_record = api.insert_record( record_data)\n except DuplicateRecord as error:\n debug(\"%(error)s\" % locals())\n print \"Adding new record failed. %(error)s\" % locals()\n return None\n except MissingRequiredInformaton as error:\n debug(\"%(error)s\" % locals())\n print \"Adding new record failed. 
%(error)s\" % locals()\n return None\n\n return new_record", "def create_record(self, zone_id, record, record_type, data, ttl=60):\r\n self.record.createObject({\r\n 'domainId': zone_id,\r\n 'ttl': ttl,\r\n 'host': record,\r\n 'type': record_type,\r\n 'data': data})", "def set(cls, log_level, log_filename, append):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # Log to sys.stderr using log level passed through command line\n if log_level != logging.NOTSET:\n log_handler = logging.StreamHandler(sys.stdout)\n if sys.platform.find('linux') >= 0:\n formatter = ColoredFormatter(cls.COLOR_FORMAT)\n else:\n formatter = ColoredFormatter(cls.NO_COLOR_FORMAT, False)\n log_handler.setFormatter(formatter)\n log_handler.setLevel(log_level)\n logger.addHandler(log_handler)\n\n # Log to rotating file using DEBUG log level\n log_handler = logging.handlers.RotatingFileHandler(log_filename,\n mode='a+',\n backupCount=3)\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s '\n '%(message)s')\n log_handler.setFormatter(formatter)\n log_handler.setLevel(logging.DEBUG)\n logger.addHandler(log_handler)\n\n if not append:\n # Create a new log file on every new\n # (i.e. not scheduled) invocation\n log_handler.doRollover()", "def error(msg, resource=None, stream_id=None):\n engine = get_engine()\n if engine is not None:\n _log(engine, engine_pb2.ERROR, msg, resource, stream_id)\n else:\n print(\"error: \" + msg, file=sys.stderr)", "def logerr( # pylint: disable=dangerous-default-value\n level: LogLevel,\n src: Any,\n err: BaseException,\n format_msg: str,\n *format_vargs: Any,\n **format_kvargs: Any\n) -> None:\n src_str = _to_src_str(src)\n _HANDLER_LOCK.acquire_read()\n try:\n formatted: Optional[str] = None\n for min_level, src_starts_with, handler in _HANDLERS.values():\n if level >= min_level and src_str.startswith(src_starts_with):\n if not formatted:\n formatted = format_msg.format(*format_vargs, **format_kvargs)\n handler(level, src_str, formatted, err)\n finally:\n _HANDLER_LOCK.release()", "def _configure_logging(self):\r\n self._logger = logging.getLogger('AWSIoTPythonSDK.core')\r\n self._logger.setLevel(logging.ERROR)\r\n self._streamHandler = logging.StreamHandler()\r\n self._formatter = logging.Formatter(\r\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n self._streamHandler.setFormatter(self._formatter)\r\n self._logger.addHandler(self._streamHandler)", "def logerror(entry, subarray=DEFAULT) :\n multiSubarray('logError', subarray, \"LOG: \" + entry)", "def log_error(customized_msg, logfile_handle):\r\n tb = sys.exc_info()[2]\r\n tbinfo = traceback.format_tb(tb)[0]\r\n pymsg = \"PYTHON ERRORS:\\nTraceback Info:\\n\" + tbinfo + \"\\nError Info:\\n \" + str(\r\n sys.exc_type) + \": \" + str(sys.exc_value) + \"\\n\"\r\n msgs = \"ARCPY ERRORS:\\n\" + arcpy.GetMessages(2) + \"\\n\"\r\n logfile_handle.writelines(customized_msg + str(msgs) + \"\\n\" + pymsg + \"\\n\")", "def error_handler(self, failure):\n log.error(failure)", "def init_logger(level=logging.DEBUG, when=\"D\", backup=7,\n _format=\"%(levelname)s: %(asctime)s: %(filename)s:%(lineno)d * %(thread)d %(message)s\",\n datefmt=\"%m-%d %H:%M:%S\"):\n formatter = logging.Formatter(_format, datefmt)\n logger = logging.getLogger()\n logger.setLevel(level)\n\n log_path = ops.join(os.getcwd(), 'logs/shadownet.log')\n _dir = os.path.dirname(log_path)\n if not os.path.isdir(_dir):\n os.makedirs(_dir)\n\n handler = handlers.TimedRotatingFileHandler(log_path, when=when, backupCount=backup)\n 
handler.setLevel(level)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n handler = handlers.TimedRotatingFileHandler(log_path + \".log.wf\", when=when, backupCount=backup)\n handler.setLevel(logging.WARNING)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n handler = logging.StreamHandler()\n handler.setLevel(level)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger" ]
[ "0.69343174", "0.6103778", "0.599118", "0.5986464", "0.58544385", "0.5821842", "0.58028203", "0.5739559", "0.57346153", "0.5654814", "0.56020546", "0.5592666", "0.5585921", "0.5539647", "0.55095744", "0.54659367", "0.54659367", "0.54548794", "0.5440924", "0.5418258", "0.53907716", "0.53742164", "0.5365384", "0.5360633", "0.5355909", "0.5351522", "0.5344819", "0.53226215", "0.5318566", "0.53004265", "0.52996707", "0.52875113", "0.5284286", "0.5261098", "0.52445", "0.52315426", "0.522184", "0.52214074", "0.5212505", "0.5205557", "0.520244", "0.5197343", "0.5193624", "0.5186486", "0.518519", "0.51840323", "0.5179276", "0.5175713", "0.51720804", "0.51622736", "0.51551616", "0.5149067", "0.51482815", "0.51311153", "0.5128057", "0.51245016", "0.5119707", "0.5118685", "0.5117311", "0.5116999", "0.5116964", "0.5115862", "0.51150626", "0.51135474", "0.5092785", "0.5090747", "0.5085803", "0.508164", "0.5077427", "0.50662166", "0.5065768", "0.5064137", "0.50556815", "0.5048131", "0.5025504", "0.5020605", "0.50190115", "0.5015711", "0.50151145", "0.5006327", "0.5002319", "0.49994072", "0.49988633", "0.49971437", "0.4992949", "0.49910098", "0.49898565", "0.4984088", "0.498151", "0.498046", "0.49790806", "0.4976705", "0.4976227", "0.4971269", "0.49653107", "0.49630263", "0.49625075", "0.4957951", "0.49531743", "0.49491382" ]
0.59985435
2
Creates an error log from ``message``.
def create_from_text(self, message, **kwargs):
    return self.process(
        message=message,
        **kwargs
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error(self, message):\n return self.log(\"ERROR\", message)", "def error(error_message: str):\n logger.error(error_message)", "def error(self, message: str):\n self.log(Level.ERROR, message)", "def err(message):\n\n timestamp = format_time(get_time())\n message = '{} - [ERROR] - {}'.format(timestamp, message)\n _log_status(message)", "def error(self, message: str) -> None:\n\n self.__add_log(self.ERROR, message)", "def add_to_error_log(message):\n f = open(Filenames.ERROR_LOG, \"a\")\n f.write((\"------------- %s --------------\\n\" % time.ctime()) + message)\n f.close()", "def log_error(self, message):\n # log the datetime+message to error_log.txt\n curr_time = datetime.datetime.now().strftime(\"%H:%M:%S \"\n \"%Y-%m-%d\")\n with open(ERROR_FILE_PATH, \"a+\") as error_file:\n error_file.write(\"{} $ {}\\n\".format(curr_time, message))", "def log_error(self, message):\n u = six.text_type\n log_line = (\n u('{0:%Y-%m-%d %H:%M:%S} [FALCON] [ERROR] {1} {2}?{3} => {4}\\n').\n format(datetime.now(), self.method, self.path, self.query_string,\n message)\n )\n\n self._wsgierrors.write(log_line)", "def log_error(self, message):\n self.logger.error(RED_RESET.format(thing=message))\n return", "def error(msg):\n return log().error(msg)", "def CreateError(error_message, return_code=500, allowed_origin=None,\n **kwargs):\n logging.error('Error occurred: %s', error_message)\n result = {\n 'template': 'error.html',\n 'data': {\n 'error_message': error_message\n },\n 'return_code': return_code,\n 'allowed_origin': allowed_origin,\n }\n\n result['data'].update(kwargs)\n return result", "def error(msg):\n log('ERROR', msg)", "def error(message: str, *args: Any) -> None:\n Logger.log(logging.ERROR, message, *args)", "def error(self, message, *args, **kwargs):\n\n self.logger.error(message, *args, **kwargs)", "def error(msg):\n log_msg(ERROR, msg)", "def log_error(self, msg):\n self.log(msg, level=LOG_ERROR)", "def error(cls, message):\n print('[ERROR] {0}'.format(message))", "def make_error_message(message_type, failure):\n exc_type, exc_value, exc_traceback = _adapt_to_exception_tuple(failure)\n return message_type(\n exception=exc_type,\n reason=exc_value,\n traceback=exc_traceback,\n )", "def error ( self , message , *args , **kwargs ) :\n return self.logger.error ( message , *args , **kwargs )", "def msg_err(message):\n to_stdout(\" !!! 
{message}\".format(message=message), colorf=red, bold=True)\n if _logger:\n _logger.error(message)", "def logerror(self, msg):\n self.logger.error(msg)", "def create_exception(self, msg: str):", "def log_error(self,msg):\r\n t = time.strftime(\"%a %d/%b/%Y %H:%M:%S\", time.gmtime())\r\n logfile = open(self.log_file, \"a\")\r\n logfile.write(\"Error at %s \"% t)\r\n logfile.write(msg)\r\n traceback.print_exc(file=logfile)\r\n logfile.write( \"---------------------\\n\")\r\n logfile.close()", "def error(self, message, new_line=True):\n #\n # Note that while the call to \"get_caller()\" is costly, it only happens\n # when an error occurs, so it shouldn't impact performance\n #\n error_data = (message, self.get_caller())\n self._errors.append(error_data)", "def error(self, _strMessage=\"\"):\n self.edLogging.error(_strMessage)", "def log_error(self, msg):\n self.logger.error(msg)", "def error(message):\n print str(message)", "def ERROR_LOG(msg, *args, **kwargs):\n logger.error(msg, *args, **kwargs)", "def log_error(task_request, message):\n _log(logger.error, task_request, message)", "def error(msg=None, *args, **kwargs):\n log(ERROR, msg, *args, **kwargs)", "def __add_log(self, logType: int, message: str) -> None:\n\n if isinstance(message, BaseException):\n ex: BaseException = message\n if hasattr(ex, 'message'):\n message = ex.message\n else:\n message = ex.__str__()\n\n message += f'\\n{traceback.format_exc().__str__()}'\n\n if message is None:\n return\n\n if isinstance(message, str) and message.strip().__len__() == 0:\n return\n\n st = stack()\n caller: Traceback = getframeinfo(st[2][0])\n log = LogModel()\n log.log_level = logType\n log.filename = caller.filename\n log.function = caller.function\n log.line_number = caller.lineno\n log.message = message\n log.creation_date = datetime.now()\n\n self.__logs.append(log)", "def error(self, message: str) -> None:\n lines = message.split('\\n')\n linum = 0\n formatted_message = ''\n for line in lines:\n if linum == 0:\n formatted_message = 'Error: ' + line\n else:\n formatted_message += '\\n ' + line\n linum += 1\n\n self.print_usage(sys.stderr)\n\n # Format errors with style_warning()\n formatted_message = ansi.style_warning(formatted_message)\n self.exit(2, '{}\\n\\n'.format(formatted_message))", "def message_error(self, m):\n self.message(m, logging.ERROR)", "def ERROR(self, _strMessage=\"\"):\n self.edLogging.ERROR(_strMessage)", "def exception(message):\n logging.exception('{0}'.format(message))\n if _logToArcpyMessagingWindow:\n arcpy.AddError('{0} \\n{1}'.format(message, traceback.format_exc()))", "def log_new_error(*args, **kwargs):\n logging.error(*args, **kwargs)", "def log_error(loglevel, message):\n syslog.syslog(loglevel, message + '\\n')\n sys.stderr.write(message + '\\n')", "def error(message: str) -> None:\n print(f\"ERROR: {message}\")", "def log_error(error_message, no_exit=False):\n log(f\"error: \")\n if not no_exit:\n exit()", "def error(msg, **kwargs):\n kwargs[\"logger\"] = kwargs.get(\"logger\", default_logger)\n kwargs[\"label\"] = kwargs.get(\"label\", get_caller())\n _error(msg, **kwargs)", "def log_error(message):\n sys.stderr.write(message)\n sys.stderr.flush()", "def error(msg):\n return ErrorRule(msg)", "def error(message):\n global LAST_LOG\n LAST_LOG = message\n cprint('\\r[ERR] {0}'.format(message), 'red', file=sys.stderr)", "def error(self, message):\n for_verbosity = 0\n if self.verbosity_level >= for_verbosity:\n self.logger.error(message, exc_info=True)", "def log(self, message, result_code, 
log_message=''):\n\n message_log = self.create(\n to_address=message.to_address,\n from_address=message.from_address,\n subject=message.subject,\n message_body=message.message_body,\n message_body_html=message.message_body_html,\n when_added=message.when_added,\n priority=message.priority,\n # @@@ other fields from Message\n result=result_code,\n log_message=log_message,\n )\n message_log.save()", "def error(self, msg, stderr=True):\n self.log(msg, level=self.ERROR, stderr=stderr)", "def error(message):\n if DEBUG:\n with print_lock:\n print((Colours.FAIL + 'ERROR: ' + Colours.END_COLOUR + message).strip())", "def _make_child_error(msg, module, name, traceback, log, log_type, context):\n return ChildError(msg, module, name, traceback, log, log_type, context)", "def error(message, details={}, status_code=400, exc_info=False):\n\n details['http_status_code'] = status_code\n\n logger = logging.getLogger(settings.LOGGER_ERROR)\n logger.exception(msg=message, extra=details, exc_info=exc_info)", "def error(self, *args, **kwargs):\n\n message = self.get_message(*args, **kwargs)\n self.logger.error(message)", "def log_error(msg, ex=None):\n if ex == None:\n exMsg = \"\"\n the_ex = Exception(msg)\n else:\n exMsg = \" \\n \" + repr(ex)\n the_ex = ex\n s = format_log(\"\\n\\n ERROR! %s%s\\n\\n\" % (msg,exMsg))\n print(s)\n log['error'].append({'msg':s, 'ex':ex})\n f_errors.write(s)", "def error(self, msg, *args, **kwargs):\n self._log(self.err, msg, *args, **kwargs)", "def error(self, msg, *args, **kwargs):\n logger = self.__get_logger()\n logger.error(msg, *args, **kwargs)", "def error(self, msg):\n\n self.logger.error(msg)", "def error(status, message):\n\n headers = {\"Content-Type\":\"text/plain\"}\n\n current.log.error(message)\n raise HTTP(status, body=message, web2py_error=message, **headers)", "def error(self, msg):\r\n self.logger.error(msg)", "def create_error_box(self, message):\n messagebox.showerror(\"Error\", message)", "def manage_kafka_error(msg):\n logger.error(msg.error())", "def error(self, msg, *args, **kwargs):\n self._logger.error(msg, *args, **kwargs)", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def LogError(errorMessage,**kwargs):\n if(isinstance(errorMessage,str)):\n print(errorMessage)\n __errorsTracked__.append(errorMessage)\n return None\n else:\n raise TypeError('Expected str for errorMessage. 
Got {}'.format(type(errorMessage)))", "def error(self, msg: str):\n self._logger.error(msg)", "def Log(self, message, record_id=None):\n message = str(message)\n record = self._CreateRecord(message, record_id)\n if self._formatter:\n self._formatter.Format(record)\n if len(record.message) > _MAX_MSG_SIZE:\n logging.error('Message must be less than (%s)', _MAX_MSG_SIZE)\n return\n self._records.appendleft(record)\n return record.id", "def error(module, message):\n if loggingLevel >= loggingLevelVerbose:\n ModuLog.log(\"E\", module, message)", "def create_error(test, time, error):\n info = _TestInfo(test, time)\n info._error = error\n return info", "def error(self, message):\n ErrorExit('error: {}\\n'.format(message), 2)", "def __init__(self, message):\n logging.error(\"ERROR: {0}\".format(message))\n logging.error(\"Try running with --help for more information.\")", "def error(log):\n write(syslog.LOG_ERR, 'error', '{log}'.format(log=log))", "def error(self, msg):\n self.__logger.error(msg)", "def error(msg, resource=None, stream_id=None):\n engine = get_engine()\n if engine is not None:\n _log(engine, engine_pb2.ERROR, msg, resource, stream_id)\n else:\n print(\"error: \" + msg, file=sys.stderr)", "async def log_exception(self, message=None):\n\t\tsio = io.StringIO()\n\t\tei = sys.exc_info()\n\t\ttb = ei[2]\n\t\ttraceback.print_exception(ei[0], ei[1], tb, None, sio)\n\t\tmsg = sio.getvalue()\n\t\tif msg[-1] == '\\n':\n\t\t\tmsg = msg[:-1]\n\t\tsio.close()\n\t\tif message is not None:\n\t\t\tmsg = message + msg\n\t\tawait self.log(msg, level=logging.ERROR)", "async def log_exception(self, message=None):\n\t\tsio = io.StringIO()\n\t\tei = sys.exc_info()\n\t\ttb = ei[2]\n\t\ttraceback.print_exception(ei[0], ei[1], tb, None, sio)\n\t\tmsg = sio.getvalue()\n\t\tif msg[-1] == '\\n':\n\t\t\tmsg = msg[:-1]\n\t\tsio.close()\n\t\tif message is not None:\n\t\t\tmsg = message + msg\n\t\tawait self.log(msg, level=logging.ERROR)", "def error(msg, *args, **kwargs):\n _log(syslog.LOG_ERR, msg, args, **kwargs)", "def log_error(msg: str, exception: Exception):\n print(f'\\033[31m[ERROR]\\033[m{msg} | {exception} | {exception.__class__.__name__}')", "def log_error(title, message):\n if title == \"Redundant\":\n print(f\"[{title}]: Refactoring is not necessary\")\n else:\n print(f\"[{title}]: Refactoring is not allowed\")\n print(f\"{message}\")", "def _stab_log_error(self, logconf, msg):\n print('Error when logging %s: %s' % (logconf.name, msg))", "def log_error(self, format_str, *args):\n LOGGER.error(self._format_msg(format_str, *args))", "def log(self, message: str):", "def logError(self, text):\n time = datetime.now().strftime(\"%H:%M:%S \")\n self.log(time + \"(ERR):\\t\", text)", "def log(message):\n from tempfile import gettempdir\n from time import strftime\n from sys import stderr\n timestamp = strftime(\"%d-%b-%y %H:%M:%S\")\n if len(message) == 0 or message[-1] != \"\\n\": message += \"\\n\"\n stderr.write(\"%s: %s\" % (timestamp,message))\n logfile = gettempdir()+\"/beam_profiler.log\"\n file(logfile,\"a\").write(timestamp+\" \"+message)", "def log_error(self, format_str, *args):\r\n LOGGER.error(self._format_msg(format_str, *args))", "def set_error_message(msg):\n set_message(msg, TYPE_ERROR)", "def error(self, message, **args):\n\t\terror_message = Utils.boldCode() + \"Error: \" + Utils.normalCode() + message\n\t\t\n\t\tif args.has_key(\"target\"):\n\t\t\tself.sendMessage(args[\"target\"], error_message)\n\t\t\t\n\t\tif args.has_key(\"console\"):\n\t\t\tif args[\"console\"]:\n\t\t\t\tprint 
self.errorTime(), \"<ERROR>\", Utils.stripCodes(message)\n\t\telse:\n\t\t\tprint self.errorTime(), \"<ERROR>\", Utils.stripCodes(message)", "def error(self, *args, **kwargs):\n self.msg(logging.ERROR, *args, **kwargs)", "def _stab_log_error(self, logconf, msg):\n\t\tprint \"Error when logging %s: %s\" % (logconf.name, msg)", "def write_cloud_error_message(message_text, error_code):\n\n vehicle_position_1 = Nav.VehiclePosition( -122.497885, 37.726946)\n\n current_cloud_message = CloudCom.CloudMessage('ErrorTest', config_Vehicle_Name, message_text,\n vehicle_position_1.lng, vehicle_position_1.lat, error_code)\n CloudCom.CloudMessage.write_cloud_message(current_cloud_message)", "def log_message(self, message):\n log_info = {'logGroupName': self.log_group_name,\n 'logStreamName': self.log_stream_name,\n 'logEvents': [\n {\n 'timestamp': int(1000 * time.time()),\n 'message': '[{}]: {}'.format(self.stage, message)\n },\n ]}\n\n if self.sequence_token:\n log_info['sequenceToken'] = self.sequence_token\n\n response = self.logs_client.put_log_events(**log_info)\n\n self.sequence_token = response['nextSequenceToken']", "def error(self, message):\n print message", "def _Error(message):\n return json.dumps({\n 'success': False,\n 'error': message,\n })", "def error(msg):\n if logger.level <= logging.ERROR:\n print('\\n~ ' + msg)\n logger.info(msg)", "def error_logging(filename, cloud, msg):\n with open(filename, 'a') as f:\n f.write(cloud + \" \" + msg + '\\n')\n f.write('\\n')", "def error(message):\n print(message, file=sys.stderr)", "def log(self, *error_message, **options):\n fatal = options.get(\"fatal\", True) # `fatal` option defaults to True\n error_message = \"\".join(map(str, error_message))\n try:\n with open(self.path_to(\"log.txt\"), \"a\") as error_log:\n error_log.write(\"%s - %s\" % (datetime.datetime.utcnow(), error_message))\n error_log.write(traceback.format_exc() + \"\\n\")\n except Exception:\n error_info = \"This error occurred very early during game initialisation and could not be logged\"\n else:\n error_info = \"Please check log.txt for details\"\n\n if fatal:\n text = \"\".join((\"An error has occurred:\\n\\n \",\n error_message, \".\\n\\n\\n\",\n error_info, \".\"))\n ctypes.windll.user32.MessageBoxA(0, text, \"Error\", 0) # Error popup\n raise CaughtFatalException(sys.exc_info()[1])\n else:\n raise", "def log_error(customized_msg, logfile_handle):\r\n tb = sys.exc_info()[2]\r\n tbinfo = traceback.format_tb(tb)[0]\r\n pymsg = \"PYTHON ERRORS:\\nTraceback Info:\\n\" + tbinfo + \"\\nError Info:\\n \" + str(\r\n sys.exc_type) + \": \" + str(sys.exc_value) + \"\\n\"\r\n msgs = \"ARCPY ERRORS:\\n\" + arcpy.GetMessages(2) + \"\\n\"\r\n logfile_handle.writelines(customized_msg + str(msgs) + \"\\n\" + pymsg + \"\\n\")", "def add(self, message, trace):\n error = models.Error(owtf_message=message, traceback=trace)\n self.db.session.add(error)\n self.db.session.commit()", "def error(msg):\n\n raise Exception(msg)", "def error_message(self, error_message):\n\n self._error_message = error_message", "def write_error_log(self, msg, with_trace=True):\n with open(self.error_log, 'a+') as logfile:\n logfile.write('Running Command: %s\\n' % Settings.running_command)\n if with_trace:\n traceback.print_exc(file=logfile)\n msg += '\\nError Message: %s\\n%s\\n' % (msg, '-' * 50)\n logfile.write(msg)\n\n self.write_debug_log(msg)", "def log_error(self, format, *args):\n self.log_message(format, args, level=xbmc.LOGERROR)", "def error(self, msg='', context='', severity=logging.INFO, 
traceback=False):\r\n if traceback:\r\n msg += _cperror.format_exc()\r\n self.error_log.log(severity, ' '.join((self.time(), context, msg)))", "def error(self, msg, *args):\n if self.lvl<=logging.ERROR: return self._log(msg, *args)" ]
[ "0.68963873", "0.6863878", "0.6833658", "0.68022585", "0.6657312", "0.66282344", "0.66032934", "0.6493894", "0.6439013", "0.6363229", "0.6361158", "0.6326939", "0.6277279", "0.62659764", "0.6256422", "0.61797696", "0.61486435", "0.6043684", "0.60246456", "0.59690106", "0.5967757", "0.5945455", "0.59283626", "0.5926146", "0.59230524", "0.5918737", "0.59130883", "0.58936864", "0.5879882", "0.5860295", "0.5843857", "0.58183897", "0.5815219", "0.5813584", "0.5802676", "0.5798657", "0.579", "0.578767", "0.5774477", "0.5721195", "0.57168645", "0.57163346", "0.5684308", "0.5681809", "0.56784105", "0.56744766", "0.56737065", "0.56532085", "0.5648729", "0.5645243", "0.5644775", "0.56387115", "0.563754", "0.56245667", "0.56226665", "0.56218696", "0.5611949", "0.56074256", "0.5601292", "0.559818", "0.5597758", "0.5589854", "0.5588977", "0.5582214", "0.5572875", "0.554108", "0.5538673", "0.55351716", "0.55302453", "0.55262196", "0.55174106", "0.55174106", "0.5505831", "0.54919106", "0.54808724", "0.54778093", "0.5474414", "0.5474115", "0.5472162", "0.54567605", "0.5455682", "0.5452583", "0.544345", "0.5435476", "0.54290855", "0.5426608", "0.5406155", "0.53933793", "0.53889024", "0.53839016", "0.537807", "0.53758705", "0.53730446", "0.53729814", "0.5371875", "0.53688574", "0.53668326", "0.53553987", "0.5352828", "0.53442436", "0.5342426" ]
0.0
-1
Creates an error log from an exception.
def create_from_exception(self, exc_info=None, **kwargs):
    if not exc_info:
        exc_info = sys.exc_info()
    exc_type, exc_value, exc_traceback = exc_info

    def shorten(var):
        var = transform(var)
        if isinstance(var, basestring) and len(var) > 200:
            var = var[:200] + '...'
        return var

    reporter = ExceptionReporter(None, exc_type, exc_value, exc_traceback)
    frames = varmap(shorten, reporter.get_traceback_frames())

    if not kwargs.get('view'):
        # This should be cached
        modules = get_installed_apps()
        if conf.INCLUDE_PATHS:
            modules = set(list(modules) + conf.INCLUDE_PATHS)

        def iter_tb_frames(tb):
            while tb:
                yield tb.tb_frame
                tb = tb.tb_next

        def contains(iterator, value):
            for k in iterator:
                if value.startswith(k):
                    return True
            return False

        # We iterate through each frame looking for an app in INSTALLED_APPS
        # When one is found, we mark it as last "best guess" (best_guess) and then
        # check it against SENTRY_EXCLUDE_PATHS. If it isnt listed, then we
        # use this option. If nothing is found, we use the "best guess".
        best_guess = None
        view = None
        for frame in iter_tb_frames(exc_traceback):
            try:
                view = '.'.join([frame.f_globals['__name__'], frame.f_code.co_name])
            except:
                continue
            if contains(modules, view):
                if not (contains(conf.EXCLUDE_PATHS, view) and best_guess):
                    best_guess = view
            elif best_guess:
                break
        if best_guess:
            view = best_guess

        if view:
            kwargs['view'] = view

    data = kwargs.pop('data', {}) or {}
    if hasattr(exc_type, '__class__'):
        exc_module = exc_type.__class__.__module__
    else:
        exc_module = None
    data['__sentry__'] = {
        'exc': map(transform, [exc_module, exc_value.args, frames]),
    }

    if isinstance(exc_value, TemplateSyntaxError) and hasattr(exc_value, 'source'):
        origin, (start, end) = exc_value.source
        data['__sentry__'].update({
            'template': (origin.reload(), start, end, origin.name),
        })
        kwargs['view'] = origin.loadname

    tb_message = '\n'.join(traceback.format_exception(exc_type, exc_value, exc_traceback))

    kwargs.setdefault('message', transform(force_unicode(exc_value)))

    return self.process(
        class_name=exc_type.__name__,
        traceback=tb_message,
        data=data,
        **kwargs
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_log(self, exc):\n return self.formatter.formatException(exc)", "def log_exception():\n logging.basicConfig(level=logging.DEBUG)\n return logging.getLogger('exceptions_log')", "def log_exception(e):\n logger.exception(e)\n raise", "def _log_exception(self, exception, query, parameters):\n logging.error(\"Error on MySQL Server:\" + self.host)\n logging.error(\"Error query:\", query)\n logging.error(\"Error parameters:\", parameters)\n logging.error(\"Error Exception:\", str(exception))", "async def log_exception(self, message=None):\n\t\tsio = io.StringIO()\n\t\tei = sys.exc_info()\n\t\ttb = ei[2]\n\t\ttraceback.print_exception(ei[0], ei[1], tb, None, sio)\n\t\tmsg = sio.getvalue()\n\t\tif msg[-1] == '\\n':\n\t\t\tmsg = msg[:-1]\n\t\tsio.close()\n\t\tif message is not None:\n\t\t\tmsg = message + msg\n\t\tawait self.log(msg, level=logging.ERROR)", "async def log_exception(self, message=None):\n\t\tsio = io.StringIO()\n\t\tei = sys.exc_info()\n\t\ttb = ei[2]\n\t\ttraceback.print_exception(ei[0], ei[1], tb, None, sio)\n\t\tmsg = sio.getvalue()\n\t\tif msg[-1] == '\\n':\n\t\t\tmsg = msg[:-1]\n\t\tsio.close()\n\t\tif message is not None:\n\t\t\tmsg = message + msg\n\t\tawait self.log(msg, level=logging.ERROR)", "def error_traceback():\n Logger.log('ERROR', traceback.format_exc())", "def log_error(msg, ex=None):\n if ex == None:\n exMsg = \"\"\n the_ex = Exception(msg)\n else:\n exMsg = \" \\n \" + repr(ex)\n the_ex = ex\n s = format_log(\"\\n\\n ERROR! %s%s\\n\\n\" % (msg,exMsg))\n print(s)\n log['error'].append({'msg':s, 'ex':ex})\n f_errors.write(s)", "def create_exception(self, msg: str):", "def log_new_error(*args, **kwargs):\n logging.error(*args, **kwargs)", "def log_exception(*args, **kwds):\n cls, err = sys.exc_info()[:2]\n logging.exception('Exception in request: %s: %s', cls.__name__, err)", "def raise_on_error(cls, prefix=None, exc_type=None):\n exc_type = exc_type or ValueError\n def on_message(msg):\n if msg.severity >= logging.ERROR:\n text = msg.text\n if prefix:\n text = '%s%s' % (prefix, text)\n raise exc_type(text)\n return cls(on_message=on_message)", "def logerror(logger: logging.Logger = logging.root):\n def log_wrapper(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except Exception as e:\n # log the exception\n logger.exception(\n f'{function.__name__}(args={args}, kwargs={kwargs}) failed:\\n{e}.')\n raise e\n return wrapper\n return log_wrapper", "def log_format_error(caught_exception, event_str):\n\tcheck_type(caught_exception, Exception)\n\tcheck_type(event_str, StringType)\n\t\n\treturn '{0}, Class: {1}:{2}'.format(event_str, str(type(caught_exception)), caught_exception)", "def log_error(msg: str, exception: Exception):\n print(f'\\033[31m[ERROR]\\033[m{msg} | {exception} | {exception.__class__.__name__}')", "def exception(self, *args, **kwargs):\n\n message = self.get_message(*args, **kwargs)\n self.logger.exception(message)", "def exception(self, msg, *args):\n self._log(logging.ERROR, msg, args, {'exc_info': 1})", "def log_error(self, format_str, *args):\n LOGGER.error(self._format_msg(format_str, *args))", "def error(transactionId, exception):\n\n traceback.print_tb(exception.__traceback__)\n\n if django.conf.settings.DEBUG:\n # Pass the exception up to Django, which renders a dynamic exception report.\n raise exception\n\n m = str(exception)\n if len(m) > 0:\n m = \": \" + m\n _log.error(\n \"{} END ERROR {}{}\".format(\n transactionId.hex,\n 
impl.util.encode1(type(exception).__name__),\n impl.util.encode1(m),\n )\n )\n _notifyAdmins(\n \"Exception raised in {}:\\n{}{}\\n\\n{}\".format(\n _extractRaiser(traceback.extract_tb(sys.exc_info()[2])),\n type(exception).__name__,\n m,\n traceback.format_exc(),\n )\n )", "def log_error(log_str):\n logger = logging.getLogger()\n logger.error(log_str)", "def log_error(self, ex):\n api_error = ex.data.convert_to(ApiError)\n print(\"Error configuring {}\".format(api_error.error_message))\n print(\"{}\".format(api_error.__dict__))\n print(\"{}\".format(api_error.details))", "def log_error(self, format_str, *args):\r\n LOGGER.error(self._format_msg(format_str, *args))", "def logError(e):\r\n print(e)", "def log_error(exc: Exception, error_msg: str):\n exc_info = sys.exc_info()\n last_traceback = exc_info[2]\n\n if hasattr(exc, \"__cause__\") and exc.__cause__ is not None:\n exc_info = (exc.__cause__.__class__, exc.__cause__, last_traceback)\n\n extra = dict(url=request.path, **get_err_source_info(last_traceback))\n\n msg = (\n '{error_name}:\"{message}\" [occured at {src_module}({src_func}):{src_linenr},'\n \"URL was: {url}]\".format(\n error_name=exc.__class__.__name__, message=error_msg, **extra\n )\n )\n\n current_app.logger.error(msg, exc_info=exc_info)", "def log_exception(title=None, exit=False, call=None, stack=False):\n try:\n yield\n except (Exception, KeyboardInterrupt) as e:\n if title:\n log.error(title)\n if stack:\n log.exception(e)\n elif str(e):\n log.error(e)\n if call:\n call()\n if exit:\n sys.exit(1)", "def log_error(e):\n print(e)", "def log_error(e):\n print(e)", "def log_error(e):\n print(e)", "def log_error(e):\n print(e)", "def log_error(e):\n print(e)", "def log_error(e):\n print(e)", "def log_error(e):\n print(e)", "def log_error(e):\n print(e)", "def log_error(e):\n print(e)", "def log_exception(level='warning', tb_skip=2):\n stack = \"\".join(traceback.format_stack()[:-tb_skip])\n tb = traceback.format_exception(*sys.exc_info())\n msg = tb[0] # \"Traceback (most recent call last):\"\n msg += stack\n msg += \" << caught exception here: >>\\n\"\n msg += \"\".join(tb[1:]).rstrip()\n logger.log(logging_types[level], msg)", "def save_exception(exc):\n LOG.error(\"Error - %s\", str(exc))\n hour = time.strftime(\"_%H_%M_%S\")\n today = time.strftime(\"_%d_%m_%Y\")\n data = (str(exc)+traceback.format_exc())\n\n file = open(\"./logs/ERROR_\"+threading.currentThread().getName()+today+\".log\",'a+') #Replace to fix OSError\n file.write(\"\\n==\"+hour+\"==\\n\")\n file.write(Parser.parse_text(data))\n file.write(\"=====================================\\n\")\n file.close()", "def log_exception(*args, **kwds):\n import logging\n logging.exception('Exception in request:')", "def log_exception(logger, signature, e):\n # This will log the traceback.\n logger.error('[Error in method ' + signature + ': Details ' + str(e) + ']', exc_info=True)\n return e", "def log_error(e):\r\n print(e)", "def log_error(e):\r\n print(e)", "def log_error(self, error: Exception) -> None:\n logging.error(error)", "def log_error(e):\n\tprint(e)", "def log_error(e):\n\tprint(e)", "def log_error(self,msg):\r\n t = time.strftime(\"%a %d/%b/%Y %H:%M:%S\", time.gmtime())\r\n logfile = open(self.log_file, \"a\")\r\n logfile.write(\"Error at %s \"% t)\r\n logfile.write(msg)\r\n traceback.print_exc(file=logfile)\r\n logfile.write( \"---------------------\\n\")\r\n logfile.close()", "def log_exception(filename):\n log = logging.getLogger(__name__)\n log.setLevel(logging.DEBUG)\n handler = 
logging.FileHandler(filename)\n handler.setFormatter(\n TaskFormatter('task_id: %(task_id)s name: %(task_name)s %(message)s'))\n log.addHandler(handler)\n\n __traceback_info__ = ('we can handle traceback info')\n try:\n raise NotImplementedError()\n except NotImplementedError:\n log.error('we are logging', exc_info=True)\n handler.flush()\n handler.close()\n return \"Successful logged.\"", "def ERROR_LOG(msg, *args, **kwargs):\n logger.error(msg, *args, **kwargs)", "def exception(msg):\n log('EXCEPTION', msg)", "def log_error(stage: str, err: Exception) -> None:\n try:\n msg = 'Error during {}: {}'.format(stage, err)\n print(msg)\n sys.print_exception(err)\n\n log_size = 0\n try:\n # Get size of main log file.\n log_size = os.stat(LOG_NAME_MAIN)[6]\n except OSError:\n pass\n\n # Rotate log if size reaches limit.\n if log_size > MAX_LOG_SIZE:\n os.rename(LOG_NAME_MAIN, LOG_NAME_SECONDARY)\n\n with open(LOG_NAME_MAIN, 'a') as f:\n f.write(msg)\n f.write('\\n')\n except:\n # This function is intended to log errors and there is no\n # sense to raise another exception as nothing could handle it.\n pass", "def writeException(className, exceptionString):\n errorFile = open(\"error.log\", 'a')\n errorFile.write(\"ERROR occured in \" + className + \" at \" + str(datetime.now()) + \" with the following message\\n\" + str(exceptionString) + \"\\n\\n\")\n errorFile.close()", "def __add_log(self, logType: int, message: str) -> None:\n\n if isinstance(message, BaseException):\n ex: BaseException = message\n if hasattr(ex, 'message'):\n message = ex.message\n else:\n message = ex.__str__()\n\n message += f'\\n{traceback.format_exc().__str__()}'\n\n if message is None:\n return\n\n if isinstance(message, str) and message.strip().__len__() == 0:\n return\n\n st = stack()\n caller: Traceback = getframeinfo(st[2][0])\n log = LogModel()\n log.log_level = logType\n log.filename = caller.filename\n log.function = caller.function\n log.line_number = caller.lineno\n log.message = message\n log.creation_date = datetime.now()\n\n self.__logs.append(log)", "def error(error_message: str):\n logger.error(error_message)", "def exception(self, msg, *args, **kwargs):\n logger = self.__get_logger()\n logger.exception(str(msg), *args, **kwargs)", "def exception(message):\n logging.exception('{0}'.format(message))\n if _logToArcpyMessagingWindow:\n arcpy.AddError('{0} \\n{1}'.format(message, traceback.format_exc()))", "def exception(self, message, *args, **kwargs):\n\n self.logger.exception(message, *args, **kwargs)", "def log_exception(type, value, tb, logger=twiggy.log, multiline=False):\n\n if multiline:\n for x in traceback.format_exception(type, value, tb)[1:]:\n for y in x.strip('\\n ').split('\\n'):\n logger.error(y)\n else:\n msg = '|'.join([': '.join([y.strip() for y in x.strip('\\n ').split('\\n')]) for x in \\\n traceback.format_exception(type, value, tb)[1:]])\n logger.error('Uncaught exception: %s' % str(msg))", "def exc_log_str(exception) -> str:\n return \"{}: {!s}\".format(type(exception).__name__, exception)", "def exception(self, msg, *args, **kwargs):\n ex = sys.exc_info()[1]\n\n if hasattr(ex, '_monocle'):\n args = args + (format_tb(ex),)\n self.logger.error('%s\\n%%s' % msg, *args, **kwargs)\n else:\n super(Adapter, self).exception(msg, *args, **kwargs)", "def error(msg, ex=None):\n log_error(msg, ex)\n if ex == None:\n exMsg = \"\"\n else:\n exMsg = \" \\n \" + repr(ex)\n if ex == None:\n raise Exception(exMsg)\n else:\n raise ex", "def _make_child_error(msg, module, name, traceback, log, log_type, 
context):\n return ChildError(msg, module, name, traceback, log, log_type, context)", "def _log_error(self, event, err, **kwargs):\n self.context.logger.error(\n f\"step {self.name} got error {err} when processing an event:\\n {event.body}\"\n )\n message = traceback.format_exc()\n self.context.logger.error(message)\n self.context.push_error(\n event, f\"{err}\\n{message}\", source=self.fullname, **kwargs\n )", "def catch_exception(output_directory: str, e: Exception):\n # Log error message\n tbstr = \"\".join(traceback.extract_tb(e.__traceback__).format())\n errormsg = f\"Traceback:\\n{tbstr}\\nError: {e}\"\n\n # Rename output dir\n src = output_directory\n if src.endswith(\"/\"):\n src = src[:-1]\n dst = src + \"_error\"\n\n # Write error to separate file\n with open(os.path.join(output_directory, \"error.txt\"), \"w\") as f:\n f.write(errormsg)\n\n logger.error(\"Error caught!\")\n logger.error(f\"Moving output directory from\")\n logger.error(src)\n logger.error(\"to\")\n logger.error(dst)\n\n shutil.move(src, dst)\n raise e", "def _log_exception(fn):\n\n @functools.wraps(fn)\n def decorated(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception: # pylint: disable=broad-except\n err_str = traceback.format_exc()\n logging.error(f'Exception occured for {args}, {kwargs}:\\n' + err_str)\n raise\n\n return decorated", "def log_error(customized_msg, logfile_handle):\r\n tb = sys.exc_info()[2]\r\n tbinfo = traceback.format_tb(tb)[0]\r\n pymsg = \"PYTHON ERRORS:\\nTraceback Info:\\n\" + tbinfo + \"\\nError Info:\\n \" + str(\r\n sys.exc_type) + \": \" + str(sys.exc_value) + \"\\n\"\r\n msgs = \"ARCPY ERRORS:\\n\" + arcpy.GetMessages(2) + \"\\n\"\r\n logfile_handle.writelines(customized_msg + str(msgs) + \"\\n\" + pymsg + \"\\n\")", "def _stab_log_error(self, logconf, msg):\n print('Error when logging %s: %s' % (logconf.name, msg))", "def log_exception(sender, exception, **extra):\n app.logger.error('Error in Geocoding Service: %s', exception)", "def log_traceback(fn):\n functools.wraps(fn)\n def wrapper(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception as ex:\n rule_name = None\n if hasattr(fn, 'log'):\n fn.log.error(traceback.format_exc())\n rule_name = fn.name\n elif len(args) > 0 and hasattr(args[0], 'log'):\n args[0].log.error(traceback.format_exc())\n rule_name = args[0].name\n else:\n logging.getLogger(LOG_PREFIX).error(traceback.format_exc())\n import core.actions\n if hasattr(core.actions, 'NotificationAction'):\n import configuration\n if hasattr(configuration, 'admin_email') and configuration.admin_email != \"admin_email@some_domain.com\":\n core.actions.NotificationAction.sendNotification(configuration.admin_email, \"Exception: {}: [{}]\".format(rule_name, traceback.format_exc()))\n else:\n core.actions.NotificationAction.sendBroadcastNotification(\"Exception: {}: [{}]\".format(rule_name, traceback.format_exc()))\n return wrapper", "def error(self, *args, **kwargs):\n\n message = self.get_message(*args, **kwargs)\n self.logger.error(message)", "def log_exception(modulename, exceptiondetails):\n log_msg(format_exc(sys.exc_info()), xbmc.LOGWARNING)\n log_msg(\"ERROR in %s ! 
--> %s\" % (modulename, exceptiondetails), xbmc.LOGERROR)", "def __init__ (self, msg, parent=None, api_object=None, from_log=False) :\n Exception.__init__(self, msg)\n\n self._plain_message = msg\n self._exceptions = [self]\n self._top_exception = self\n self._ptype = type(parent).__name__ # parent exception type\n self._stype = type(self ).__name__ # own exception type \n\n ignore_stack = 2\n if from_log : \n ignore_stack += 1\n\n\n if api_object : \n self._object = weakref.ref (api_object)\n else :\n self._object = None\n\n\n # did we get a parent exception?\n if parent :\n\n # if so, then this exception is likely created in some 'except'\n # clause, as a reaction on a previously catched exception (the\n # parent). Thus we append the message of the parent to our own\n # message, but keep the parent's traceback (after all, the original\n # exception location is what we are interested in).\n #\n if isinstance (parent, SagaException) :\n # that all works nicely when parent is our own exception type...\n self._traceback = parent.traceback\n\n frame = traceback.extract_stack ()[- ignore_stack]\n line = \"%s +%s (%s) : %s\" % frame \n self._message = \" %-20s: %s (%s)\\n%s\" \\\n % (self._stype, msg, line, parent.msg)\n\n else :\n if self._stype != \"NoneType\" :\n # ... but if parent is a native (or any other) exception type,\n # we don't have a traceback really -- so we dig it out of\n # sys.exc_info. \n trace = sys.exc_info ()[2]\n stack = traceback.extract_tb (trace)\n traceback_list = traceback.format_list (stack)\n self._traceback = \"\".join (traceback_list)\n\n # the message composition is very similar -- we just inject the\n # parent exception type inconspicuously somewhere (above that\n # was part of 'parent.message' already).\n frame = traceback.extract_stack ()[- ignore_stack]\n line = \"%s +%s (%s) : %s\" % frame \n self._message = \" %-20s: %s (%s)\\n %-20s: %s\" \\\n % (self._stype, msg, line, self._ptype, parent)\n\n else :\n\n # if we don't have a parent, we are a 1st principle exception,\n # i.e. a reaction to some genuine code error. 
Thus we extract the\n # traceback from exactly where we are in the code (the last stack\n # frame will be the call to this exception constructor), and we\n # create the original exception message from 'stype' and 'message'.\n stack = traceback.extract_stack ()\n traceback_list = traceback.format_list (stack)\n self._traceback = \"\".join (traceback_list[:-1])\n frame = traceback.extract_stack ()[- ignore_stack -1]\n line = \"%s +%s (%s) : %s\" % frame \n self._message = \"%s (%s)\" % (msg, line)\n\n # we can't do that earlier as _msg was not set up before\n self._messages = [self._message]", "def CreateError(error_message, return_code=500, allowed_origin=None,\n **kwargs):\n logging.error('Error occurred: %s', error_message)\n result = {\n 'template': 'error.html',\n 'data': {\n 'error_message': error_message\n },\n 'return_code': return_code,\n 'allowed_origin': allowed_origin,\n }\n\n result['data'].update(kwargs)\n return result", "def __init__(self, module, message, _type, exc_message=None, *args, **kwargs):\n logger.error(\"[{}] {} {} {}\".format(module,\n _type,\n '<{}>'.format(exc_message) if exc_message else '',\n message))\n super(CliException, self).__init__(message, *args)\n self.message = message\n self.type = _type\n self.exc_message = exc_message\n self.str_at_error = kwargs.get('str_at_error', None)", "def log_error(self, message):\n u = six.text_type\n log_line = (\n u('{0:%Y-%m-%d %H:%M:%S} [FALCON] [ERROR] {1} {2}?{3} => {4}\\n').\n format(datetime.now(), self.method, self.path, self.query_string,\n message)\n )\n\n self._wsgierrors.write(log_line)", "def logerror(self, msg):\n self.logger.error(msg)", "def error(self, trigger=None):\n try:\n trace = traceback.format_exc()\n if sys.version_info.major < 3:\n trace = trace.decode('utf-8', errors='xmlcharrefreplace')\n stderr(trace)\n try:\n lines = list(reversed(trace.splitlines()))\n report = [lines[0].strip()]\n for line in lines:\n line = line.strip()\n if line.startswith('File \"'):\n report.append(line[0].lower() + line[1:])\n break\n else:\n report.append('source unknown')\n\n signature = '%s (%s)' % (report[0], report[1])\n # TODO: make not hardcoded\n log_filename = os.path.join(self.config.core.logdir, 'exceptions.log')\n with codecs.open(log_filename, 'a', encoding='utf-8') as logfile:\n logfile.write('Signature: %s\\n' % signature)\n if trigger:\n logfile.write('from {} at {}. Message was: {}\\n'.format(\n trigger.nick, str(datetime.now()), trigger.group(0)))\n logfile.write(trace)\n logfile.write(\n '----------------------------------------\\n\\n'\n )\n except Exception as e:\n stderr(\"Could not save full traceback!\")\n LOGGER.error(\"Could not save traceback from %s to file: %s\", trigger.sender, str(e))\n\n if trigger and self.config.core.reply_errors and trigger.sender is not None:\n self.msg(trigger.sender, signature)\n if trigger:\n LOGGER.error('Exception from {}: {} ({})'.format(trigger.sender, str(signature), trigger.raw))\n except Exception as e:\n if trigger and self.config.core.reply_errors and trigger.sender is not None:\n LOGGER.error('Exception from {}: {} ({})'.format(trigger.sender, str(e), trigger.raw))", "def LogError(errorMessage,**kwargs):\n if(isinstance(errorMessage,str)):\n print(errorMessage)\n __errorsTracked__.append(errorMessage)\n return None\n else:\n raise TypeError('Expected str for errorMessage. 
Got {}'.format(type(errorMessage)))", "def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(\n evidence_path, test_case, log_extension\n )\n else:\n log_path = None\n self.log = Log(log_path)\n self.log = self.log.get_logger()\n return self.log", "def logErr(msg:Any, showStackTrace:bool = True, exc:Exception = None, stackOffset:int = 0) -> str:\n\t\tfrom ..services import CSE\n\t\t# raise logError event\n\t\tCSE.event.logError()\t# type: ignore\n\t\tif exc:\n\t\t\tfmtexc = ''.join(traceback.TracebackException.from_exception(exc).format())\n\t\t\tLogging._log(logging.ERROR, f'{msg}\\n\\n{fmtexc}', stackOffset = stackOffset)\n\t\telif showStackTrace and Logging.stackTraceOnError:\n\t\t\tstrace = ''.join(map(str, traceback.format_stack()[:-1]))\n\t\t\tLogging._log(logging.ERROR, f'{msg}\\n\\n{strace}', stackOffset = stackOffset)\n\t\telse:\n\t\t\tLogging._log(logging.ERROR, msg, stackOffset = stackOffset)\n\t\treturn msg", "def exception(msg='', details={}, exc_info=True):\n logger = logging.getLogger(settings.LOGGER_EXCEPTION)\n logger.exception(msg=msg or sys.exc_info(), extra=details, exc_info=exc_info)", "def _stab_log_error(self, logconf, msg):\n\t\tprint \"Error when logging %s: %s\" % (logconf.name, msg)", "def LogException(func):\r\n def wrapper(c, s):\r\n global _log\r\n try:\r\n func(c, s)\r\n except Exception as e:\r\n exceptionInfo = traceback.format_exc()\r\n LogDebug(u\"Uncaught exception in %@, %@\", func.__name__, exceptionInfo.rstrip())\r\n alert = NSAlert.alloc().init()\r\n alert.setMessageText_(u\"Uncaught exception\")\r\n alert.setInformativeText_(exceptionInfo)\r\n alert.addButtonWithTitle_(u\"Dismiss\")\r\n alert.addButtonWithTitle_(u\"Save Log…\")\r\n while alert.runModal() == NSAlertSecondButtonReturn:\r\n _log.saveLog_(IEDLog.IEDLog, None)\r\n return wrapper", "def exception(self, msg, *args, **kwargs):\n if args:\n try:\n msg = msg % args\n except TypeError:\n log.exception_orig(_('Wrong format of a log message'))\n\n (exc_type, exc_value, exc_tb) = sys.exc_info()\n bugdialog.ShowEI(exc_type, exc_value, exc_tb, msg)\n if compat.PYTHON2: sys.exc_clear()", "def error(log):\n write(syslog.LOG_ERR, 'error', '{log}'.format(log=log))", "def log_error(task_request, message):\n _log(logger.error, task_request, message)", "def log_error(self, msg):\n self.logger.error(msg)", "def logerrors(f):\n try:\n f()\n except Exception:\n # grab the exception info and format without the outside layer\n # that way, we don't include this function in the call stack\n ei = sys.exc_info()\n logging.error(''.join(traceback.format_exception(ei[0], ei[1], ei[2].tb_next)))", "def log_error(self, msg):\n self.log(msg, level=LOG_ERROR)", "def exception_hook(exc_type, exc_value, exc_traceback) -> None:\n log.error(\n \"exception\",\n exception_type=exc_type.__name__,\n exc_info=(exc_type, exc_value, exc_traceback),\n )", "def log_error(self, format, *args):\n\n self.logger.error(\"%s %s\" % (self.client_address[0], format % args))", "def set_log_level_to_error():\n logging.setLevel(default_logging.ERROR)\n\n if os.path.exists(\"gen3tests.logs\"):\n os.remove(\"gen3tests.logs\")\n logfile_handler = default_logging.FileHandler(\"gen3tests.logs\")\n logfile_handler.setFormatter(default_logging.Formatter(LOG_FORMAT))\n logging.addHandler(logfile_handler)\n yield", "def form_error_return(logger, e):\n if not isinstance(e, GFedv2BaseError): # convert unknown errors into 
GFedv2ServerError\n e = GFedv2ServerError(str(e))\n # do some logging\n logger.error(e)\n logger.error(traceback.format_exc())\n return { 'code' : e.code, 'output' : str(e) }", "def log_error(self, message):\n # log the datetime+message to error_log.txt\n curr_time = datetime.datetime.now().strftime(\"%H:%M:%S \"\n \"%Y-%m-%d\")\n with open(ERROR_FILE_PATH, \"a+\") as error_file:\n error_file.write(\"{} $ {}\\n\".format(curr_time, message))", "def from_log_string(log_string):\n\n\t\tfirst_part = None\n\t\tsecond_part = None\n\n\t\tif not log_string.endswith(\"}\"):\n\t\t\t# Value error for later use\n\t\t\tvalue_error = ValueError(\"Given string has invalid format: {}\".format(log_string))\n\n\t\t\tbracket_idx = log_string.find(\"}\")\n\t\t\tlast_comma_idx = log_string.find(\",\", bracket_idx)\n\t\t\tif last_comma_idx != bracket_idx + 1:\n\t\t\t\traise value_error\n\n\t\t\t# The bracket is kept\n\t\t\tfirst_part = log_string[:bracket_idx + 1]\n\t\t\t# The comma is removed\n\t\t\tsecond_part = log_string[last_comma_idx + 1:]\n\t\t\tif \"}\" not in first_part or \"}\" in second_part or \"{\" in second_part:\n\t\t\t\traise value_error\n\n\t\tdata_dict = json.loads(first_part)\n\t\treturn LogEntry.from_data(data_dict, second_part)", "def otherError(caller, exception):\n m = str(exception)\n if len(m) > 0:\n m = \": \" + m\n _log.error(\n \"- ERROR {} {}{}\".format(\n impl.util.encode2(caller),\n impl.util.encode1(type(exception).__name__),\n impl.util.encode1(m),\n )\n )\n if django.conf.settings.DEBUG:\n raise exception\n else:\n _notifyAdmins(\n \"Exception raised in {}:\\n{}{}\\n\\n{}\".format(\n caller, type(exception).__name__, m, traceback.format_exc()\n )\n )", "def setup_log(self, log_file):\n directory = os.path.dirname(log_file)\n if directory:\n os.makedirs(directory, exist_ok=True)\n\n logger = logging.getLogger(log_file)\n formatter = logging.Formatter(config.LOG_FORMAT)\n\n file_handler = logging.FileHandler(log_file, mode='a')\n file_handler.setFormatter(formatter)\n\n logger.setLevel(logging.INFO)\n logger.addHandler(file_handler)\n\n return logger", "def eventlogger(self, event):\n \n eventlogger = loguru.logger\n \n eventlogger.add(\n sink = \"events.log\", \n level = \"WARNING\", \n format = \"\\n\\n\\n\\n{level} {time: {time:DD-MM-YYYY HH:mm:ss}}\\n\"\n \"Elapsed Time: {elapsed}\\n\"\n \"File: {file}\\n\"\n \"Message: {message}\")\n \n eventlogger.exception(event)", "def log_error(self, format, *args):\n self.log_message(format, args, level=xbmc.LOGERROR)", "def _get_failure_from_exception(\n e: BaseException) -> TransactionResult.Failure:\n\n try:\n if isinstance(e, IconServiceBaseException):\n if e.code == ExceptionCode.SCORE_ERROR or isinstance(e, ScoreErrorException):\n Logger.warning(e.message, ICON_SERVICE_LOG_TAG)\n else:\n Logger.exception(e.message, ICON_SERVICE_LOG_TAG)\n\n code = int(e.code)\n message = str(e.message)\n else:\n Logger.exception(e, ICON_SERVICE_LOG_TAG)\n Logger.error(e, ICON_SERVICE_LOG_TAG)\n\n code: int = ExceptionCode.SERVER_ERROR.value\n message = str(e)\n except:\n code: int = ExceptionCode.SERVER_ERROR.value\n message = 'Invalid exception: code or message is invalid'\n\n return TransactionResult.Failure(code, message)", "def create_k2a_exception_from_k2o_exception(e, addmsg=None, exclogger=None):\n\n if not isinstance(e, K2Error):\n return K2aK2Other(e, addmsg)\n\n if isinstance(e, K2SSLError):\n return K2aK2SslError(e, addmsg)\n\n if isinstance(e, K2ConnectionError):\n return K2aK2ConnectionError(e, addmsg)\n\n if isinstance(e, 
K2TimeoutError):\n return K2aK2TimeoutError(e, addmsg)\n\n k2response = e.k2response\n # If non status_code then use -1\n status = -1\n if k2response is not None and k2response.status is not None:\n status = k2response.status\n\n # if activated place k2response in exception log\n diagfspec = None\n if (k2response is not None\n and exclogger is not None\n and status not in _EXCLUDED_EXCEPTIONS):\n category = \"UNC\"\n if status > -1:\n category = str(status)\n\n diagfspec = exclogger.emit(category, addmsg, k2response, exc=e)\n addmsg += (_(\", exception diagnostics have been written to: >%s<\") %\n diagfspec)\n\n cls = _code_to_exception_map.get(status, K2aK2ErrorUnclassified)\n\n k2msg = None\n if hasattr(k2response, 'k2err'):\n m = k2response.k2err.find('./Message')\n if m is not None:\n k2msg = m.text\n return cls(status, e, addmsg, k2msg=k2msg,\n diagfspec=diagfspec)", "def log_exception_to_sentry(app, exception=None, **kwargs):\n sentry.captureException(exception)", "def log_error(self,file_dict):\n import hashlib\n from datetime import datetime\n import shutil\n\n # Today's date\n file_date = datetime.today()\n\n # Generate hash with file content\n h = hashlib.md5()\n f = open(file_dict['tmpfile'], 'r')\n h.update(f.read() + file_date.__str__())\n f.close()\n\n # Copy file to repository\n session = model.Session\n\n # Create table if it doesn't exists\n setup_model()\n\n # First check if hash is already in database\n results = session.query(ErrorRepository.hash).filter_by(hash=h.hexdigest()).all()\n\n if len(results) > 0:\n self.log( 'This file %s has the same hash of a file already in\\\n database. Aborting' % file_dict['filename'])\n os.remove(file_dict['tmpfile'])\n return\n\n # Filename hash to store\n filename3, extension = os.path.splitext(os.path.basename(file_dict['filename']))\n filename3 = file_date.__str__() + '-' + filename3 + extension\n\n # Now add full repository path to filename\n filename2 = os.path.join(self.repository,os.path.join('import_errors',filename3.replace(' ', '-')))\n\n # Now insert data and copy file to repository\n #log.error('Error parsing file %s. Inserting in repository' % file_dict['filename'])\n self.log('Error in file %s. Inserting in repository with message\\n %s' % (file_dict['filename'],file_dict.get('errmsg')))\n\n # Create base dir if it doesn't exist\n if not os.path.exists(os.path.join(self.repository,'import_errors')):\n os.mkdir(os.path.join(self.repository,'import_errors'), 0770)\n\n # Copy file to repository\n shutil.copy2(file_dict['tmpfile'],filename2)\n\n # insert info in database\n repository = ErrorRepository(\n hash=h.hexdigest(),\n creation_date=file_date.today(),\n original_file=filename2,\n errmsg=file_dict.get('errmsg'),\n error_type=file_dict.get('error_type'),\n package_file=file_dict.get('package_file')\n )\n session.add(repository)\n session.commit()\n\n #log.warning('File inserted')\n self.log('File inserted')\n\n # Remove other file\n os.remove(file_dict['tmpfile'])", "def capture_exception():\n\n pm_logger.exception()\n exc_type, exc_value, exc_tb = sys.exc_info()\n exc_type_string = \"%s.%s\" % (exc_type.__module__, exc_type.__name__)\n exc_message = traceback.format_exception_only(exc_type, exc_value)[-1].strip()\n error = {\"type\": exc_type_string,\n \"message\": exc_message}\n try:\n BSON.encode({'args': exc_value.args})\n except InvalidDocument:\n pass\n else:\n error[\"args\"] = exc_value.args\n return error" ]
[ "0.6838723", "0.6404485", "0.6333006", "0.6018795", "0.5974132", "0.5974132", "0.5963912", "0.59542024", "0.5951783", "0.5901242", "0.5837355", "0.5835964", "0.5816076", "0.5780152", "0.57722586", "0.5741803", "0.5727721", "0.57214516", "0.5720709", "0.5701371", "0.56902647", "0.5687057", "0.5685889", "0.56740296", "0.56691617", "0.5635998", "0.5635998", "0.5635998", "0.5635998", "0.5635998", "0.5635998", "0.5635998", "0.5635998", "0.5635998", "0.5630545", "0.56151825", "0.5600754", "0.55926305", "0.55554336", "0.55554336", "0.5532301", "0.5528493", "0.5528493", "0.54966533", "0.5492493", "0.5474959", "0.5459177", "0.5451158", "0.5430235", "0.54218155", "0.5419322", "0.5414729", "0.54120797", "0.5397881", "0.53957826", "0.53878236", "0.537886", "0.5370186", "0.5360603", "0.5357526", "0.53267723", "0.5318926", "0.531665", "0.5285375", "0.5282048", "0.52819294", "0.523333", "0.5232926", "0.5218377", "0.52161795", "0.5205441", "0.52051157", "0.5195685", "0.5193282", "0.5191863", "0.51865", "0.51862526", "0.5179836", "0.5175785", "0.5172467", "0.5170052", "0.5169025", "0.515961", "0.51334256", "0.51297855", "0.51199865", "0.5111389", "0.51071566", "0.50978297", "0.5096551", "0.5085513", "0.50812984", "0.5079459", "0.5062498", "0.50518805", "0.50432646", "0.50337464", "0.50178045", "0.49906418", "0.49875697", "0.49851236" ]
0.0
-1
Initialize the motor with its control pins and start pulsewidth modulation
def __init__(self, pinForward, pinBackward, pinControlStraight, pinLeft, pinRight, pinControlSteering):
    self.pinForward = pinForward
    self.pinBackward = pinBackward
    self.pinControlStraight = pinControlStraight
    self.pinLeft = pinLeft
    self.pinRight = pinRight
    self.pinControlSteering = pinControlSteering

    GPIO.setup(self.pinForward, GPIO.OUT)
    GPIO.setup(self.pinBackward, GPIO.OUT)
    GPIO.setup(self.pinControlStraight, GPIO.OUT)
    GPIO.setup(self.pinLeft, GPIO.OUT)
    GPIO.setup(self.pinRight, GPIO.OUT)
    GPIO.setup(self.pinControlSteering, GPIO.OUT)

    self.pwm_forward = GPIO.PWM(self.pinForward, 100)
    self.pwm_backward = GPIO.PWM(self.pinBackward, 100)
    self.pwm_forward.start(0)
    self.pwm_backward.start(0)

    self.pwm_left = GPIO.PWM(self.pinLeft, 100)
    self.pwm_right = GPIO.PWM(self.pinRight, 100)
    self.pwm_left.start(0)
    self.pwm_right.start(0)

    GPIO.output(self.pinControlStraight, GPIO.HIGH)
    GPIO.output(self.pinControlSteering, GPIO.HIGH)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)", "def setup(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.Motor_A_EN, GPIO.OUT)\n GPIO.setup(self.Motor_B_EN, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin2, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin2, GPIO.OUT)\n self.motorStop() # Avoids automatic motor rotation after initialization\n try: # Try is used here to avoid errors due to repeated setting of PWM\n self.pwm_A = GPIO.PWM(self.Motor_A_EN, 1000)\n self.pwm_B = GPIO.PWM(self.Motor_B_EN, 1000)\n except:\n pass", "def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)", "def __init__(self, pinForward, pinBackward, pinControl):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControl = pinControl\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControl, GPIO.OUT)\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n GPIO.output(self.pinControl,GPIO.HIGH)", "def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()", "def init(self):\n self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.IL_MOTOR_ID)\n self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.IR_MOTOR_ID)\n self.l_motor.initialize(\n inverted=False, encoder=False, phase=False, name=\"Intake Left\")\n self.r_motor.initialize(\n inverted=True, encoder=False, phase=False, name=\"Intake Right\")", "def __init__(self):\n\n super().__init__()\n\n self.active = True\n self.driver = Driver.instance()\n self.sensor_manager = SensorManager.instance()\n\n self.pwm = Adafruit_PCA9685.PCA9685(address=0x40, busnum=1) # create PCA9685-object at I2C-port\n self.pwm.set_pwm_freq(50)\n\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(20, GPIO.OUT)\n GPIO.setup(21, GPIO.OUT)\n GPIO.setup(26, GPIO.OUT)\n self.driven_distance = 0", "def __init__(self, pwm_pin, dir_pin_1, dir_pin_2, pwm_freq):\n\t\tself._pwm_pin = pwm_pin # PWM input pin.\n\t\tself._dir_pin_1 = dir_pin_1 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._dir_pin_2 = dir_pin_2 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._pwm_freq = pwm_freq # PWM cycle.\n\n\t\tself._last_dir = 's' # Last rotation direction of this wheel. 
's' indicates stop.\n\t\tself._last_dc_val = 0 # Last duty cycle value.\n\t\tself._current_dc_val = 0 # Current duty cycle value.\n\n\t\tGPIO.setmode(GPIO.BOARD)\n\n\t\t# Set the direction control GPIO output mode.\n\t\tGPIO.setup(self._pwm_pin, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_1, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_2, GPIO.OUT)\n\n\t\t# Inits PWM pin.\n\t\tself._motor_pwm = GPIO.PWM(self._pwm_pin, self._pwm_freq) # pwm_freq: Hz\n\t\tself._motor_pwm.start(0) # Set duty cycle to 0.", "def physical_init(self):\n \n # BCM numbering scheme for Pi pins\n GPIO.setmode(GPIO.BCM)\n \n for attr in self.parm_list:\n if attr.io_pin > 0:\n GPIO.setup(attr.io_pin, attr.io_dir)\n if attr.io_dir == GPIO.OUT:\n GPIO.output(attr.io_pin, attr.value)\n #\n # There seems to be a bug where the edge detection triggers on both\n # edges. Compensate in the ISR.\n #\n GPIO.add_event_detect(self.motion.io_pin, GPIO.BOTH, callback=self.motion_edge)", "def setup(self):\n if not self._gpio_setup:\n if self._gpio is None:\n try:\n import RPi.GPIO as GPIO\n self._gpio = GPIO\n except ImportError:\n raise ImportError('This library requires the RPi.GPIO module\\nInstall with: sudo apt install python-rpi.gpio')\n self._gpio.setmode(self._gpio.BCM)\n self._gpio.setwarnings(False)\n self._gpio.setup(self.cs_pin, self._gpio.OUT)\n self._gpio.setup(self.dc_pin, self._gpio.OUT, initial=self._gpio.LOW, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.reset_pin, self._gpio.OUT, initial=self._gpio.HIGH, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.busy_pin, self._gpio.IN, pull_up_down=self._gpio.PUD_OFF)\n\n if self._spi_bus is None:\n import spidev\n self._spi_bus = spidev.SpiDev()\n\n self._spi_bus.open(0, self.cs_channel)\n self._spi_bus.no_cs = True\n self._spi_bus.max_speed_hz = 5000000\n\n self._gpio_setup = True\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n time.sleep(0.1)\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n\n self._busy_wait(1.0)\n\n # Sending init commands to display\n self._send_command(AC073TC1_CMDH, [0x49, 0x55, 0x20, 0x08, 0x09, 0x18])\n\n self._send_command(AC073TC1_PWR, [0x3F, 0x00, 0x32, 0x2A, 0x0E, 0x2A])\n\n self._send_command(AC073TC1_PSR, [0x5F, 0x69])\n\n self._send_command(AC073TC1_POFS, [0x00, 0x54, 0x00, 0x44])\n\n self._send_command(AC073TC1_BTST1, [0x40, 0x1F, 0x1F, 0x2C])\n\n self._send_command(AC073TC1_BTST2, [0x6F, 0x1F, 0x16, 0x25])\n\n self._send_command(AC073TC1_BTST3, [0x6F, 0x1F, 0x1F, 0x22])\n\n self._send_command(AC073TC1_IPC, [0x00, 0x04])\n\n self._send_command(AC073TC1_PLL, [0x02])\n\n self._send_command(AC073TC1_TSE, [0x00])\n\n self._send_command(AC073TC1_CDI, [0x3F])\n\n self._send_command(AC073TC1_TCON, [0x02, 0x00])\n\n self._send_command(AC073TC1_TRES, [0x03, 0x20, 0x01, 0xE0])\n\n self._send_command(AC073TC1_VDCS, [0x1E])\n\n self._send_command(AC073TC1_T_VDCS, [0x00])\n\n self._send_command(AC073TC1_AGID, [0x00])\n\n self._send_command(AC073TC1_PWS, [0x2F])\n\n self._send_command(AC073TC1_CCSET, [0x00])\n\n self._send_command(AC073TC1_TSSET, [0x00])", "def start(self):\n\n buttons = {}\n for pin in self._pin_nums:\n buttons[\"button_\" + str(pin)] = pin\n\n self._gpio = self.init_interface(\"gpio\",\n impl=self._impl, \n **buttons)\n\n d_len = len(self._dir)\n b_len = len(self._bounce)\n for i, button in enumerate(buttons):\n # Initiliaze every button\n dire = self._dir[i % d_len]\n 
self._directions.append(dire)\n\n boun = self._bounce[i % b_len]\n self._debounce.append(boun)\n\n self._button_init(button, dire, boun)", "def init_servos():\n for i in range(0, 7):\n kit.servo[i].actuation_range = 180\n kit.servo[i].set_pulse_width_range(450, 2550)", "def pwm_controller_init(self, chain: machine.I2C = None, freq: int = 333):\n if self.antenny_config.get(\"use_motor\"):\n print(\"use_motor found in config: {}\".format(self.antenny_config.get_name()))\n if chain is None:\n i2c_pwm_controller_scl = self.antenny_config.get(\"i2c_pwm_controller_scl\")\n i2c_pwm_controller_sda = self.antenny_config.get(\"i2c_pwm_controller_sda\")\n self.i2c_pwm_controller = self.i2c_init(\n 0,\n i2c_pwm_controller_scl,\n i2c_pwm_controller_sda\n )\n else:\n self.i2c_pwm_controller = chain\n pwm_controller = Pca9685Controller(self.i2c_pwm_controller, freq=freq)\n print(\"Motor connected\")\n safe_mode = False\n else:\n pwm_controller = MockPWMController()\n print(\"According to your config, you do not have a motor connected, entering Safe Mode\")\n safe_mode = True\n self.pwm_controller = pwm_controller\n self.safe_mode = safe_mode\n return pwm_controller, safe_mode", "def __init__(self, red_pin, green_pin, blue_pin):\n #self.red = gpiozero.PWMLED(red_pin, frequency=80, initial_value=1)\n #self.green = gpiozero.PWMLED(green_pin, frequency=80, initial_value=1)\n #self.blue = gpiozero.PWMLED(blue_pin, frequency=80, initial_value=1)\n self.red = gpiozero.LED(red_pin)\n self.green = gpiozero.LED(green_pin)\n self.blue = gpiozero.LED(blue_pin)\n self.leds = [self.red, self.green, self.blue]\n self.off()", "def _setup(self):\n if self._setup:\n return\n\n GPIO.setmode(GPIO.BCM)\n for i in (self.on, self.off):\n GPIO.setup(i, GPIO.OUT)\n self.stop_all()\n Outlet._setup = True", "def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])", "def __init__(self):\n\n GPIO.setup(PIN_BTN, GPIO.IN, GPIO.PUD_UP)\n GPIO.setup(PIN_RED_LED_0, GPIO.OUT, GPIO.LOW)\n GPIO.setup(PIN_BLUE_LED, GPIO.OUT, GPIO.LOW)", "def initialize(self) -> None:\n # Set motors to stop, read encoder values for starting point\n self.drive.arcadeDrive(0, 0)\n self.drive.resetEncoders()", "def _init_io(self):\n GPIO.setwarnings(False)\n GPIO.setmode( GPIO.BCM )\n pins = [ self._spi_dc ]\n for pin in pins:\n GPIO.setup( pin, GPIO.OUT )", "def __init__(self, servo_gpio, pi=None, pulse_left_ns=2500, pulse_right_ns=1000, pulse_centre_ns=None):\n\n self.gpio = servo_gpio\n\n if pi is None:\n self.pi = pi = pigpio.pi()\n else:\n self.pi = pi\n\n self.pulse_left_ns = pulse_left_ns\n self.pulse_right_ns = pulse_right_ns\n\n if pulse_centre_ns is None:\n self.pulse_centre_ns = ((pulse_left_ns - pulse_right_ns) // 2) + pulse_right_ns", "def initialize(self):\n\t\tpcd8544.LCD.initialize(self)\n\t\tRPIO.setup(self._backlight_pin, RPIO.OUT, initial=RPIO.LOW)", "def idle(self):\n self.pi.set_servo_pulsewidth(self.gpio, 0)", "def __init__(self):\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(pin,GPIO.OUT)", "def __init__(self):\n\n self.wp = wp\n self.wp.wiringPiSetup()\n\n self.LEDON_PIN = 21\n self.SENSOR_PINS = [22, 26, 23, 27, 24, 28, 25, 29]\n self.NUM_SENSORS = len(self.SENSOR_PINS)\n self.CHARGE_TIME = 10 #us to charge the capacitors\n self.READING_TIMEOUT = 1000 #us, assume reading is black\n\n self.sensorValues = []\n self.calibratedMax = []\n self.calibratedMin = []\n self.lastValue = 0\n self.init_pins()", "def __init__(self):\n super().__init__()\n\n # Robot state\n self.ask_mode = 
False\n\n # Connect two large motors on output ports B and C\n self.sound = Sound()\n self.leds = Leds()\n self.p1 = TouchSensor(INPUT_1)\n self.p2 = TouchSensor(INPUT_2)\n self.p3 = TouchSensor(INPUT_3)\n self.p4 = TouchSensor(INPUT_4)", "def configure_servo(self, board):\n self.servo = board.get_pin(f\"d:{self.pin}:p\")\n board.servo_config(\n pin = self.pin,\n min_pulse = 544,\n max_pulse = 2400,\n angle = 93\n )", "def init():\n print(\"initializing...\")\n print(\"setting relays off\")\n for pin in PINS:\n GPIO.setup(pin, GPIO.OUT)\n GPIO.output(pin, RELAYOFF)", "def init(self):\n self.reset()\n\n self.__interface.send_command('POWER_SETTING')\n self.__interface.send_data(0x37)\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('PANEL_SETTING')\n self.__interface.send_data(0xCF)\n self.__interface.send_data(0x08)\n\n self.__interface.send_command('BOOSTER_SOFT_START')\n self.__interface.send_data(0xc7)\n self.__interface.send_data(0xcc)\n self.__interface.send_data(0x28)\n\n self.__interface.send_command('POWER_ON')\n self.wait_until_idle()\n\n self.__interface.send_command('PLL_CONTROL')\n self.__interface.send_data(0x3c)\n\n self.__interface.send_command('TEMPERATURE_CALIBRATION')\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('VCOM_AND_DATA_INTERVAL_SETTING')\n self.__interface.send_data(0x77)\n\n self.__interface.send_command('TCON_SETTING')\n self.__interface.send_data(0x22)\n\n self.__interface.send_command('TCON_RESOLUTION')\n self.__interface.send_data(0x02) #source 640\n self.__interface.send_data(0x80)\n self.__interface.send_data(0x01) #gate 384\n self.__interface.send_data(0x80)\n\n self.__interface.send_command('VCM_DC_SETTING')\n self.__interface.send_data(0x1E) #decide by LUT file\n\n self.__interface.send_command(0xe5, False) #FLASH MODE\n self.__interface.send_data(0x03)", "def set_param_motor():\n servo.setSpeed(0, 0) # max = 255\n servo.setAccel(0, 0)\n servo.setSpeed(1, 150) # max = 255\n servo.setAccel(1, 150)", "def __init__(self):\n GPIO.setmode(GPIO.BOARD)\n for light in self.all:\n GPIO.setup(light, GPIO.OUT)", "def initialize():\n for pin in sorted(OUTPUT_PINS.values()):\n _enable_pin(pin, OUT)\n\n for pin in sorted(INPUT_PINS.values()):\n _enable_pin(pin, IN)", "def turn_on(self):\n # read out the current pose of the robot\n configuration = self.robot.get_all_servo_position()\n\n # interpolate to the default position\n interpolation_time = 3000 # ms\n interpolation_steps = interpolation_time // TIME_FRAME\n\n speed = np.zeros(18)\n for i in range(18):\n speed[i] = (SERVOS_BASE[i] - configuration[i]) / interpolation_steps\n\n # execute the motion\n for t in range(interpolation_steps):\n self.robot.set_all_servo_position(configuration + t * speed)", "def init_pins(self):\n for pin in self.SENSOR_PINS:\n self.sensorValues.append(0)\n self.calibratedMax.append(0)\n self.calibratedMin.append(0)\n self.wp.pullUpDnControl(pin, self.wp.PUD_DOWN)\n self.wp.pinMode(self.LEDON_PIN, self.wp.OUTPUT)", "def initialize(self):\n self.initilize_multiply_array() # m\n self.initialize_cameras()\n self.initialize_electronics()\n self.logger.info('Starting free runs and continuous reads')\n self.camera_microscope.start_free_run()\n self.camera_microscope.continuous_reads()\n self.camera_fiber.start_free_run()\n self.camera_fiber.continuous_reads()\n self.servo_off()\n\n time.sleep(1) #m Without the sleep below initialize_multiply_array does not work", "def set_init_speed(self):\n self.control_instance.set_init_speed()", "def main():\n\n # 
Center positions when joystick is at rest\n center_x_pos = 530\n center_y_pos = 504\n\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup([red_led, green_led, blue_led], GPIO.OUT, initial=GPIO.LOW)\n\n pwm_r = GPIO.PWM(red_led, 300)\n pwm_g = GPIO.PWM(green_led, 300)\n pwm_b = GPIO.PWM(blue_led, 300)\n\n pwm_instances = [pwm_r, pwm_g, pwm_b]\n\n for p in pwm_instances:\n p.start(0)\n\n try:\n while True:\n # If joystick switch is pressed down, turn off LEDs\n switch = read_spi_data_channel(mcp3008_switch_channel)\n if switch == 0:\n for p in pwm_instances:\n p.ChangeDutyCycle(0)\n continue\n\n # Read the joystick position data\n x_pos = read_spi_data_channel(mcp3008_x_voltage_channel)\n y_pos = read_spi_data_channel(mcp3008_y_voltage_channel)\n\n # If joystick is at rest in center, turn on all LEDs at max\n if is_joystick_near_center(x_pos, y_pos, center_x_pos, center_y_pos):\n for p in pwm_instances:\n p.ChangeDutyCycle(100)\n continue\n\n # Adjust duty cycle of LEDs based on joystick position\n angle = convert_coordinates_to_angle(x_pos, y_pos, center_x_pos, center_y_pos)\n pwm_r.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'R'))\n pwm_g.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'G'))\n pwm_b.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'B'))\n\n # print(\"Position : ({},{}) -- Angle : {}\".format(x_pos, y_pos, round(angle, 2)))\n\n except KeyboardInterrupt:\n pass\n\n finally:\n for p in pwm_instances:\n p.stop()\n spi.close()\n GPIO.cleanup()", "def GPIO_initialization():\n GPIO.setmode(GPIO.BCM)\n\n GPIO.setup(Sensor.IN_1, GPIO.OUT)\n GPIO.setup(Sensor.IN_2, GPIO.OUT)\n GPIO.setup(Sensor.EN, GPIO.OUT)\n\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['RED_STOP'], GPIO.IN)\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['YELLOW_CW'], GPIO.IN)\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['GREEN_CCW'], GPIO.IN)\n\n GPIO.output(Sensor.IN_1, GPIO.LOW)\n GPIO.output(Sensor.IN_2, GPIO.LOW)", "def __init__(self):\n # set vars\n self.__deamon = True\n _tacho_pin = 'GPIO_PZ0'\n self._pow_pin = 'CAM_AF_EN'\n self._mist_pin = 'SPI2_MOSI'\n _pwm_pin = 'LCD_BL_PW'\n self._pwm_obj = None\n self._pump_pin = 'DAP4_DIN'\n self._sens_level_mist = 'UART2_CTS'\n self._sens_level_tank = 'DAP4_FS'\n self._sleep_pin = 'SPI2_SCK'\n self.__pow = 0\n self.__hum = 0\n self.__hum_temp = 100\n self.__sens_temp = 100\n self.__t = time.time()\n self.__rpm = 0\n\n # init everything\n GPIO.setmode(GPIO.TEGRA_SOC)\n GPIO.setup(self._pow_pin, GPIO.OUT)\n GPIO.setup(self._mist_pin, GPIO.OUT)\n GPIO.setup(self._pump_pin, GPIO.OUT)\n GPIO.setup(_tacho_pin, GPIO.IN)\n GPIO.setup(self._sens_level_mist, GPIO.IN)\n GPIO.setup(self._sens_level_tank, GPIO.IN)\n GPIO.setup(self._sleep_pin, GPIO.IN)\n GPIO.add_event_detect(_tacho_pin, GPIO.FALLING, self.__fan_int)\n GPIO.add_event_detect(self._sens_level_tank,\n GPIO.BOTH, self.__tank)\n GPIO.add_event_detect(self._sleep_pin, GPIO.FALLING, self.__Sleep)\n self._tah = temps.Temp()\n self._led = LED()\n self.kit = ServoKit(channels=16)\n self.kit.servo[7].actuation_range = 180\n self.kit.servo[8].actuation_range = 20\n self._pwm_obj = GPIO.PWM(_pwm_pin, 1000)\n self.__mist_thread = threading.Thread(target=self.__mist_deamon)\n self.__pow_thread = threading.Thread(target=self.__pow_deamon)\n self.__pump_thread = threading.Thread(target=self.__pump_deamon)\n self.__mist_thread.start()\n self.__pow_thread.start()\n self.__pump_thread.start()\n self._pwm_obj.start(0)\n self._led.reset_error()", "def __init__(self):\n 
self.inches_moved = 0\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.running = True\n self.ir_sensor = ev3.InfraredSensor()\n self.color_sensor = ev3.ColorSensor()\n assert self.color_sensor\n assert self.ir_sensor\n assert self.touch_sensor\n self.arm_motor.position = 0\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.pixy\n\n self.right_motor_encoder = self.right_motor.position\n self.left_motor_encoder = self.left_motor.position", "def __init__(self, pin1=24, pin2=28, pin3=25, pin4=33):\n self.GP = GPIOProcessor()\n self.pin1 = self.GP.getPin(pin1)\n self.pin2 = self.GP.getPin(pin2)\n self.pin3 = self.GP.getPin(pin3)\n self.pin4 = self.GP.getPin(pin4)\n self.pinl = [self.pin1, self.pin2, self.pin3, self.pin4]\n\n for k in range(4):\n self.pinl[k].out()\n\n self.speed = 100.0", "def __init__(self):\n GPIO.setmode(GPIO.BOARD) # Set's GPIO referencing to RPi Board Refdes\n self.chanlist = [29, 31, 33, 35, 37] # chanlist 0, 1, 2, 3, 4\n GPIO.setup(29, GPIO.IN) # Setup as input to pi\n GPIO.setup(31, GPIO.IN) # Setup as input\n GPIO.setup(33, GPIO.IN) # Setup as input\n GPIO.setup(35, GPIO.IN) # Setup as input\n GPIO.setup(37, GPIO.OUT) # Setup as output from pi\n self.SHTDWN = False\n\n GPIO.add_event_detect(self.chanlist[1], GPIO.BOTH) \n GPIO.add_event_detect(self.chanlist[3], GPIO.FALLING, self.callback_SHTDWN, bouncetime=200)", "def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)", "def initPWM(tmr_channel, divisorCode, firstPart, secondPart, invert):\n divisorCode += 8\n makeTMRpin(tmr_channel)\n writeTMR(tmr_channel, TMR_CTRL, 0x0000)\n writeTMR(tmr_channel, TMR_LOAD, 0x0000)\n writeTMR(tmr_channel, TMR_CNTR, 0x0000)\n if invert:\n sctrl = 0x0007\n else:\n sctrl = 0x0005\n writeTMR(tmr_channel, TMR_SCTRL, sctrl)\n writeTMR(tmr_channel, TMR_CSCTRL, 0x0009)\n writeTMR(tmr_channel, TMR_COMP1, firstPart)\n writeTMR(tmr_channel, TMR_CMPLD1, firstPart)\n writeTMR(tmr_channel, TMR_COMP2, secondPart)\n writeTMR(tmr_channel, TMR_CMPLD2, secondPart)\n ctrl = 0x2024 | (divisorCode << 9)\n writeTMR(tmr_channel, TMR_CTRL, ctrl)", "def __init__(self, io_callback):\n self.io_callback = io_callback\n # Current magnets' state of the motor\n self.magnets = [ 0, 0, 0, 0 ]\n # Linear speed of the motor\n self.linear_speed = 0\n # Time elapsed from the last magnets change\n self.lastchange = 0\n # Time the motor can keep the current speed\n self.timecap = 0", "def enable_motor():\n print('Enabling motor')\n start_motor = '{\"id\" : \"Motor1\", \"enabled\" : \"1\"}'\n SERIAL_PARENT.send(start_motor)\n OUTGOING.append(start_motor)", "def _initialize_hardware(self):\n # Import\n try:\n from gpiozero import MCP3008\n except Exception as ex:\n logging.error('\\n *** ERROR importing gpiozero: {}'.format(ex))\n\n # Things failed, must be running locally, not on a widget, so don't\n # bother initializing the MCP3008\n return\n\n # Initialize the MCP3008\n try:\n self._sensor = MCP3008(channel=0)\n except Exception as ex:\n logging.error('\\n *** ERROR initializing MCP3008: {}'.format(ex))\n return\n\n # Start force loop thread\n 
threading.Thread(target=self._force_loop, daemon=True).start()", "def startup(self):\n # Initializing the cycle data (cd) dictionary\n self.cd[\"started_up\"] = False\n self.cd[\"peak_pressure\"] = 0\n self.cd[\"tidal_volume\"] = 0\n self.cd[\"inhale_duration\"] = 0\n self.cd[\"exhale_duration\"] = 0\n self.cd[\"IE_ratio\"] = 1\n self.cd[\"PEEP\"] = 0\n\n to = 2 # Timeout\n startup_cycles = 0\n limit = 20\n # If the piston position is unknown\n last_cycle = time.time()\n while not self.piston.piston_at_bottom and not self.piston.piston_at_top:\n if self.pst_dir == 1:\n self.piston.pst_up()\n if time.time() - last_cycle > to:\n self.pst_dir = 0\n startup_cycles += 1\n last_cycle = time.time()\n else:\n self.piston.pst_down()\n if time.time() - last_cycle > to:\n self.pst_dir = 1\n startup_cycles += 1\n last_cycle = time.time()\n if startup_cycles >= limit:\n print(\"There is a problem at startup, check compressed air\")\n print(f\"Tried to startup for {startup_cycles} cycles\")\n # Breaks the loop so that the controller doesn't start\n self.signal_startup_error.emit(True)\n return\n while not self.piston.piston_at_top:\n self.piston.pst_up()\n self.piston.stop()\n\n print(f\"startup_cycles: {startup_cycles}\")\n self.cd[\"started_up\"] = True\n self.signal_cycle_data.emit(self.cd)\n # Duration of the first tare of the system\n tare_duration = 5.0\n time.sleep(tare_duration)\n self.signal_get_tare.emit(tare_duration)\n # Waits a little bit just to make sure that the respirator isn't working when the controller \n # is called\n time.sleep(0.5)\n self.piston_control()", "def autonomousInit(self):\n fieldState = self.driverStation.getGameSpecificMessage()\n self.fieldState = fieldState\n self.smartDashboard.putString(\"field state\", fieldState)\n fieldPosition = self.smartDashboard.getString(\"field position\", \"\")\n self.startingFieldPosition = self.parserobotFieldPosition(fieldPosition)\n self.smartDashboard.putNumber(\"position\", self.startingFieldPosition)\n \n #convert field states to our enum values \n self.ourSwitchSide = self.parserobotFieldPosition(self.fieldState[0])\n self.scaleSide = self.parserobotFieldPosition(self.fieldState[1])\n self.theirSwitchSide = self.parserobotFieldPosition(self.fieldState[2])\n if self.startingFieldPosition==self.kNothing:\n print(\"No field position set. Aborting\")\n return \n \n \n #self.Encoder.setMaxPeriod(.1)\n #self.Encoder.setMinRate(10)\n #self.Encoder.setDistancePerPulse(5)\n #self.Encoder.setReverseDirection(True)\n #self.Encoder.getDistance()\n \n \"\"\"self.Encoder.reset()\n while (self.Encoder.get() < value):\n drive\n delay\"\"\"\n \n \n \n \n \n \n \n #self.Encoder.getRawAxis()\n \n \n #todo change RRR to from fms, maybe parse it first\n \n self.autonomousProgram = commands.autonomousCommand.AutonomousProgram(self.startingFieldPosition)\n self.autonomousProgram.start()", "def __init__(self, inPs, outPs):\n self.angle = 0.0\n self.speed = 16.5\n\n super(MovementControl,self).__init__(inPs, outPs)", "def auto_setup(self):\n if self.mot_type == \"xps8p\":\n return\n if self.get_par(\"err_sevr\") == 3:\n print \"Reinitializing motor {}...\".format(self.name)\n self.reinit()\n ok = self.wait_par(\"err_sevr\", 3, match_value=False, timeout=20)\n if ok:\n print \"Successfully reinitialized {}.\".format(self.name)\n time.sleep(0.5)\n else:\n print \"Reinitializing {} timed out. 
Aborting auto_setup.\".format(self.name)\n return\n\n for i in range(3):\n for clear, name in ((self.clear_pu, \"powerup\"),\n (self.clear_stall, \"stall flag\"),\n (self.clear_error, \"error flag\")):\n clear(check=True, wait=False)\n\n ok = []\n for bit, mask in ((RA_POWERUP, 1), (RA_STALL, 1), (RA_ERR, RA_ERR_MASK)):\n ok.append(self._wait_msta_bit(bit, 0, mask, timeout=10))\n if not all(ok):\n print \"Issues with clearing flags for {}\".format(self.name)\n\n try: # Not every environment has pmgr access\n self.pmgr.apply_config(dumb_config=self.name)\n except:\n pass", "def DriveMotor():\n\n # cnt overflows at 25KHz (approximately)\n cnt = intbv(0, min = 0, max = CNT_MAX + 1)\n\n # 10-bit duty cycle\n duty_cycle = intbv(0)[10:]\n\n while True:\n yield clk25.posedge, rst_n.negedge\n if rst_n == LOW:\n cnt[:] = 0\n duty_cycle[:] = 0\n dir.next = HIGH_OPTO\n pwm.next = LOW_OPTO\n en_n.next = LOW_OPTO\n else:\n # accept new consign at the beginning of a period\n if cnt == 0:\n # extract duty cycle and direction\n if speed >= 0:\n duty_cycle[:] = speed\n dir.next = HIGH_OPTO\n elif -speed >= CNT_MAX: # handle -1024 case\n duty_cycle[:] = CNT_MAX\n dir.next = LOW_OPTO\n else:\n duty_cycle[:] = -speed\n dir.next = LOW_OPTO\n\n # reached consign?\n if cnt >= duty_cycle:\n pwm.next = LOW_OPTO\n else:\n pwm.next = HIGH_OPTO\n\n if cnt == CNT_MAX:\n cnt[:] = 0\n else:\n cnt += 1\n\n en_n.next = LOW_OPTO", "def robotInit(self):\n\n #Initialize Networktables\n self.sd = NetworkTables.getTable('SmartDashboard')\n\n \n #Set up motors to drive robot\n self.M2 = wpilib.VictorSP(2)\n self.M3 = wpilib.VictorSP(3)\n #self.M2.setInverted(True)\n #self.M3.setInverted(True)\n self.left = wpilib.SpeedControllerGroup(self.M2,self.M3)\n \n self.M0 = wpilib.VictorSP(0)\n self.M1 = wpilib.VictorSP(1)\n self.right = wpilib.SpeedControllerGroup(self.M0,self.M1)\n self.drive = wpilib.drive.DifferentialDrive(self.left, self.right)\n \n \n self.stick = wpilib.Joystick(1)\n self.timer = wpilib.Timer()\n #Camera\n wpilib.CameraServer.launch()\n #Servo\n self.SV1 = wpilib.Servo(9)\n self.SV2 = wpilib.Servo(8) \n #Dashboard\n NetworkTables.initialize(server='10.61.62.2')\n #Switches\n self.SW0 = wpilib.DigitalInput(0)\n self.SW1 = wpilib.DigitalInput(1)\n #Elevator\n self.E = wpilib.VictorSP(5)\n self.prepareCubeFlag = 0\n self.grabCubeFlag = 0\n self.deliverCubeFlag = 0\n self.adjustLeftFlag=0\n self.adjustRightFlag=0\n self.driveFlag=0\n #Gyro\n self.gyro = wpilib.ADXRS450_Gyro(0)\n self.gyro.reset()\n #All possible autonomous routines in a sendable chooser\n '''\n self.chooser = wpilib.SendableChooser()\n self.chooser.addDefault(\"None\", '4')\n self.chooser.addObject(\"left-LeftScale\", '1')\n self.chooser.addObject(\"Middle-LeftScale\", '2')\n self.chooser.addObject(\"Right-LeftScale\", '3')\n self.chooser.addObject(\"Left-RightScale\", '5')\n '''\n #wpilib.SmartDashboard.putData('Choice', self.chooser)\n #Encoders\n self.EC1 = wpilib.Encoder(2,3)\n self.EC2 = wpilib.Encoder(4,5)\n self.EC1.reset()\n self.EC2.reset()", "def init():\n\n global leftDriverStick\n global rightDriverStick\n global goGamePad\n\n try:\n leftDriverStick = T16000M(0)\n except:\n print('OI: Error - Could not instantiate Left Driver Stick on USB port 0!!!')\n\n try:\n rightDriverStick = T16000M(1)\n except:\n print('OI: Error - Could not instantiate Right Driver Stick on USB port 0!!!')\n\n try:\n goGamePad = Joystick(2)\n except:\n print('OI: Error - Could not instantiate Right Driver Stick on USB port 2!!!')\n\n\n # 
----------------------------------------------------------\n # Driver Controls\n # ----------------------------------------------------------\n #global resetYawBtn\n #resetYawBtn = JoystickButton(rightDriverStick, config.btnResetYawAngleIndex)\n #resetYawBtn.whenPressed(NavxResetYawAngle())\n\n global btnDriveSlow\n btnDriveSlow = JoystickButton(leftDriverStick, config.btnDriveSlow)\n \n global btnEnableLightSensor\n btnEnableLightSensor = JoystickButton(leftDriverStick, config.btnEnableLightSensorIndex)\n\n global btnExtendAll\n btnExtendAll = JoystickButton(rightDriverStick, config.btnExtendAllIndex)\n btnExtendAll.whenPressed(ExtendAll())\n\n global btnRetract\n btnRetract = JoystickButton(rightDriverStick, config.btnRetractAllIndex)\n btnRetract.whenPressed(RetractAll())\n\n global btnExtendFront\n btnExtendFront = JoystickButton(rightDriverStick, config.btnExtendFrontIndex)\n btnExtendFront.whenPressed(ExtendFront())\n\n global btnExtendBack\n btnExtendBack = JoystickButton(rightDriverStick, config.btnExtendBackIndex)\n btnExtendBack.whenPressed(ExtendBack())\n\n global btnRetractFront\n btnRetractFront = JoystickButton(rightDriverStick, config.btnRetractFrontIndex)\n btnRetractFront.whenPressed(RetractFront())\n\n global btnCargoGrabTog\n btnCargoGrabTog = JoystickButton(goGamePad, config.btnHatchGrabTogIndex)\n btnCargoGrabTog.whenPressed(ExtendBack())\n \n \"\"\"\n global btnResetEncoders\n btnResetEncoders = JoystickButton(leftDriverStick, config.btnResetEncodersIndex)\n btnResetEncoders.whenPressed(TankDriveResetEncoders())\n \"\"\"\n\n \"\"\"\n global axisElevator\n axisElevator = JoystickAxis(goGamePad, config.axisElevatorIndex)\n axisElevator. #??? idk how to configure joystick axis\n \"\"\"\n\n \"\"\"\n global btnRampTog\n btnRampTog = JoystickButton(goGamePad, config.btnRampTogIndex)\n btnRampTog.whenPressed(ExtendFront())\n \"\"\"\n #global btnResetEncoders\n #btnResetEncoders = JoystickButton(leftDriverStick, config.btnResetEncodersIndex)\n #btnResetEncoders.whenPressed(TankDriveResetEncoders())\n\n # These variable names are inconsistent, need to be fixed!!!!\n #global btnRampExtendTog\n #btnRampExtendTog = JoystickButton(goGamePad, config.btnRampExtendTogIndex)\n #btnRampExtendTog.whenPressed(RampExtend())\n\n #global btnRampRetractTog\n #btnRampRetractTog = JoystickButton(goGamePad, config.btnRampRetractTogIndex)\n #btnRampRetractTog.whenPressed(RampRetract())", "def __init__(self, serial):\n self.serial = serial # Serial object (timeout should be set to 1 second)\n\n # Make sure motors aren't running and servo is centred\n\n self.stop()\n self.servo_state(\"centre\")", "def setup(self):\n \n # Board refers to the P1 header of the Raspberry Pi board\n GPIO.setmode(GPIO.BOARD)\n\n # Set up pin as an input with a pull up resistor to 3.3V\n GPIO.setup(self.__pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)", "def initialize_relays(self):\n #create list of bytes to clear out relays\n zeroed_bytes = []\n for i in range(self.num_registers):\n zeroed_bytes.append(0x00)\n\n #clear out any data in the shift registers\n ret = self.e.write_SPI_bytes_to_portA(zeroed_bytes)\n self.strobe_relays()\n print \"read from SPI: \",\n print ret\n\n #enable the relays\n self.enable_relays()", "def init(\n baudrate=1000000, bits=8, mode=0, sclk=\"pin13\", mosi=\"pin15\", miso=\"pin14\"\n ):\n utils.print_for_unimplemented_functions(SPI.init.__qualname__)\n telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_SPI)", "def setup(motion):\n GPIO.setmode(GPIO.BCM)\n # Sets pin numbering system\n 
GPIO.setup(motion, GPIO.IN)\n # Configures given pin for input usage.", "def setup(self):\n self.pi.set_pull_up_down(self.gpio, pigpio.PUD_OFF)\n self.pi.set_watchdog(self.gpio, 0)\n self.register_callbacks()", "def __init__(self, pin =0):\n\t\tself.uv_sensor = pin\n\t\tgrovepi.pinMode(self.uv_sensor, \"INPUT\")", "def initialize(self):\n self.ros.enable()\n self.phone_link.enable()", "def control_motorized(self, action, pin_num=18):\n pulsewidth = self.avail_actions.get(action, None)\n if not pulsewidth:\n raise ValueError('Action not permitted')\n self.pi.set_servo_pulsewidth(pin_num, pulsewidth)\n return self.pi.get_servo_pulsewidth(pin_num)", "def __init__(self, gpio_on, gpio_off, delay):\n self.gpio_on = gpio_on\n self.gpio_off = gpio_off\n self.delay = delay", "def __init__(self, port, pmin=None, pmax=None, calpow=20):\n super().__init__(port)\n\n # power with which the motor will be calibrated\n self._calpow = calpow\n # minimum position\n self._pmin = pmin\n # maximum position\n self._pmax = pmax\n\n # if min and max were given, calculate initial position\n if self._pmin and self._pmax:\n self._pinit = (self._pmax + self._pmin) * 0.5\n else:\n # initial position for this motor. will be determined in `calibrate`\n self._pinit = None", "def __init__(self, channel):\n self.servo = wpilib.PWM(channel)\n self.close_value = 0\n #self.setBounds(1.0, 1.48, 1.5, 1.52, 2.0)\n self.setBounds(2.0, 1.65, 1.5, 1.35, 1.0)", "def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def start(self):\n global trackWidth\n trackWidth = self.getTrackWidth()\n print(\"track width = \" + str(trackWidth))\n #motors.moveForward(0,2)\n initTheta = self.getTheta(trackWidth)\n motors.pivot(\"left\", 30, 0.25) #spin left for 1 second\n print(\"Moved\")\n newTheta = self.getTheta(trackWidth)\n #Checks if the robot is pointed even further of course or not, corrects for whichever\n if newTheta < initTheta:\n while self.getTheta(trackWidth) >=rads: #Spins while the robot is pointed more than 0.122 rads from straight\n motors.pivot(\"left\", 30, 0.25) #spin left for 0.25 second\n elif newTheta > initTheta:\n while self.getTheta(trackWidth) >= rads:\n motors.pivot(\"right\", 30, 0.25) #spin right for 0.25 second", "def connect(self):\n try:\n # Port and packet handler set up\n self.port_handler = port_h.PortHandler(self.port_name)\n self.packet_handler = packet_h.PacketHandler(self.protocol_version)\n\n # Set up port and baud rate\n self.port_handler.openPort()\n self.port_handler.setBaudRate(self.baud_rate)\n self.__find_motors()\n except rospy.ROSInterruptException: pass\n\n self.running = True", "def zeroMotor(self):\n\t\tpass", "def __init__(self):\n self._read_calibration_data()\n self.configure_sensor(\n TemperatureOversamplings.x08,\n PressureOversamplings.x16,\n HumidityOversamplings.x08,\n IIRFilterCoefficients.FC_003,\n 250,\n 250)", "def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)", "def _pwm_pin(self, pin_obj):\n self.hw_interfaces[\"pwm\"][pin_obj.name] = PWM(pin=pin_obj)", "def poweron(self) -> None:\n self.servo_reset()", "def _setup(self) -> None:\n # Call base implementation\n super()._setup()\n\n # Configure the low-level integrator\n engine_options = self.simulator.engine.get_options()\n engine_options[\"stepper\"][\"iterMax\"] = 0\n 
engine_options[\"stepper\"][\"dtMax\"] = min(0.02, self.step_dt)\n engine_options[\"stepper\"][\"logInternalStepperSteps\"] = False\n\n # Set maximum computation time for single internal integration steps\n if self.debug:\n engine_options[\"stepper\"][\"timeout\"] = 0.0\n else:\n engine_options[\"stepper\"][\"timeout\"] = 2.0\n\n # Enable logging of geometries in debug mode\n if self.debug:\n engine_options[\"telemetry\"][\"isPersistent\"] = True\n\n # Update engine options\n self.simulator.engine.set_options(engine_options)\n\n # Set robot in neutral configuration\n qpos = self._neutral()\n framesForwardKinematics(\n self.robot.pinocchio_model, self.robot.pinocchio_data, qpos)", "def autonomousInit(self):\n #self.timer.reset()\n #self.timer.start()\n pass", "def init_switches(inds):\n for i in inds:\n GPIO.setup(i, GPIO.OUT, initial=0)", "def setup(self):\n\t\tself.interface = self.getDriver('light_interface')\n\n\t\tself.pin = self.config['interface_position']\n\t\tself.blink_rate = self.config['blink_rate'] / 2 or 0.5\n\t\tself.is_on = False\n\n\t\tself.intensity = 255\n\t\tself.blink = False\n\t\tself.count = None\n\t\tself.current_count = False\n\t\tself.current_count = None\n\n\t\tself.saved_intensity = None\n\t\tself.saved_blink = False\n\t\tself.saved_count = None\n\n\t\treturn True", "def initialize_light_pins(pi, pins):\n for pin in pins:\n pi.set_mode(pin, pigpio.OUTPUT)\n pi.set_pull_up_down(pin, pigpio.PUD_DOWN)", "def setup_gpio(self):\n logger.info(\"Setting up GPIO pins\")\n gpio.setmode(gpio.BOARD)\n gpio.setup(self.pins[\"SCLK\"], gpio.OUT)\n gpio.setup(self.pins[\"SDO\"], gpio.OUT)\n gpio.setup(self.pins[\"SDI\"], gpio.IN)\n gpio.setup(self.pins[\"IO_UPDATE\"], gpio.OUT)\n gpio.setup(self.pins[\"IO_RESET\"], gpio.OUT)\n gpio.setup(self.pins[\"RAM_SWP_OVR\"], gpio.IN)\n gpio.setup(self.pins[\"EXT_PWR_DOWN\"], gpio.OUT)\n gpio.setup(self.pins[\"MASTER_RESET\"], gpio.OUT)\n gpio.setup(self.pins[\"PLL_LOCK\"], gpio.IN)\n gpio.setup(self.pins[\"P_0\"], gpio.OUT)\n gpio.setup(self.pins[\"P_1\"], gpio.OUT)\n gpio.setup(self.pins[\"P_2\"], gpio.OUT)", "def set_pulse_width(self):\n\t\t\"\"\"For PWM Register-0\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PWM0, PCA9530_2C_1_PWM0_USERDEFINED)\n\t\t\n\t\t\"\"\"For PWM Register-1\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PWM1, PCA9530_2C_1_PWM1_USERDEFINED)", "def __init__(self):\n\n global GPIO\n\n # running is used to control thread execution.\n self._running = True\n\n # Keep MuleBot parallel to the wall at this distance.\n self.distanceToWall = 0\n\n\n self.pwmEnablePin = 23 # Broadcom pin 23 was 16\n self.motor1DirectionPin = 24 # Broadcom pin 24 was 20\n self.motor2DirectionPin = 25 # Broadcom pin 25 was 21\n\n self.motorForward = GPIO.HIGH\n self.motorReverse = GPIO.LOW\n\n\n self.dcMotorLeftMotor = 0\n self.dcMotorRightMotor = 1\n\n self.laserDetectLeftPin = 6\n self.laserDetectRightPin = 5\n\n self.motorMaxRPM = 12.0\n self.motorMaxRadiansPM = 2 * self.motorMaxRPM\n\n # Pin Setup:\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM) # Broadcom pin-numbering scheme\n GPIO.setup(self.pwmEnablePin, GPIO.OUT)\n GPIO.setup(self.motor1DirectionPin, GPIO.OUT)\n GPIO.setup(self.motor2DirectionPin, GPIO.OUT)\n\n GPIO.output(self.pwmEnablePin, GPIO.LOW )\n\n # This is interupts setups. 
They get used with the\n # test() method.\n #GPIO.setup(laserDetectLeftPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n #GPIO.setup(laserDetectRightPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n #GPIO.add_event_detect(laserDetectLeftPin, GPIO.FALLING, callback=myInt)\n #GPIO.add_event_detect(laserDetectRightPin, GPIO.FALLING, callback=myInt)\n\n\n # Initialise the PWM device using the default address\n self.pwm = PWM(0x40)\n # Note if you'd like more debug output you can instead run:\n #pwm = PWM(0x40, debug=True)\n\n\n #count = 1\n self.pwm.setPWMFreq(1000) # Set frequency to 1000 Hz\n\n self.tgt_min_range = 15", "def stop_motor(self):\n self.output(self.steering_pin, 0)\n self.pi.set_servo_pulsewidth(self.steering_pin, 0)", "async def manual_controls():\n # Connect to the Simulation\n drone = System()\n await drone.connect(system_address=\"udp://:14540\")\n\n # This waits till a mavlink based drone is connected\n async for state in drone.core.connection_state():\n if state.is_connected:\n print(f\"-- Connected to drone with UUID: {state.uuid}\")\n break\n\n # Checking if Global Position Estimate is ok\n async for global_lock in drone.telemetry.health():\n if global_lock.is_global_position_ok:\n print(\"-- Global position state is ok\")\n break\n\n # set the manual control input after arming\n await drone.manual_control.set_manual_control_input(\n float(0), float(0), float(0.5), float(0)\n )\n\n while not control.quit():\n if control.takeoff():\n print(\"key-takeoff\")\n # Arming the drone\n print(\"-- Arming\")\n await drone.action.arm()\n\n # Takeoff the vehicle\n print(\"-- Taking off\")\n await drone.action.takeoff()\n await asyncio.sleep(5)\n\n print(\"Setting gimbal mode\")\n await drone.gimbal.set_mode(GimbalMode.YAW_FOLLOW)\n await asyncio.sleep(5)\n\n # Move the gimbal to point at pitch -40 degrees, yaw 30 degrees\n # print(\"Setting pitch & yaw\")\n await drone.gimbal.set_pitch_and_yaw(-90, 0)\n await asyncio.sleep(10)\n\n # # set the manual control input after arming\n # await drone.manual_control.set_manual_control_input(\n # float(0), float(0), float(0.5), float(0)\n # )\n\n # # start manual control\n # print(\"-- Starting manual control\")\n # await drone.manual_control.start_position_control()\n\n elif control.landing():\n print(\"key-landing\")\n await drone.action.land()\n if control.has_piloting_cmd():\n print(\"-- piloting\")\n roll = float(control.roll()) * 0.5\n pitch = float(control.pitch()) * 0.5\n throttle = float(control.throttle())\n yaw = float(control.yaw())\n\n print(\"roll=\",roll)\n print(\"pitch=\", pitch)\n print(\"yaw=\",yaw)\n print(\"throtle=\", throttle)\n\n if throttle < 0:\n throttle = 0\n await drone.manual_control.set_manual_control_input(pitch,roll, throttle, yaw)\n\n # await asyncio.sleep(0.1)\n else:\n await drone.manual_control.set_manual_control_input(\n float(0), float(0), float(0.5), float(0)\n ) \n await asyncio.sleep(0.1) \n # elif control.quit():\n # print(\"key-quit\")\n # elif control.roll():\n # print(\"key-roll\")\n # elif control.pitch():\n # print(\"key-pitch\")\n # elif control.yaw():\n # print(\"key-yaw\")\n # elif control.throttle():\n # print(\"key-throttle\")\n\n\n\n # while True:\n # # grabs a random input from the test list\n # # WARNING - your simulation vehicle may crash if its unlucky enough\n # input_index = random.randint(0, len(manual_inputs) - 1)\n # input_list = manual_inputs[input_index]\n\n # # get current state of roll axis (between -1 and 1)\n # roll = float(input_list[0])\n # # get current state of pitch axis (between -1 
and 1)\n # pitch = float(input_list[1])\n # # get current state of throttle axis (between -1 and 1, but between 0 and 1 is expected)\n # throttle = float(input_list[2])\n # # get current state of yaw axis (between -1 and 1)\n # yaw = float(input_list[3])\n\n # await drone.manual_control.set_manual_control_input(roll, pitch, throttle, yaw)\n\n # await asyncio.sleep(0.1)", "def __init__(self):\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.MAX_SPEED = 900\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.left_motor.connected\n assert self.right_motor.connected\n assert self.arm_motor.connected\n assert self.touch_sensor\n assert self.color_sensor\n assert self.ir_sensor\n assert self.pixy", "def run_motor(self, motor, power):\n self.run_flag = True\n super(RemoteControl, self).run_motor(motor, power)", "def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError", "def initialize_pins(self):\n\n for pin_name in self.pin_defs.keys():\n if self.pin_defs[pin_name]['mode'] == 'input':\n self.pin(pin_name).direction = digitalio.Direction.INPUT\n #logging.info('Pin {} set to {}'.format(pin_num, self.pin_defs[pin_num]['mode']))\n\n elif self.pin_defs[pin_name]['mode'] == 'output':\n self.pin(pin_name).switch_to_output(value=self.pin_defs[pin_name]['init'])\n #logging.info('Pin {} set to {}'.format(pin_num, self.pin_defs[pin_num]['mode']))\n\n else:\n logging.error('Error, no direction defined for pin {}, pin_defs: {}'\n .format(pin_name, self.pin_defs[pin_name]))", "def set_control_commands(self, ref_state, ref_ind):\n super(DummyVehicle, self).set_control_commands(ref_state, ref_ind)\n safety_distance = 20.\n full_stop_distance = 15.\n\n\n self.check_if_overtake_is_finished()\n\n # Only continue from this point if there are some radar sensings\n if not numpy.any(self.radar_readings[0, :]):\n return\n\n\n min_dist = numpy.min(self.radar_readings[0, :])\n # Set speed.\n if min_dist < full_stop_distance:\n desired_speed = 0.\n self.overtake_begin_counter = 0\n\n elif min_dist < safety_distance:\n desired_speed = self.cruising_speed * min_dist / safety_distance\n else:\n desired_speed = self.cruising_speed\n\n # Every subclass can\n if not self.overtake:\n if self.check_if_overtake(min_dist):\n if self.check_if_safe_to_overtake():\n rospy.logwarn(str(self.vehicle_id) + ' start overtaking')\n self.overtake = True\n\n self.commands['speed'] = desired_speed", "def __init__(\n self,\n SER, SRCLK, RCLK,\n outputs=8,\n frequency=100,\n SRCLR=None, OE=None, pin_factory=None,\n ):\n\n super().__init__(pin_factory=pin_factory)\n\n self._closed = False\n\n self._underlying = ShiftRegister(SER, SRCLK, RCLK, outputs=outputs, SRCLR=SRCLR, OE=OE, pin_factory=pin_factory)\n\n if not isinstance(frequency, (float, int)):\n raise TypeError(\"Frequency must be a number.\")\n if frequency < 10:\n raise Warning(\"Frequency is too low.\")\n\n self._pulse_duration = 1 / frequency\n\n if not isinstance(outputs, int):\n raise TypeError(\"Number of outputs must be an int.\")\n self.outputs = outputs\n\n self._state = [0.0] * outputs\n\n self._running = True\n self._pwm_thread = Thread(target=self._pwm_runner, daemon=True)\n self._pwm_thread.start()", "def __init__(self, reset=True):\n self.__helper = _ABEHelpers()\n\n self.__bus = self.__helper.get_smbus()\n 
self.__bus.write_byte_data(\n self.__ioaddress, self.IOCON, self.__ioconfig)\n self.__port_a_value = self.__bus.read_byte_data(\n self.__ioaddress, self.GPIOA)\n self.__port_b_value = self.__bus.read_byte_data(\n self.__ioaddress, self.GPIOB)\n if reset is True:\n self.__bus.write_byte_data(self.__ioaddress, self.IODIRA, 0xFF)\n self.__bus.write_byte_data(self.__ioaddress, self.IODIRB, 0xFF)\n self.set_port_pullups(0, 0x00)\n self.set_port_pullups(1, 0x00)\n self.invert_port(0, 0x00)\n self.invert_port(1, 0x00)\n\n return", "def init_controls(self):\n\n\n controls_keypress_QWERTY = {\n 'w': lambda: self.set_speed(\"pitch\", self.def_speed[\"pitch\"]),\n 's': lambda: self.set_speed(\"pitch\", -self.def_speed[\"pitch\"]),\n 'a': lambda: self.set_speed(\"roll\", -self.def_speed[\"roll\"]),\n 'd': lambda: self.set_speed(\"roll\", self.def_speed[\"roll\"]),\n 'q': lambda: self.set_speed(\"yaw\", -self.def_speed[\"yaw\"]),\n 'e': lambda: self.set_speed(\"yaw\", self.def_speed[\"yaw\"]),\n 'i': lambda: self.drone.flip_forward(),\n 'k': lambda: self.drone.flip_back(),\n 'j': lambda: self.drone.flip_left(),\n 'l': lambda: self.drone.flip_right(),\n 'Key.left': lambda: self.set_speed(\"yaw\", -1.5*self.def_speed[\"yaw\"]),\n 'Key.right': lambda: self.set_speed(\"yaw\", 1.5*self.def_speed[\"yaw\"]),\n 'Key.up': lambda: self.set_speed(\"throttle\", self.def_speed[\"throttle\"]),\n 'Key.down': lambda: self.set_speed(\"throttle\", -self.def_speed[\"throttle\"]),\n 'Key.tab': lambda: self.drone.takeoff(),\n 'Key.backspace': lambda: self.drone.land(),\n 'p': lambda: self.palm_land_approach(),\n 'v': lambda: self.toggle_use_voice(),\n 't': lambda: self.toggle_tracking(),\n 'k': lambda: self.toggle_distance_mode(),\n 'm': lambda: self.toogle_manual_control(),\n 'Key.enter': lambda: self.take_picture(),\n 'c': lambda: self.clockwise_degrees(360),\n \n \n \n \n \n \n # '0': lambda: self.drone.set_video_encoder_rate(0),\n # '1': lambda: self.drone.set_video_encoder_rate(1),\n # '2': lambda: self.drone.set_video_encoder_rate(2),\n # '3': lambda: self.drone.set_video_encoder_rate(3),\n # '4': lambda: self.drone.set_video_encoder_rate(4),\n # '5': lambda: self.drone.set_video_encoder_rate(5),\n\n '7': lambda: self.set_exposure(-1), \n '8': lambda: self.set_exposure(0),\n '9': lambda: self.set_exposure(1)\n }\n\n controls_keyrelease_QWERTY = {\n 'w': lambda: self.set_speed(\"pitch\", 0),\n 's': lambda: self.set_speed(\"pitch\", 0),\n 'a': lambda: self.set_speed(\"roll\", 0),\n 'd': lambda: self.set_speed(\"roll\", 0),\n 'q': lambda: self.set_speed(\"yaw\", 0),\n 'e': lambda: self.set_speed(\"yaw\", 0),\n 'Key.left': lambda: self.set_speed(\"yaw\", 0),\n 'Key.right': lambda: self.set_speed(\"yaw\", 0),\n 'Key.up': lambda: self.set_speed(\"throttle\", 0),\n 'Key.down': lambda: self.set_speed(\"throttle\", 0)\n }\n\n controls_keypress_AZERTY = {\n 'z': lambda: self.set_speed(\"pitch\", self.def_speed[\"pitch\"]),\n 's': lambda: self.set_speed(\"pitch\", -self.def_speed[\"pitch\"]),\n 'q': lambda: self.set_speed(\"roll\", -self.def_speed[\"roll\"]),\n 'd': lambda: self.set_speed(\"roll\", self.def_speed[\"roll\"]),\n 'a': lambda: self.set_speed(\"yaw\", -self.def_speed[\"yaw\"]),\n 'e': lambda: self.set_speed(\"yaw\", self.def_speed[\"yaw\"]),\n 'i': lambda: self.drone.flip_forward(),\n 'k': lambda: self.drone.flip_back(),\n 'j': lambda: self.drone.flip_left(),\n 'l': lambda: self.drone.flip_right(),\n 'Key.left': lambda: self.set_speed(\"yaw\", -1.5*self.def_speed[\"yaw\"]),\n 'Key.right': lambda: 
self.set_speed(\"yaw\", 1.5*self.def_speed[\"yaw\"]),\n 'Key.up': lambda: self.set_speed(\"throttle\", self.def_speed[\"throttle\"]),\n 'Key.down': lambda: self.set_speed(\"throttle\", -self.def_speed[\"throttle\"]),\n 'Key.tab': lambda: self.drone.takeoff(),\n 'Key.backspace': lambda: self.drone.land(),\n 'p': lambda: self.palm_land(),\n 't': lambda: self.toggle_tracking(),\n 'Key.enter': lambda: self.take_picture(),\n 'c': lambda: self.clockwise_degrees(360),\n '0': lambda: self.drone.set_video_encoder_rate(0),\n '1': lambda: self.drone.set_video_encoder_rate(1),\n '2': lambda: self.drone.set_video_encoder_rate(2),\n '3': lambda: self.drone.set_video_encoder_rate(3),\n '4': lambda: self.drone.set_video_encoder_rate(4),\n '5': lambda: self.drone.set_video_encoder_rate(5),\n\n '7': lambda: self.set_exposure(-1), \n '8': lambda: self.set_exposure(0),\n '9': lambda: self.set_exposure(1)\n }\n\n controls_keyrelease_AZERTY = {\n 'z': lambda: self.set_speed(\"pitch\", 0),\n 's': lambda: self.set_speed(\"pitch\", 0),\n 'q': lambda: self.set_speed(\"roll\", 0),\n 'd': lambda: self.set_speed(\"roll\", 0),\n 'a': lambda: self.set_speed(\"yaw\", 0),\n 'e': lambda: self.set_speed(\"yaw\", 0),\n 'Key.left': lambda: self.set_speed(\"yaw\", 0),\n 'Key.right': lambda: self.set_speed(\"yaw\", 0),\n 'Key.up': lambda: self.set_speed(\"throttle\", 0),\n 'Key.down': lambda: self.set_speed(\"throttle\", 0)\n }\n\n if self.kbd_layout == \"AZERTY\":\n self.controls_keypress = controls_keypress_AZERTY\n self.controls_keyrelease = controls_keyrelease_AZERTY\n else:\n self.controls_keypress = controls_keypress_QWERTY\n self.controls_keyrelease = controls_keyrelease_QWERTY\n self.key_listener = keyboard.Listener(on_press=self.on_press,\n on_release=self.on_release)\n self.key_listener.start()", "def start_drill(self):\n\n # Enable all motors\n # NOTE 1: Order matters!\n # NOTE 2: BFM not energized since it will cause motor to move but it is pushed back a bit to ensure the feed is all the way back.\n self.bfm_startup()\n self.fmt.energize_motor()\n self.fmb.energize_motor()\n self.ym.energize_motor()\n self.pm.energize_motor()\n # NOTE: BQM may need to be enabled right before running the drill\n self.bqm.energize_motor()\n\n # Enabling motors takes time\n # NOTE: This may need to be optimized after all the motors have been tuned to make the process faster\n time.sleep(2)", "def __init__(\n self,\n **kwargs,\n ):\n\n self._step_counter = 0\n\n # The most recent robot state proto received.\n self._robot_state = None\n\n # Use the instance default worker and the gin configured ip address and\n # port.\n serial_port = next(list_ports.grep(\".*ttyACM0.*\")).device\n self._hardware_interface = interface.Interface(serial_port)\n time.sleep(0.25)\n self._hardware_interface.set_joint_space_parameters(kp=50.0, kd=5.0, max_current=7.0)\n super().__init__(**kwargs)\n self._clock = time.time", "def initialize_multiprocessing(self):\n if self.multiprocessing_controller is not None:\n MPControl.set_multiprocess_engine(self.multiprocessing_controller)\n MPControl.connect()", "def autonomousInit(self):\n '''\n self.cumulativeTime=0\n self.totalTime=0\n self.dataSet=[[-0.5,0,1,-1.0],[0.3,0.4,1,1.0],[-0.5,0,1,-1.0]]\n for i in self.dataSet:\n self.totalTime+=i[2]\n self.intervals = 0\n self.currentTime = 0\n for i in range(0,len(self.dataSet)):\n self.dataSet[i].append([self.currentTime,self.currentTime+self.dataSet[i][2]])\n self.currentTime+=self.dataSet[i][2]\n for i in self.dataSet:\n if i[3]==1.0:\n i.append(\"Forward\")\n if 
i[3]==-1.0:\n i.append(\"Backward\")\n \n self.timer.reset()\n self.timer.start()\n '''\n self.timer.reset()\n self.timer.start()\n\n #self.auto = self.chooser.getSelected()\n self.auto = 6\n self.autoState = 0\n #self.auto = 1\n\n self.EC1.reset()\n \n\n #self.auto = self.chooser.getSelected()", "def __init__(self, pads):\n mfioCommon.__init__(self, pads)\n\n mfio_width = len(pads)\n #\n mfio_o = Signal(mfio_width)\n mfio_oe = Signal(mfio_width)\n mfio_i = Signal(mfio_width)\n # create single pin tristate buffers\n for b in range(mfio_width):\n self.submodules += mfioSinglePin(pads, b, mfio_i[b], mfio_o[b], mfio_oe[b]) \t\n\n # Wishbone \n self.bus = bus = wishbone.Interface()\n\n #\n sel = Signal(mfio_width)\n inbit = Signal(1)\n\n # todo: dynamic address width calc to optimize the decode logic\n addr_width = 12\n # 1024 IO max\n seladr = Signal(10)\n\n # 10 bits of address = 1024 pins max\n self.comb += seladr.eq(self.bus.adr[:10]) \n \n # address decoder\n for b in range(mfio_width):\n self.comb += sel[b].eq(seladr == b)\n\n self.comb += inbit.eq( (mfio_i & sel) != 0 )\n\n # Read bit\n rdbus = Signal(32)\n self.comb += [\n rdbus[0].eq(inbit),\n bus.dat_r.eq(rdbus)\n ]\t\n\n # process output \n outbit = Signal(1)\n oebit = Signal(1)\n wren = Signal(1)\n\n # PINAPI 1.0 compatible: 0 = drive 0, 1 drive 1, 3 = HiZ\n self.comb += outbit.eq( bus.dat_w[0] )\n self.comb += oebit.eq ( ~bus.dat_w[1] )\n\n # write enable\n self.comb += wren.eq(self.bus.stb & self.bus.cyc & self.bus.we) \n\n for b in range(mfio_width):\n self.sync += If(wren & sel[b], mfio_o[b].eq(outbit), mfio_oe[b].eq(oebit) )\n\n seq = [\n (1, [bus.ack.eq(1)]), #\n (1, [bus.ack.eq(0)]), #\n (0, []),\n ]\n \n t, tseq = 0, []\n for dt, a in seq:\n tseq.append((t, a))\n t += dt\n\n self.sync += timeline(bus.cyc & bus.stb, tseq)", "def setup():\n GPIO.setmode(GPIO.BCM)\n for pin in [config.gpio_pin_p1_stretch,\n config.gpio_pin_p1_serve,\n config.gpio_pin_p2_stretch,\n config.gpio_pin_p2_serve]:\n GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n input_reader_thread = threading.Thread(target=input_reader_worker)\n input_reader_thread.setDaemon(True)\n input_reader_thread.start()", "def __init__(self):\n # Global attributes\n self.ON = {\"RED\":[0], \"GREEN\":[2], \"YELLOW\":[4], \"BLINK\":[6], \"NORMAL\":[2], \"WARNING\":[2,6], \"CRITICAL\":[4], \"ERROR\":[0]}\n self.OFF = {\"RED\":[1], \"GREEN\":[3], \"YELLOW\":[5], \"BLINK\":[5], \"NORMAL\":[3], \"WARNING\":[3,5], \"CRITICAL\":[5], \"ERROR\":[1]}\n\n # Indicator topic\n topic = rospy.get_param(rospy.get_name() + \"/indicator_topic\", \"/tower_lights_cmd\")\n # Namespace fixing\n if (topic[0] != '/'): topic = rospy.get_name() + \"/\" + topic\n\n # Starting publisher\n self.indicator_publisher = rospy.Publisher(topic, Int32, queue_size=100)\n rospy.sleep(0.8) # Publisher initialization tiom\n\n # Turn off all indications\n for state in self.OFF:\n for cmd in self.OFF[state]:\n self.publish_cmd(cmd)\n \n # Start indicator thread\n self.event = threading.Condition()\n thread = threading.Thread(target=self.indicator_thread)\n thread.start()\n\n # Initialize default indication\n self.current_indication = \"NORMAL\"\n self.indication = \"NORMAL\"\n for i in self.ON[self.current_indication]:\n self.publish_cmd(i)", "def ON(self):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.PIN, GPIO.OUT)\n GPIO.output(self.PIN, True)\n self.STATUS = \"ON\"", "def servo_on(self):\n self.logger.info('Setting servo ON')\n self.electronics.move_servo(1)\n self.config['servo']['status'] = 1" ]
[ "0.78306526", "0.7692866", "0.72555536", "0.71739495", "0.7074802", "0.6707869", "0.66933286", "0.6581241", "0.6572955", "0.6535936", "0.6500315", "0.6482761", "0.64587724", "0.6382999", "0.63571215", "0.63376045", "0.6329462", "0.63277125", "0.630661", "0.6273071", "0.6264633", "0.62354857", "0.62150645", "0.6214874", "0.6168147", "0.61611587", "0.61283207", "0.6117211", "0.6112222", "0.6106163", "0.610089", "0.60987645", "0.60946727", "0.60803604", "0.604001", "0.603366", "0.6031487", "0.6025536", "0.60248816", "0.6009659", "0.5988311", "0.5983788", "0.5982729", "0.5980181", "0.59778327", "0.5975929", "0.5966954", "0.5959579", "0.59331703", "0.5925985", "0.59227186", "0.59203327", "0.5918329", "0.5915858", "0.5909719", "0.58959746", "0.58950025", "0.58784884", "0.5872167", "0.58689016", "0.58640885", "0.5861795", "0.5859207", "0.5856724", "0.5845705", "0.58384717", "0.58307254", "0.58267045", "0.5818454", "0.5805956", "0.58035445", "0.57793915", "0.57721853", "0.5764711", "0.5754981", "0.57489294", "0.5744258", "0.57368666", "0.5729641", "0.5725064", "0.5720442", "0.57095057", "0.5668773", "0.5668707", "0.5660118", "0.5653124", "0.56504637", "0.5649591", "0.5641128", "0.56387943", "0.56363463", "0.5622852", "0.5622216", "0.5622067", "0.56184226", "0.5615821", "0.56146294", "0.560608", "0.559561", "0.557701" ]
0.72353333
3
pinForward is the forward Pin, so we change its duty cycle according to speed.
def forward(self, speed):
    self.pwm_backward.ChangeDutyCycle(0)
    self.pwm_forward.ChangeDutyCycle(speed)
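A note on the pattern this row's document belongs to (the same pattern appears in several of the negatives below): the driver keeps one PWM channel per direction pin and, to move in a given direction, zeroes the opposing channel while setting the requested duty cycle on the active one. The stand-alone sketch below is an editorial illustration of that pattern, not part of the dataset row; the BCM pin numbers 17/27 are assumptions for the example only, and the 100 Hz PWM frequency simply mirrors the GPIO.PWM(pin, 100) calls seen in the snippets.

import RPi.GPIO as GPIO

PIN_FORWARD = 17    # assumed forward-direction pin (BCM numbering)
PIN_BACKWARD = 27   # assumed backward-direction pin (BCM numbering)

GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN_FORWARD, GPIO.OUT)
GPIO.setup(PIN_BACKWARD, GPIO.OUT)

# One PWM object per direction pin, both idling at 0 % duty cycle.
pwm_forward = GPIO.PWM(PIN_FORWARD, 100)
pwm_backward = GPIO.PWM(PIN_BACKWARD, 100)
pwm_forward.start(0)
pwm_backward.start(0)

def forward(speed):
    # Zero the opposing channel, then drive the forward pin at `speed` (0-100).
    pwm_backward.ChangeDutyCycle(0)
    pwm_forward.ChangeDutyCycle(speed)

def backward(speed):
    # Mirror image of forward(): the backward pin carries the duty cycle.
    pwm_forward.ChangeDutyCycle(0)
    pwm_backward.ChangeDutyCycle(speed)

Zeroing the opposing channel before raising the active one avoids commanding both motor-driver inputs at once, which is the same precaution the two-pin snippets among the negatives take.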
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, True)\n GPIO.output(11, False)\n GPIO.output(13, True)\n GPIO.output(15, False)\n # time.sleep(sec)\n motor_direction = 'Forward'\n return motor_direction", "def __init__(self, pinForward, pinBackward, pinControl):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControl = pinControl\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControl, GPIO.OUT)\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n GPIO.output(self.pinControl,GPIO.HIGH)", "def forward_left(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed) \n self.pwm_right.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(100)", "def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)", "def forward(speed, bias, biasDir):\n\t# todo: check directions for me please\n\tif biasDir == 1:\n rightMotor.run_direct(duty_cycle_sp=speed+bias)\n leftMotor.run_direct(duty_cycle_sp=speed)\n elif biasDir == -1:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed+bias)\n else:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed)", "def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)", "def motorDirection(self, motorPin, direction):\n # print \"motorPin: \", motorPin\n # print \"direction: \", direction\n GPIO.output(motorPin, direction)", "def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n self.pwm_A.start(100)\n 
self.pwm_A.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.controller.forward(speed)", "def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError", "def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)", "def forward_right(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(100)", "def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)", "def fwd(dist=0): #distance is in cm\n try:\n if dist>0:\n # this casting to int doesn't seem necessary\n pulse=int(PPR*(dist//WHEEL_CIRC) )\n enc_tgt(1,1,pulse)\n except Exception as e:\n print (\"gopigo fwd: {}\".format(e))\n pass\n return write_i2c_block(ADDRESS,motor_fwd_cmd+[0,0,0])", "def startAcceleratingForward(self,event):\n self.isAcceleratingForward=True", "def steer(direction):\n if direction == 1:\n steerMotor.run(Adafruit_MotorHAT.FORWARD)\n steerMotor.setSpeed(255)\n if direction == -1:\n steerMotor.run(Adafruit_MotorHAT.BACKWARD)\n steerMotor.setSpeed(255)\n if direction == 0:\n steerMotor.setSpeed(0)\n steerMotor.run(Adafruit_MotorHAT.RELEASE)", "def move_forward(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.anticlockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.clockwise_rotate(speed + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.anticlockwise_rotate(speed + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.clockwise_rotate(speed)", "def forward_button(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=int(left_speed))\n self.right_motor.run_forever(speed_sp=int(right_speed))", "def forward(self, speed):\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rl' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fl' + self.postfix], -speed,\n ONE_SHOT_MODE)", "def GET_forward(self):\n self.roomba.DriveStraight(pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(pyrobot.VELOCITY_FAST)", "def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)", "def turn_90degrees(self, direction):\n if direction == \"right\" or direction == 1:\n self.myspeedctrl.send_speed(0,1)\n elif direction == \"left\" or direction == 2:\n self.myspeedctrl.send_speed(0,-1)\n rospy.sleep(1.61) #value found by trail and error\n self.myspeedctrl.send_speed(0,0)", "def drive_forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def set_pin_direction(self, pin, direction):\n pin = pin - 1\n if pin < 8:\n self.__port_a_direction = self.__helper.updatebyte(\n self.__port_a_direction, pin, direction)\n self.__bus.write_byte_data(\n 
self.__ioaddress, self.IODIRA, self.__port_a_direction)\n else:\n self.__port_b_direction = self.__helper.updatebyte(\n self.__port_b_direction, pin - 8, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, self.__port_b_direction)\n return", "def input_forward(self, joy_input):\n if self.saved[joy_input]:\n value = self.saved[joy_input]\n else:\n value = self.inputs[joy_input]\n yaw_pwm = np.interp(value, [-1, 1], [0, Joystick.MAX_YAW_PWM])\n print(\"(input forward) setting yaw pwm to \" + str(yaw_pwm))\n self.publish(Topic.YAW_PWM, yaw_pwm)", "def increment_speed(self):\n self.speed += 0.0004", "def move_forward(self, speed):\n\n # Clamp the speed\n speed = clamp(delta_unit(speed), 0, delta_unit(Car.max_speed))\n\n # Appends the speed according to the direction\n rad = np.radians(self.direction)\n self.fx += speed * np.cos(rad)\n self.fy += speed * np.sin(rad)\n\n # Set marker to move\n self.moved = True", "def change_motor_speed(self, speed=0.0):\r\n if not self.enabled:\r\n self.set_neutral(braked=False)\r\n return\r\n\r\n # logging.info(\"{} Motor Speed: {}\".format(self.motor_name, speed))\r\n self.current_speed = speed # Store current set speed\r\n\r\n # If speed is < 0.0, we are driving in reverse.\r\n self.forward = True\r\n if speed < 0.0:\r\n # Normalise speed value to be in range [0, 100]\r\n speed = -speed\r\n # Store direction\r\n self.forward = False\r\n\r\n # Apply a factor to the speed to limit speed\r\n speed *= self.speed_factor\r\n\r\n # Set motor directional pins\r\n if self.forward:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 1)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 0)\r\n else:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 0)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 1)\r\n\r\n # Convert speed into PWM duty cycle\r\n # and clamp values to min/max ranges.\r\n dutycycle = speed\r\n if dutycycle < 0.0:\r\n dutycycle = 0.0\r\n elif dutycycle > self.max_speed:\r\n dutycycle = self.max_speed\r\n\r\n # Change the PWM duty cycle based on fabs() of speed value.\r\n self.PWM.ChangeDutyCycle(dutycycle)", "def skipp(self):\n for x in range(4):\n self.fwd(right=100, left=100)\n time.sleep(.5)\n self.servo(1000)\n time.sleep(.1)\n self.servo(2000)\n time.sleep(.1)\n self.fwd(right=-100, left=-100)\n time.sleep(.1)\n self.servo(-1000)\n self.stop()", "def drive(self,direction, speed=100) -> None:\n if direction == 1:\n driveMotor.run(Adafruit_MotorHAT.FORWARD)\n driveMotor.setSpeed(speed)\n if direction == -1:\n driveMotor.run(Adafruit_MotorHAT.BACKWARD)\n driveMotor.setSpeed(speed)\n if direction == 0:\n driveMotor.setSpeed(0)\n driveMotor.run(Adafruit_MotorHAT.RELEASE)", "def drive(self,direction, speed=100):\n if direction == 1:\n self.leftMotor.run(Adafruit_MotorHAT.FORWARD)\n self.rightMotor.run(Adafruit_MotorHAT.FORWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == -1:\n self.leftMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.rightMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == 0:\n self.leftMotor.setSpeed(0)\n self.rightMotor.setSpeed(0)\n self.leftMotor.run(Adafruit_MotorHAT.RELEASE)\n self.rightMotor.run(Adafruit_MotorHAT.RELEASE)", "def move_forward(self, val):\n val = val * 180 / math.pi\n print(\"gyro diff\", self.gyro - val)\n print(\"gyrof\", self.gyro)\n if math.fabs(self.gyro - val) > 0.6:\n if self.gyro - val > 0:\n self.om_right = self.om_right - 0.7\n self.om_left = 
self.om_left + 0.5\n self.set_speed(self.om_left, self.om_right)\n print(\"om_l\", self.om_left)\n print(\"om_r\", self.om_right)\n else:\n self.om_right = self.om_right + 0.3\n self.om_left = self.om_left - 0.5\n self.set_speed(self.om_left, self.om_right)\n print(\"om_l\", self.om_left)\n print(\"om_r\", self.om_right)\n else:\n self.om_right = 10\n self.om_left = 10", "def forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def right_forward(self):\n self.right_motor.run_forever(speed_sp=self.MAX_SPEED)", "def left(self, speed):\n self.pwm_right.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(speed)", "def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)", "def drive_forward(self):\n print(f\"{self.make.title()} is now driving forward.\")", "def __init__(self, pwm_pin, dir_pin_1, dir_pin_2, pwm_freq):\n\t\tself._pwm_pin = pwm_pin # PWM input pin.\n\t\tself._dir_pin_1 = dir_pin_1 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._dir_pin_2 = dir_pin_2 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._pwm_freq = pwm_freq # PWM cycle.\n\n\t\tself._last_dir = 's' # Last rotation direction of this wheel. 's' indicates stop.\n\t\tself._last_dc_val = 0 # Last duty cycle value.\n\t\tself._current_dc_val = 0 # Current duty cycle value.\n\n\t\tGPIO.setmode(GPIO.BOARD)\n\n\t\t# Set the direction control GPIO output mode.\n\t\tGPIO.setup(self._pwm_pin, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_1, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_2, GPIO.OUT)\n\n\t\t# Inits PWM pin.\n\t\tself._motor_pwm = GPIO.PWM(self._pwm_pin, self._pwm_freq) # pwm_freq: Hz\n\t\tself._motor_pwm.start(0) # Set duty cycle to 0.", "def right_forward(self, state, speed):\n if state:\n self.right_motor.run_forever(speed_sp=speed)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n else:\n self.right_motor.stop()\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.BLACK)", "def moveForward(self):\n if self.onGround:\n self.vx = 4", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])", "def __init__(self, pin1=24, pin2=28, pin3=25, pin4=33):\n self.GP = GPIOProcessor()\n self.pin1 = self.GP.getPin(pin1)\n self.pin2 = self.GP.getPin(pin2)\n self.pin3 = self.GP.getPin(pin3)\n self.pin4 = self.GP.getPin(pin4)\n self.pinl = [self.pin1, self.pin2, self.pin3, self.pin4]\n\n for k in range(4):\n self.pinl[k].out()\n\n self.speed = 100.0", "def forward(self, forward):\n\n self._forward = forward", "def __spur_on_if_needed(self):\n if len(self.waypoints) < 2:\n return\n next_speed = (get_waypoint_speed(self.waypoints[0]) +\n get_waypoint_speed(self.waypoints[1])) / 2.0\n set_waypoint_speed(self.waypoints[0], next_speed)", "def slither(self):\n #writedown where we started\n starting_direction = self.get_heading()\n #start driving forward\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.fwd()\n # throttl down the left motor\n for power in range(self.LEFT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n 
time.sleep(.5)\n #throttle up the left while lowring the right\n for power in range(60, self.LEFT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n time.sleep(.5)\n # throttl down the right motor\n for power in range(self.RIGHT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n #throttle up the right while lowring the right\n for power in range(60, self.RIGHT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n \n #straighten out\n while self.get_heading() != starting_direction:\n #if I need to veer right\n if self.get_heading() < starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 90)\n self.set_motor_power(self.MOTOR_RIGHT, 60)\n #if I need to veer left\n elif self.get_heading() > starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 60)\n self.set_motor_power(self.MOTOR_RIGHT, 90)\n \n time.sleep(.1)\n self.stop()", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "def toggleIpforward(v):\n file_path = \"/proc/sys/net/ipv4/ip_forward\"\n with open(file_path, \"w\") as f:\n if v.ipForward:\n print(0, file=f)\n v.ipForward = False\n else:\n print(1, file=f)\n v.ipForward = True\n return", "def set_duty_cycle(self, pin, dutycycle):\n raise NotImplementedError", "def activatePinReading(self):\n\n for pin in self.pinsToMeasure:\n arduino.samplePinDuringCapture(self.f, self.pinMap[pin], self.wallClock)", "def update(self):\n bondState = self._bond.getDeviceState(self._deviceId)\n if 'power' in bondState:\n self._state = True if bondState['power'] == 1 else False\n if self._state and bondState['speed'] in self._speed_name_by_value:\n self._attributes['current_speed'] = self._speed_name_by_value[bondState['speed']]\n else:\n self._attributes['current_speed'] = SPEED_OFF\n\n if 'direction' in bondState:\n if bondState['direction'] == Directions.REVERSE:\n self._attributes['current_direction'] = \"reverse\"\n else:\n self._attributes['current_direction'] = \"forward\"", "def _enable_pin(pin, direction):\n _write_value(pin, \"{}/export\".format(_path_prefix))\n _write_value(direction, \"{0}/gpio{1}/direction\".format(_path_prefix, pin))", "def togglePWMPinEnable(self, PWMpin):\n bitPos = PWMpin + 8\n mask = 1 << bitPos\n self._injectFault(\"PWM1PCR\",self.PCR,mask)", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def strut(self):\n self.fwd(left=50, right=50)\n for x in range(2):\n self.servo(1000)\n time.sleep(.1) \n self.servo(1500) # Look Straight\n time.sleep(1)\n self.servo(2000)\n time.sleep(.1)\n self.servo(1500)", "def _get_forward_speed(self):\n\n velocity = self._vehicle.get_velocity()\n transform = self._vehicle.get_transform()\n vel_np = np.array([velocity.x, velocity.y, velocity.z])\n pitch = np.deg2rad(transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n orientation = np.array([np.cos(pitch) * np.cos(yaw), np.cos(pitch) * np.sin(yaw), np.sin(pitch)])\n speed = np.dot(vel_np, orientation)\n return speed", "def left_forward(self):\n self.left_motor.run_forever(speed_sp=self.MAX_SPEED)", "def move_forward():\n twister = Twist(linear=Vector3(x=0.5,y=0,z=0),angular=Vector3(x=0,y=0,z=0))\n pub.publish(twister)", "def forward(self):\n self.position += 1", "def forward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(param * .3048)\n\t\telse:\n\t\t\tself.linear_move(riu.default_dist * .3048)", "def 
handle_go_forward(entry_box, mqtt_client):\n speed_string = entry_box.get()\n print('sending the go_forward message with speed', speed_string)\n mqtt_client.send_message('go_forward', [speed_string])\n # --------------------------------------------------------------------------", "def update_speed_input_step(self,curr_v):\n \n # update speed inputs \n self.speed_inputs_east*=0\n self.speed_inputs_west*=0\n self.speed_inputs_north*=0\n self.speed_inputs_south*=0\n\n if self.use_eight_directions is True: \n self.speed_inputs_north_east*=0\n self.speed_inputs_north_west*=0\n self.speed_inputs_south_east*=0\n self.speed_inputs_south_west*=0\n \n #speed_values=self.rr[:self.N_e,0] \n speed_values=np.ones((self.N_e,1))\n\n if curr_v[0]>0:\n \n # north-east\n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_east=speed_values \n \n # south-east \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_east=speed_values\n \n #east \n else:\n self.speed_inputs_east=speed_values\n\n\n elif curr_v[0]<0:\n\n # north-west \n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_west=speed_values\n\n # south-west \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_west=speed_values\n \n # west \n else:\n self.speed_inputs_west=speed_values\n\n else: \n # north\n if curr_v[1]>0:\n self.speed_inputs_north=speed_values\n\n # south\n elif curr_v[1]<0:\n self.speed_inputs_south=speed_values", "def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def _birdUpdateHandler(self, pin):\n\n # Update movement value from PIR pin status\n self.p.update(pin)\n\n if(self.p.movement == 1):\n #print(\"Motion detected\")\n self._distanceCheck()\n\n timeO = 0\n while(self.birdHere == 0 and self.p.movement == 1 and timeO < self.timeout):\n sleep(1)\n self._distanceCheck()\n timeO += 1\n\n else:\n #print(\"Motion ended\")\n self.birdHere = 0", "def motorSpeed(self, speedRPM_l, speedRPM_r):\n\n self.motors__Direction(speedRPM_l, speedRPM_r)\n\n speedRPM_l = abs(speedRPM_l)\n speedRPM_r = abs(speedRPM_r)\n\n speedRPM_l = self.constrainSpeed(speedRPM_l)\n speedRPM_r = self.constrainSpeed(speedRPM_r)\n\n# Left motor\n pwmDuration = 4095.0 * speedRPM_l / self.motorMaxRPM\n# print(\"MuleBot.motorSpeed Duration left float: \", pwmDuration)\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration left int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorLeftMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationLeft = pwmDuration\n\n# Right motor\n #Adjust for right motor being faster\n pwmDuration = 4095.0 * speedRPM_r / self.motorMaxRPM\n pwmDuration = pwmDuration * 9727 / 10000 # 98.519113 percent\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration right int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorRightMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationRight = pwmDuration", "def move_forward():\n pass", "def motorsDirection(self, direction):\n\n print (direction)\n if direction == 'r' or direction == 'R':\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)\n print (\"Direction reverse\")\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n print (\"Direction 
forward\")", "def step(self, count, direction):\n for x in range(count):\n for bit in self.mode[::direction]:\n self.pin1.value(bit[0])\n self.pin2.value(bit[1])\n self.pin3.value(bit[2])\n self.pin4.value(bit[3])\n time.sleep(DELAY)\n self.reset()", "def nine_punishment(self):\n self.direction_clock_wise = not self.direction_clock_wise", "def int_handle_encoder(self,pin):\n\t\t#print \"DEBUG: self.int_handle_encoder! for pin: {0}\".format(pin)\n\t\t\t\n\t\tdevice = self.get_device_config_by_pin(pin)\n\t\t\n\t\tencoder_pinA = device['clk']\n\t\tencoder_pinB = device['dt']\n\n\t\tSwitch_A = self.gpio.input(encoder_pinA)\n\t\tSwitch_B = self.gpio.input(encoder_pinB)\n\t\t\n\t\t# debounce\n\t\t#if 'debounce' in self.pins_config[pin]:\n\t\t#\tdebounce = self.pins_config[pin]['debounce'] / 1000\n\t\t#\tprint \"DEBUG: sleeping: {0}\".format(debounce)\n\t\t#\tsleep(debounce)\n\t\t#\t\n\t\t#sleep(0.02)\n\t\t#if not self.gpio.input(encoder_pinA) == self.pins_config[encoder_pinA]:\n\t\t#\treturn None\n\t\t#if not self.gpio.input(encoder_pinB) == self.pins_config[encoder_pinB]:\n\t\t#\treturn None\n\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# now check if state of A or B has changed\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# if not that means that bouncing caused it\t\n\t\tCurrent_A = self.pins_state[encoder_pinA]\n\t\tCurrent_B = self.pins_state[encoder_pinB]\n\t\tif Current_A == Switch_A and Current_B == Switch_B:\t\t# Same interrupt as before (Bouncing)?\n\t\t\treturn\t\t\t\t\t\t\t\t\t\t\t\t# ignore interrupt!\n\n\t\tself.pins_state[encoder_pinA] = Switch_A\t\t\t\t# remember new state\n\t\tself.pins_state[encoder_pinB] = Switch_B\t\t\t\t# for next bouncing check\n\t\t\n\t\t# -------------------------------\n\t\tfunction = self.get_encoder_function_by_pin(pin)\n\t\tself.__mode_reset()\t\t\t\t\t\t\t\t\t# Keep resetting as long as the mode is being used\n\n\t\t# TODO, check if possible to only reset affected timer: self.ms_all[fun['mode_cycle']].\n\t\tif function is not None:\n\t\t\tif (Switch_A and Switch_B):\t\t\t\t\t\t# Both one active? 
Yes -> end of sequence\n\t\t\t\tthis_chg = datetime.now()\n\t\t\t\tdelta = this_chg - self.encoder_last_chg\n\t\t\t\t#print \"diff: {0}\".format(delta.total_seconds())\n\t\t\t\t#print type(delta.total_seconds())\t#float\n\t\t\t\tif delta.total_seconds() < 0.1:\n\t\t\t\t\tself.encoder_fast_count += 1\n\t\t\t\t\t#if self.encoder_fast_count > 3:\n\t\t\t\t\t#\tprint \"FAST {0}\".format(self.encoder_fast_count)\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tprint \"Maybe.....\"\n\t\t\t\telse:\n\t\t\t\t\tself.encoder_fast_count = 0\n\t\t\t\n\t\t\t\t\"\"\" why do we do this?\n\t\t\t\tif self.modes.active_modes():\n\t\t\t\t\t#self.reset_mode_timer(self.modes_old[0]['reset'])\n\t\t\t\t\tif 'reset' in self.mode_sets[function['mode_cycle']]:\n\t\t\t\t\t\tself.reset_mode_timer(self.mode_sets[function['mode_cycle']]['reset'])\n\t\t\t\t\"\"\"\n\n\t\t\t\tf_args = None\n\t\t\t\tif pin == encoder_pinB:\t\t\t\t\t\t\t# Turning direction depends on \n\t\t\t\t\t#COUNTER CLOCKWISE (CCW) or DECREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_ccw' in function:\t\t\t\t\n\t\t\t\t\t\tkey = 'function_fast_ccw'\n\t\t\t\t\t\tkey_args = 'function_fast_ccw_args'\n\n\t\t\t\t\telif 'function_ccw' in function:\n\t\t\t\t\t\tkey = 'function_ccw'\n\t\t\t\t\t\tkey_args = 'function_ccw_args'\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t#CLOCKWISE (CW) or INCREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_cw' in function:\n\t\t\t\t\t\tkey = 'function_fast_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\t\t\t\t\t\n\t\t\t\t\telif 'function_cw' in function:\n\t\t\t\t\t\tkey = 'function_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\n\t\t\t\t# prepare arguments\n\t\t\t\tif key_args in function:\n\t\t\t\t\tif isinstance(function[key_args],str):\n\t\t\t\t\t\t#f_args = [function[key_args]]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\telse:\n\t\t\t\t\t\t#f_args = *function[key_args]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *function[key_args])\n\t\t\t\telse:\n\t\t\t\t\tself.__exec_function_by_code(function[key])\n\t\t\t\t\t\n\t\t\t\t# execute\n\t\t\t\t#self.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\t\t\n\t\t\t\tself.encoder_last_chg = this_chg\n\t\telse:\n\t\t\tself.__printer(\"Encoder, no function\",level=LL_DEBUG)\n\n\n\t\t\tpigpio.pi()", "def DriveMotor():\n\n # cnt overflows at 25KHz (approximately)\n cnt = intbv(0, min = 0, max = CNT_MAX + 1)\n\n # 10-bit duty cycle\n duty_cycle = intbv(0)[10:]\n\n while True:\n yield clk25.posedge, rst_n.negedge\n if rst_n == LOW:\n cnt[:] = 0\n duty_cycle[:] = 0\n dir.next = HIGH_OPTO\n pwm.next = LOW_OPTO\n en_n.next = LOW_OPTO\n else:\n # accept new consign at the beginning of a period\n if cnt == 0:\n # extract duty cycle and direction\n if speed >= 0:\n duty_cycle[:] = speed\n dir.next = HIGH_OPTO\n elif -speed >= CNT_MAX: # handle -1024 case\n duty_cycle[:] = CNT_MAX\n dir.next = LOW_OPTO\n else:\n duty_cycle[:] = -speed\n dir.next = LOW_OPTO\n\n # reached consign?\n if cnt >= duty_cycle:\n pwm.next = LOW_OPTO\n else:\n pwm.next = HIGH_OPTO\n\n if cnt == CNT_MAX:\n cnt[:] = 0\n else:\n cnt += 1\n\n en_n.next = LOW_OPTO", "def set_speed(self, om_left, om_right):\n analog_om_left = self.LEFT_CONST + om_left*4\n analog_om_right = self.RIGHT_CONST - om_right*4\n self.servoWriteMicroseconds(self.PIN_LEFT, analog_om_left)\n self.servoWriteMicroseconds(self.PIN_RIGHT, analog_om_right)", "def go(self, position):\n if self._is_on:\n val = min(180.0, position)\n val = max(0.0, position)\n val = 
(val / 180.0) * (self._max_duty - self._min_duty) + self._min_duty\n val = val * 100.0\n self._pwms.set_duty(self._pin_index, val)\n else:\n raise Exception(\"You must turn the servo on by calling the `on()` method before you can tell the servo to `go()`!\")", "def advanceTan():\n global tanBallX, speed\n tanBallX += speed\n if tanBallX <= -4:\n # Reached the bottom - switch directions\n tanBallX = -4\n speed = -speed\n elif tanBallX >= 2.8:\n # Reached the top - switch directions\n tanBallX = 2.8\n speed = -speed", "def forward(self, distance):\n self.logger.debug(\"forward \" + str(distance))", "def _get_v0x01_v0x04_speed(self):\n fts = self.features\n pfts = PortFeatures01\n if fts and fts & pfts.OFPPF_10GB_FD:\n return 10 * 10**9 / 8\n if fts and fts & (pfts.OFPPF_1GB_HD | pfts.OFPPF_1GB_FD):\n return 10**9 / 8\n if fts and fts & (pfts.OFPPF_100MB_HD | pfts.OFPPF_100MB_FD):\n return 100 * 10**6 / 8\n if fts and fts & (pfts.OFPPF_10MB_HD | pfts.OFPPF_10MB_FD):\n return 10 * 10**6 / 8\n return None", "def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)", "def stream_function(self, X, Y):\n self.psi = (self.strength / (2 * np.pi) *\n np.arctan2((Y - self.yc), (X - self.xc)))", "def stopAcceleratingForward(self,event):\n self.isAcceleratingForward=False", "def update_and_publish(self):\n # 1. Find next_waypoint based on ego position & orientation\n if self._update_next_waypoint():\n\n # 2. Generate the list of next LOOKAHEAD_WPS waypoints\n num_base_wp = len(self.base_waypoints)\n last_base_wp = num_base_wp-1\n waypoint_idx = [idx % num_base_wp for idx in range(self.next_waypoint,self.next_waypoint+LOOKAHEAD_WPS)]\n final_waypoints = [self.base_waypoints[wp] for wp in waypoint_idx]\n\n # 3. If there is a red light ahead, update velocity for them\n if self.stop_on_red:\n # Start from original velocities\n self.restore_velocities(waypoint_idx)\n try:\n red_idx = waypoint_idx.index(self.red_light_waypoint)\n self.decelerate(final_waypoints, red_idx, self.stop_distance)\n except ValueError:\n # No red light available: self.red_light_waypoint is None or not in final_waypoints\n red_idx = None\n if debugging:\n v = self.get_waypoint_velocity(final_waypoints, 0)\n rospy.loginfo(\"Target velocity: %.1f, RL:%s wps ahead\", v, str(red_idx))\n\n # 3b. If we are close to the end of the circuit, make sure that we stop there\n if self.force_stop_on_last_waypoint or self.base_wp_orig_v[-1] < 1e-5:\n try:\n last_wp_idx = waypoint_idx.index(last_base_wp)\n self.decelerate(final_waypoints, last_wp_idx, 0)\n except ValueError:\n # Last waypoint is not one of the next LOOKAHEAD_WPS\n pass\n\n # 4. 
Publish waypoints to \"/final_waypoints\"\n self.publish_msg(final_waypoints)", "def right(self, speed):\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(speed)", "def set_pin_pullup(self, pin, value):\n pin = pin - 1\n if pin < 8:\n self.__port_a_pullup = self.__helper.updatebyte(\n self.__port_a_pullup, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUA, self.__port_a_pullup)\n else:\n self.__port_b_pullup = self.__helper.updatebyte(\n self.__port_b_pullup, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUB, self.__port_b_pullup)\n return", "def set_speed(self,speed):\n self.speed_p = speed", "def pwm(self, index, on=None, off=None):\n raise NotImplementedError()", "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def UpdateForward(self, deltaT):\n self.position += self.velocity * deltaT\n self.velocity += self.acceleration * deltaT", "def motors__Direction(self, speed_l, speed_r):\n\n if speed_l >= 0:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n\n if speed_r >= 0:\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n else :\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)", "def _get_v0x04_speed(self):\n fts = self.features\n pfts = PortFeatures04\n if fts and fts & pfts.OFPPF_1TB_FD:\n return 10**12 / 8\n if fts and fts & pfts.OFPPF_100GB_FD:\n return 100 * 10**9 / 8\n if fts and fts & pfts.OFPPF_40GB_FD:\n return 40 * 10**9 / 8\n return None", "def step(self):\n if self.change_rate != 0:\n self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()\n\n if self.speed < 0.5 * self._initial_speed:\n self.speed = 0.5 * self._initial_speed\n if self.speed > 2.0 * self._initial_speed:\n self.speed = 2.0 * self._initial_speed\n else:\n pass", "def __init__(self, forward):\n self.forward = forward\n self.kp = 0.0\n self.ki = 0.0\n self.kd = 0.0\n self.p_on_e = False\n self.out_min = 0.0\n self.out_max = 0.0\n self.iterm = 0.0\n self.output = 0.0\n self.set_point = 0.0\n self.last_time = 0.0\n self.last_input = 0.0\n self.init_input = 0.0", "def increase_car_speed(self):\r\n self.car_speed += 5", "def left_forward(self, state, speed):\n if state:\n self.left_motor.run_forever(speed_sp=speed)\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n else:\n self.left_motor.stop()\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.BLACK)", "def settle(self):\n if (self.angle >= self.max_angle) or (\n self.angle <= -self.max_angle\n ): # time to reverse\n print(\"reverse\", self.angle, self.max_angle)\n self.speed *= -0.9 # damped\n self.max_angle *= 0.9\n if self.speed > 0:\n self.angle = self.max_angle\n else:\n self.angle = -self.max_angle\n\n self.angle += radians(self.speed)\n print(self.angle, self.max_angle, self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)", "def rotate_servo_rel(pi, pin, pct):\n try:\n pw_old = pi.get_servo_pulsewidth(pin)\n except:\n pw_old = 0 # no PWM has been set yet, so assume 0 \n pct_old = pulsewidth2pct(pw_old)\n if pct_old == -25: # no PWM output commanded, go to center first to get a reference point\n pi.set_servo_pulsewidth(pin, pct2pulsewidth(50))\n pct_old = pulsewidth2pct(pi.get_servo_pulsewidth(pin))\n pct_cmd = pct_old + pct\n # saturate input to protect servo \n if pct_cmd < 10:\n pct_cmd = 10\n elif pct_cmd > 90:\n pct_cmd = 90\n pi.set_servo_pulsewidth(pin, 
pct2pulsewidth(pct_cmd))", "def move_forward(self, distance):\n quad_offset = self.quad_offset_mapping['forward']\n client.moveByVelocityAsync(self.velocity * quad_offset[0], self.velocity * quad_offset[1],\n 0.15, distance/self.velocity).join()\n # if self.logging:\n # self.log_arr.append(\"forward\")", "def updatePWM(self):\n v_dc = self.dcmotorSpeed * self.dcmotor_sgn # changed \"vr\" to \"v_dc\", \"rightSpeed\" to \"dcmotorSpeed\" and \"right_sgn\" to dcmotor_sgn\", RFMH_2019_02_26\n pwm_dc = self.PWMvalue(v_dc, self.DC_MOTOR_MIN_PWM,\n self.DC_MOTOR_MAX_PWM) # changed \"pwmr\" to \"pwm_dc\" and \"vr\" to \"v_dc\" and adjusted both orange constants to \"DC_MOTOR_MIN_PWM\" AND \"DC_MOTOR_MAX_PWM\", RFMH_2019_02_26\n\n # TODO: Fix this debug message. I am trying to port this code over from an old version, and I do not know\n # what v and u are supposed to be here. Timothy Scott, 5.11.2019\n # if self.debug: # where the duck does the \"u\" come from?!?, RFMH_2019_02_26\n # print(\"v = %5.3f, u = %5.3f, v_dc = %5.3f, pwm_dc = %3d\" % (\n # v, u, v_dc, pwm_dc)) # deleted \"vl\" and \"pwml\" and adjust \"vr\" to \"v_dc\" to \"pwm_dc\"\n\n if math.fabs(v_dc) < self.SPEED_TOLERANCE: # changed v_r to v_dc in if loop , RFMH_2019_02_28\n DcMotorMode = Adafruit_MotorHAT.RELEASE\n pwm_dc = 0\n elif v_dc > 0:\n DcMotorMode = Adafruit_MotorHAT.FORWARD\n elif v_dc < 0:\n DcMotorMode = Adafruit_MotorHAT.BACKWARD\n\n if not self.old_pwm_dc == pwm_dc:\n self.DcMotor.setSpeed(pwm_dc) # changed rightMotor to DcMotor and pwmr to pwm_dc , RFMH_2019_02_28\n self.DcMotor.run(DcMotorMode)\n\n self.old_pwm_dc = pwm_dc", "def reverse(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, False)\n GPIO.output(11, True)\n GPIO.output(13, False)\n GPIO.output(15, True)\n # time.sleep(sec)\n motor_direction = 'Reverse'\n return motor_direction", "def set_led(self, pin, value=0):\n value = self.int_lim(lower=PWM_MIN, upper=PWM_MAX, value=value) #Standardise the value to our correct range\n if self.iface.connected:\n try:\n self.iface.set_PWM_dutycycle(pin, value)\n except (AttributeError, IOError):\n logging.error(\" Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n else:\n logging.error(\" Interface not connected. Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n return value" ]
[ "0.66786534", "0.66583955", "0.66447824", "0.6522863", "0.6463652", "0.6370594", "0.62973475", "0.6285964", "0.6233026", "0.6130106", "0.6119961", "0.6082686", "0.60227823", "0.6020035", "0.5921472", "0.5857334", "0.58558404", "0.57667553", "0.5758451", "0.5737794", "0.571628", "0.5694123", "0.56780034", "0.56552285", "0.5648163", "0.5646829", "0.55609435", "0.5547905", "0.5525594", "0.5504033", "0.5489867", "0.5474994", "0.5471796", "0.546215", "0.545681", "0.5442292", "0.54369867", "0.5426305", "0.54116637", "0.54039836", "0.53955567", "0.53955567", "0.53632617", "0.53522843", "0.5342727", "0.5337775", "0.5326285", "0.5326135", "0.53181064", "0.53155833", "0.5276137", "0.5275573", "0.5262687", "0.52505773", "0.5247028", "0.52371836", "0.52270997", "0.5222567", "0.52201855", "0.5211416", "0.52087164", "0.5201736", "0.51958144", "0.5193997", "0.5192996", "0.5191104", "0.5190375", "0.51799774", "0.5179109", "0.5167561", "0.5166904", "0.51662576", "0.5164349", "0.5150207", "0.5149646", "0.5148688", "0.5140731", "0.51379544", "0.5132469", "0.51266366", "0.5122312", "0.51202166", "0.5118013", "0.509914", "0.50988287", "0.50923795", "0.5090297", "0.5089257", "0.50637174", "0.50617045", "0.5054418", "0.5047024", "0.5044288", "0.5032554", "0.5031725", "0.50315034", "0.5030493", "0.5026359", "0.50234956" ]
0.726632
1
pinForward is the forward Pin, so we change its duty cycle according to speed.
def forward_left(self, speed):
    self.pwm_backward.ChangeDutyCycle(0)
    self.pwm_forward.ChangeDutyCycle(speed)
    self.pwm_right.ChangeDutyCycle(0)
    self.pwm_left.ChangeDutyCycle(100)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, True)\n GPIO.output(11, False)\n GPIO.output(13, True)\n GPIO.output(15, False)\n # time.sleep(sec)\n motor_direction = 'Forward'\n return motor_direction", "def __init__(self, pinForward, pinBackward, pinControl):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControl = pinControl\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControl, GPIO.OUT)\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n GPIO.output(self.pinControl,GPIO.HIGH)", "def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)", "def forward(speed, bias, biasDir):\n\t# todo: check directions for me please\n\tif biasDir == 1:\n rightMotor.run_direct(duty_cycle_sp=speed+bias)\n leftMotor.run_direct(duty_cycle_sp=speed)\n elif biasDir == -1:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed+bias)\n else:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed)", "def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)", "def motorDirection(self, motorPin, direction):\n # print \"motorPin: \", motorPin\n # print \"direction: \", direction\n GPIO.output(motorPin, direction)", "def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n 
self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.controller.forward(speed)", "def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError", "def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)", "def forward_right(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(100)", "def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)", "def fwd(dist=0): #distance is in cm\n try:\n if dist>0:\n # this casting to int doesn't seem necessary\n pulse=int(PPR*(dist//WHEEL_CIRC) )\n enc_tgt(1,1,pulse)\n except Exception as e:\n print (\"gopigo fwd: {}\".format(e))\n pass\n return write_i2c_block(ADDRESS,motor_fwd_cmd+[0,0,0])", "def startAcceleratingForward(self,event):\n self.isAcceleratingForward=True", "def move_forward(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.anticlockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.clockwise_rotate(speed + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.anticlockwise_rotate(speed + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.clockwise_rotate(speed)", "def steer(direction):\n if direction == 1:\n steerMotor.run(Adafruit_MotorHAT.FORWARD)\n steerMotor.setSpeed(255)\n if direction == -1:\n steerMotor.run(Adafruit_MotorHAT.BACKWARD)\n steerMotor.setSpeed(255)\n if direction == 0:\n steerMotor.setSpeed(0)\n steerMotor.run(Adafruit_MotorHAT.RELEASE)", "def forward_button(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=int(left_speed))\n self.right_motor.run_forever(speed_sp=int(right_speed))", "def forward(self, speed):\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rl' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fl' + self.postfix], -speed,\n ONE_SHOT_MODE)", "def GET_forward(self):\n self.roomba.DriveStraight(pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(pyrobot.VELOCITY_FAST)", "def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)", "def turn_90degrees(self, direction):\n if direction == \"right\" or direction == 1:\n self.myspeedctrl.send_speed(0,1)\n elif direction == \"left\" or direction == 2:\n self.myspeedctrl.send_speed(0,-1)\n rospy.sleep(1.61) #value found by trail and error\n self.myspeedctrl.send_speed(0,0)", "def drive_forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def set_pin_direction(self, pin, direction):\n pin = pin - 1\n if pin < 8:\n self.__port_a_direction = self.__helper.updatebyte(\n self.__port_a_direction, pin, direction)\n 
self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, self.__port_a_direction)\n else:\n self.__port_b_direction = self.__helper.updatebyte(\n self.__port_b_direction, pin - 8, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, self.__port_b_direction)\n return", "def input_forward(self, joy_input):\n if self.saved[joy_input]:\n value = self.saved[joy_input]\n else:\n value = self.inputs[joy_input]\n yaw_pwm = np.interp(value, [-1, 1], [0, Joystick.MAX_YAW_PWM])\n print(\"(input forward) setting yaw pwm to \" + str(yaw_pwm))\n self.publish(Topic.YAW_PWM, yaw_pwm)", "def increment_speed(self):\n self.speed += 0.0004", "def move_forward(self, speed):\n\n # Clamp the speed\n speed = clamp(delta_unit(speed), 0, delta_unit(Car.max_speed))\n\n # Appends the speed according to the direction\n rad = np.radians(self.direction)\n self.fx += speed * np.cos(rad)\n self.fy += speed * np.sin(rad)\n\n # Set marker to move\n self.moved = True", "def change_motor_speed(self, speed=0.0):\r\n if not self.enabled:\r\n self.set_neutral(braked=False)\r\n return\r\n\r\n # logging.info(\"{} Motor Speed: {}\".format(self.motor_name, speed))\r\n self.current_speed = speed # Store current set speed\r\n\r\n # If speed is < 0.0, we are driving in reverse.\r\n self.forward = True\r\n if speed < 0.0:\r\n # Normalise speed value to be in range [0, 100]\r\n speed = -speed\r\n # Store direction\r\n self.forward = False\r\n\r\n # Apply a factor to the speed to limit speed\r\n speed *= self.speed_factor\r\n\r\n # Set motor directional pins\r\n if self.forward:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 1)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 0)\r\n else:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 0)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 1)\r\n\r\n # Convert speed into PWM duty cycle\r\n # and clamp values to min/max ranges.\r\n dutycycle = speed\r\n if dutycycle < 0.0:\r\n dutycycle = 0.0\r\n elif dutycycle > self.max_speed:\r\n dutycycle = self.max_speed\r\n\r\n # Change the PWM duty cycle based on fabs() of speed value.\r\n self.PWM.ChangeDutyCycle(dutycycle)", "def skipp(self):\n for x in range(4):\n self.fwd(right=100, left=100)\n time.sleep(.5)\n self.servo(1000)\n time.sleep(.1)\n self.servo(2000)\n time.sleep(.1)\n self.fwd(right=-100, left=-100)\n time.sleep(.1)\n self.servo(-1000)\n self.stop()", "def drive(self,direction, speed=100) -> None:\n if direction == 1:\n driveMotor.run(Adafruit_MotorHAT.FORWARD)\n driveMotor.setSpeed(speed)\n if direction == -1:\n driveMotor.run(Adafruit_MotorHAT.BACKWARD)\n driveMotor.setSpeed(speed)\n if direction == 0:\n driveMotor.setSpeed(0)\n driveMotor.run(Adafruit_MotorHAT.RELEASE)", "def drive(self,direction, speed=100):\n if direction == 1:\n self.leftMotor.run(Adafruit_MotorHAT.FORWARD)\n self.rightMotor.run(Adafruit_MotorHAT.FORWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == -1:\n self.leftMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.rightMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == 0:\n self.leftMotor.setSpeed(0)\n self.rightMotor.setSpeed(0)\n self.leftMotor.run(Adafruit_MotorHAT.RELEASE)\n self.rightMotor.run(Adafruit_MotorHAT.RELEASE)", "def move_forward(self, val):\n val = val * 180 / math.pi\n print(\"gyro diff\", self.gyro - val)\n print(\"gyrof\", self.gyro)\n if math.fabs(self.gyro - val) > 0.6:\n if self.gyro - val > 0:\n self.om_right = self.om_right - 
0.7\n self.om_left = self.om_left + 0.5\n self.set_speed(self.om_left, self.om_right)\n print(\"om_l\", self.om_left)\n print(\"om_r\", self.om_right)\n else:\n self.om_right = self.om_right + 0.3\n self.om_left = self.om_left - 0.5\n self.set_speed(self.om_left, self.om_right)\n print(\"om_l\", self.om_left)\n print(\"om_r\", self.om_right)\n else:\n self.om_right = 10\n self.om_left = 10", "def forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def right_forward(self):\n self.right_motor.run_forever(speed_sp=self.MAX_SPEED)", "def left(self, speed):\n self.pwm_right.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(speed)", "def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)", "def drive_forward(self):\n print(f\"{self.make.title()} is now driving forward.\")", "def __init__(self, pwm_pin, dir_pin_1, dir_pin_2, pwm_freq):\n\t\tself._pwm_pin = pwm_pin # PWM input pin.\n\t\tself._dir_pin_1 = dir_pin_1 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._dir_pin_2 = dir_pin_2 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._pwm_freq = pwm_freq # PWM cycle.\n\n\t\tself._last_dir = 's' # Last rotation direction of this wheel. 's' indicates stop.\n\t\tself._last_dc_val = 0 # Last duty cycle value.\n\t\tself._current_dc_val = 0 # Current duty cycle value.\n\n\t\tGPIO.setmode(GPIO.BOARD)\n\n\t\t# Set the direction control GPIO output mode.\n\t\tGPIO.setup(self._pwm_pin, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_1, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_2, GPIO.OUT)\n\n\t\t# Inits PWM pin.\n\t\tself._motor_pwm = GPIO.PWM(self._pwm_pin, self._pwm_freq) # pwm_freq: Hz\n\t\tself._motor_pwm.start(0) # Set duty cycle to 0.", "def right_forward(self, state, speed):\n if state:\n self.right_motor.run_forever(speed_sp=speed)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n else:\n self.right_motor.stop()\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.BLACK)", "def moveForward(self):\n if self.onGround:\n self.vx = 4", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])", "def __init__(self, pin1=24, pin2=28, pin3=25, pin4=33):\n self.GP = GPIOProcessor()\n self.pin1 = self.GP.getPin(pin1)\n self.pin2 = self.GP.getPin(pin2)\n self.pin3 = self.GP.getPin(pin3)\n self.pin4 = self.GP.getPin(pin4)\n self.pinl = [self.pin1, self.pin2, self.pin3, self.pin4]\n\n for k in range(4):\n self.pinl[k].out()\n\n self.speed = 100.0", "def forward(self, forward):\n\n self._forward = forward", "def __spur_on_if_needed(self):\n if len(self.waypoints) < 2:\n return\n next_speed = (get_waypoint_speed(self.waypoints[0]) +\n get_waypoint_speed(self.waypoints[1])) / 2.0\n set_waypoint_speed(self.waypoints[0], next_speed)", "def slither(self):\n #writedown where we started\n starting_direction = self.get_heading()\n #start driving forward\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.fwd()\n # throttl down the left motor\n for power in range(self.LEFT_DEFAULT, 60,-10):\n 
self.set_motor_power(self.MOTOR_LEFT, power)\n time.sleep(.5)\n #throttle up the left while lowring the right\n for power in range(60, self.LEFT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n time.sleep(.5)\n # throttl down the right motor\n for power in range(self.RIGHT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n #throttle up the right while lowring the right\n for power in range(60, self.RIGHT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n \n #straighten out\n while self.get_heading() != starting_direction:\n #if I need to veer right\n if self.get_heading() < starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 90)\n self.set_motor_power(self.MOTOR_RIGHT, 60)\n #if I need to veer left\n elif self.get_heading() > starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 60)\n self.set_motor_power(self.MOTOR_RIGHT, 90)\n \n time.sleep(.1)\n self.stop()", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "def toggleIpforward(v):\n file_path = \"/proc/sys/net/ipv4/ip_forward\"\n with open(file_path, \"w\") as f:\n if v.ipForward:\n print(0, file=f)\n v.ipForward = False\n else:\n print(1, file=f)\n v.ipForward = True\n return", "def set_duty_cycle(self, pin, dutycycle):\n raise NotImplementedError", "def activatePinReading(self):\n\n for pin in self.pinsToMeasure:\n arduino.samplePinDuringCapture(self.f, self.pinMap[pin], self.wallClock)", "def update(self):\n bondState = self._bond.getDeviceState(self._deviceId)\n if 'power' in bondState:\n self._state = True if bondState['power'] == 1 else False\n if self._state and bondState['speed'] in self._speed_name_by_value:\n self._attributes['current_speed'] = self._speed_name_by_value[bondState['speed']]\n else:\n self._attributes['current_speed'] = SPEED_OFF\n\n if 'direction' in bondState:\n if bondState['direction'] == Directions.REVERSE:\n self._attributes['current_direction'] = \"reverse\"\n else:\n self._attributes['current_direction'] = \"forward\"", "def _enable_pin(pin, direction):\n _write_value(pin, \"{}/export\".format(_path_prefix))\n _write_value(direction, \"{0}/gpio{1}/direction\".format(_path_prefix, pin))", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def togglePWMPinEnable(self, PWMpin):\n bitPos = PWMpin + 8\n mask = 1 << bitPos\n self._injectFault(\"PWM1PCR\",self.PCR,mask)", "def strut(self):\n self.fwd(left=50, right=50)\n for x in range(2):\n self.servo(1000)\n time.sleep(.1) \n self.servo(1500) # Look Straight\n time.sleep(1)\n self.servo(2000)\n time.sleep(.1)\n self.servo(1500)", "def _get_forward_speed(self):\n\n velocity = self._vehicle.get_velocity()\n transform = self._vehicle.get_transform()\n vel_np = np.array([velocity.x, velocity.y, velocity.z])\n pitch = np.deg2rad(transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n orientation = np.array([np.cos(pitch) * np.cos(yaw), np.cos(pitch) * np.sin(yaw), np.sin(pitch)])\n speed = np.dot(vel_np, orientation)\n return speed", "def left_forward(self):\n self.left_motor.run_forever(speed_sp=self.MAX_SPEED)", "def move_forward():\n twister = Twist(linear=Vector3(x=0.5,y=0,z=0),angular=Vector3(x=0,y=0,z=0))\n pub.publish(twister)", "def forward(self):\n self.position += 1", "def forward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(param * 
.3048)\n\t\telse:\n\t\t\tself.linear_move(riu.default_dist * .3048)", "def handle_go_forward(entry_box, mqtt_client):\n speed_string = entry_box.get()\n print('sending the go_forward message with speed', speed_string)\n mqtt_client.send_message('go_forward', [speed_string])\n # --------------------------------------------------------------------------", "def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def update_speed_input_step(self,curr_v):\n \n # update speed inputs \n self.speed_inputs_east*=0\n self.speed_inputs_west*=0\n self.speed_inputs_north*=0\n self.speed_inputs_south*=0\n\n if self.use_eight_directions is True: \n self.speed_inputs_north_east*=0\n self.speed_inputs_north_west*=0\n self.speed_inputs_south_east*=0\n self.speed_inputs_south_west*=0\n \n #speed_values=self.rr[:self.N_e,0] \n speed_values=np.ones((self.N_e,1))\n\n if curr_v[0]>0:\n \n # north-east\n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_east=speed_values \n \n # south-east \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_east=speed_values\n \n #east \n else:\n self.speed_inputs_east=speed_values\n\n\n elif curr_v[0]<0:\n\n # north-west \n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_west=speed_values\n\n # south-west \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_west=speed_values\n \n # west \n else:\n self.speed_inputs_west=speed_values\n\n else: \n # north\n if curr_v[1]>0:\n self.speed_inputs_north=speed_values\n\n # south\n elif curr_v[1]<0:\n self.speed_inputs_south=speed_values", "def _birdUpdateHandler(self, pin):\n\n # Update movement value from PIR pin status\n self.p.update(pin)\n\n if(self.p.movement == 1):\n #print(\"Motion detected\")\n self._distanceCheck()\n\n timeO = 0\n while(self.birdHere == 0 and self.p.movement == 1 and timeO < self.timeout):\n sleep(1)\n self._distanceCheck()\n timeO += 1\n\n else:\n #print(\"Motion ended\")\n self.birdHere = 0", "def move_forward():\n pass", "def motorSpeed(self, speedRPM_l, speedRPM_r):\n\n self.motors__Direction(speedRPM_l, speedRPM_r)\n\n speedRPM_l = abs(speedRPM_l)\n speedRPM_r = abs(speedRPM_r)\n\n speedRPM_l = self.constrainSpeed(speedRPM_l)\n speedRPM_r = self.constrainSpeed(speedRPM_r)\n\n# Left motor\n pwmDuration = 4095.0 * speedRPM_l / self.motorMaxRPM\n# print(\"MuleBot.motorSpeed Duration left float: \", pwmDuration)\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration left int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorLeftMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationLeft = pwmDuration\n\n# Right motor\n #Adjust for right motor being faster\n pwmDuration = 4095.0 * speedRPM_r / self.motorMaxRPM\n pwmDuration = pwmDuration * 9727 / 10000 # 98.519113 percent\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration right int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorRightMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationRight = pwmDuration", "def motorsDirection(self, direction):\n\n print (direction)\n if direction == 'r' or direction == 'R':\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)\n print (\"Direction reverse\")\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n 
self.motorDirection(self.motor2DirectionPin, self.motorForward)\n print (\"Direction forward\")", "def step(self, count, direction):\n for x in range(count):\n for bit in self.mode[::direction]:\n self.pin1.value(bit[0])\n self.pin2.value(bit[1])\n self.pin3.value(bit[2])\n self.pin4.value(bit[3])\n time.sleep(DELAY)\n self.reset()", "def DriveMotor():\n\n # cnt overflows at 25KHz (approximately)\n cnt = intbv(0, min = 0, max = CNT_MAX + 1)\n\n # 10-bit duty cycle\n duty_cycle = intbv(0)[10:]\n\n while True:\n yield clk25.posedge, rst_n.negedge\n if rst_n == LOW:\n cnt[:] = 0\n duty_cycle[:] = 0\n dir.next = HIGH_OPTO\n pwm.next = LOW_OPTO\n en_n.next = LOW_OPTO\n else:\n # accept new consign at the beginning of a period\n if cnt == 0:\n # extract duty cycle and direction\n if speed >= 0:\n duty_cycle[:] = speed\n dir.next = HIGH_OPTO\n elif -speed >= CNT_MAX: # handle -1024 case\n duty_cycle[:] = CNT_MAX\n dir.next = LOW_OPTO\n else:\n duty_cycle[:] = -speed\n dir.next = LOW_OPTO\n\n # reached consign?\n if cnt >= duty_cycle:\n pwm.next = LOW_OPTO\n else:\n pwm.next = HIGH_OPTO\n\n if cnt == CNT_MAX:\n cnt[:] = 0\n else:\n cnt += 1\n\n en_n.next = LOW_OPTO", "def nine_punishment(self):\n self.direction_clock_wise = not self.direction_clock_wise", "def int_handle_encoder(self,pin):\n\t\t#print \"DEBUG: self.int_handle_encoder! for pin: {0}\".format(pin)\n\t\t\t\n\t\tdevice = self.get_device_config_by_pin(pin)\n\t\t\n\t\tencoder_pinA = device['clk']\n\t\tencoder_pinB = device['dt']\n\n\t\tSwitch_A = self.gpio.input(encoder_pinA)\n\t\tSwitch_B = self.gpio.input(encoder_pinB)\n\t\t\n\t\t# debounce\n\t\t#if 'debounce' in self.pins_config[pin]:\n\t\t#\tdebounce = self.pins_config[pin]['debounce'] / 1000\n\t\t#\tprint \"DEBUG: sleeping: {0}\".format(debounce)\n\t\t#\tsleep(debounce)\n\t\t#\t\n\t\t#sleep(0.02)\n\t\t#if not self.gpio.input(encoder_pinA) == self.pins_config[encoder_pinA]:\n\t\t#\treturn None\n\t\t#if not self.gpio.input(encoder_pinB) == self.pins_config[encoder_pinB]:\n\t\t#\treturn None\n\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# now check if state of A or B has changed\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# if not that means that bouncing caused it\t\n\t\tCurrent_A = self.pins_state[encoder_pinA]\n\t\tCurrent_B = self.pins_state[encoder_pinB]\n\t\tif Current_A == Switch_A and Current_B == Switch_B:\t\t# Same interrupt as before (Bouncing)?\n\t\t\treturn\t\t\t\t\t\t\t\t\t\t\t\t# ignore interrupt!\n\n\t\tself.pins_state[encoder_pinA] = Switch_A\t\t\t\t# remember new state\n\t\tself.pins_state[encoder_pinB] = Switch_B\t\t\t\t# for next bouncing check\n\t\t\n\t\t# -------------------------------\n\t\tfunction = self.get_encoder_function_by_pin(pin)\n\t\tself.__mode_reset()\t\t\t\t\t\t\t\t\t# Keep resetting as long as the mode is being used\n\n\t\t# TODO, check if possible to only reset affected timer: self.ms_all[fun['mode_cycle']].\n\t\tif function is not None:\n\t\t\tif (Switch_A and Switch_B):\t\t\t\t\t\t# Both one active? 
Yes -> end of sequence\n\t\t\t\tthis_chg = datetime.now()\n\t\t\t\tdelta = this_chg - self.encoder_last_chg\n\t\t\t\t#print \"diff: {0}\".format(delta.total_seconds())\n\t\t\t\t#print type(delta.total_seconds())\t#float\n\t\t\t\tif delta.total_seconds() < 0.1:\n\t\t\t\t\tself.encoder_fast_count += 1\n\t\t\t\t\t#if self.encoder_fast_count > 3:\n\t\t\t\t\t#\tprint \"FAST {0}\".format(self.encoder_fast_count)\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tprint \"Maybe.....\"\n\t\t\t\telse:\n\t\t\t\t\tself.encoder_fast_count = 0\n\t\t\t\n\t\t\t\t\"\"\" why do we do this?\n\t\t\t\tif self.modes.active_modes():\n\t\t\t\t\t#self.reset_mode_timer(self.modes_old[0]['reset'])\n\t\t\t\t\tif 'reset' in self.mode_sets[function['mode_cycle']]:\n\t\t\t\t\t\tself.reset_mode_timer(self.mode_sets[function['mode_cycle']]['reset'])\n\t\t\t\t\"\"\"\n\n\t\t\t\tf_args = None\n\t\t\t\tif pin == encoder_pinB:\t\t\t\t\t\t\t# Turning direction depends on \n\t\t\t\t\t#COUNTER CLOCKWISE (CCW) or DECREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_ccw' in function:\t\t\t\t\n\t\t\t\t\t\tkey = 'function_fast_ccw'\n\t\t\t\t\t\tkey_args = 'function_fast_ccw_args'\n\n\t\t\t\t\telif 'function_ccw' in function:\n\t\t\t\t\t\tkey = 'function_ccw'\n\t\t\t\t\t\tkey_args = 'function_ccw_args'\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t#CLOCKWISE (CW) or INCREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_cw' in function:\n\t\t\t\t\t\tkey = 'function_fast_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\t\t\t\t\t\n\t\t\t\t\telif 'function_cw' in function:\n\t\t\t\t\t\tkey = 'function_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\n\t\t\t\t# prepare arguments\n\t\t\t\tif key_args in function:\n\t\t\t\t\tif isinstance(function[key_args],str):\n\t\t\t\t\t\t#f_args = [function[key_args]]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\telse:\n\t\t\t\t\t\t#f_args = *function[key_args]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *function[key_args])\n\t\t\t\telse:\n\t\t\t\t\tself.__exec_function_by_code(function[key])\n\t\t\t\t\t\n\t\t\t\t# execute\n\t\t\t\t#self.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\t\t\n\t\t\t\tself.encoder_last_chg = this_chg\n\t\telse:\n\t\t\tself.__printer(\"Encoder, no function\",level=LL_DEBUG)\n\n\n\t\t\tpigpio.pi()", "def set_speed(self, om_left, om_right):\n analog_om_left = self.LEFT_CONST + om_left*4\n analog_om_right = self.RIGHT_CONST - om_right*4\n self.servoWriteMicroseconds(self.PIN_LEFT, analog_om_left)\n self.servoWriteMicroseconds(self.PIN_RIGHT, analog_om_right)", "def forward(self, distance):\n self.logger.debug(\"forward \" + str(distance))", "def go(self, position):\n if self._is_on:\n val = min(180.0, position)\n val = max(0.0, position)\n val = (val / 180.0) * (self._max_duty - self._min_duty) + self._min_duty\n val = val * 100.0\n self._pwms.set_duty(self._pin_index, val)\n else:\n raise Exception(\"You must turn the servo on by calling the `on()` method before you can tell the servo to `go()`!\")", "def advanceTan():\n global tanBallX, speed\n tanBallX += speed\n if tanBallX <= -4:\n # Reached the bottom - switch directions\n tanBallX = -4\n speed = -speed\n elif tanBallX >= 2.8:\n # Reached the top - switch directions\n tanBallX = 2.8\n speed = -speed", "def _get_v0x01_v0x04_speed(self):\n fts = self.features\n pfts = PortFeatures01\n if fts and fts & pfts.OFPPF_10GB_FD:\n return 10 * 10**9 / 8\n if fts and fts & (pfts.OFPPF_1GB_HD | pfts.OFPPF_1GB_FD):\n return 10**9 / 8\n if fts and 
fts & (pfts.OFPPF_100MB_HD | pfts.OFPPF_100MB_FD):\n return 100 * 10**6 / 8\n if fts and fts & (pfts.OFPPF_10MB_HD | pfts.OFPPF_10MB_FD):\n return 10 * 10**6 / 8\n return None", "def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)", "def stream_function(self, X, Y):\n self.psi = (self.strength / (2 * np.pi) *\n np.arctan2((Y - self.yc), (X - self.xc)))", "def stopAcceleratingForward(self,event):\n self.isAcceleratingForward=False", "def update_and_publish(self):\n # 1. Find next_waypoint based on ego position & orientation\n if self._update_next_waypoint():\n\n # 2. Generate the list of next LOOKAHEAD_WPS waypoints\n num_base_wp = len(self.base_waypoints)\n last_base_wp = num_base_wp-1\n waypoint_idx = [idx % num_base_wp for idx in range(self.next_waypoint,self.next_waypoint+LOOKAHEAD_WPS)]\n final_waypoints = [self.base_waypoints[wp] for wp in waypoint_idx]\n\n # 3. If there is a red light ahead, update velocity for them\n if self.stop_on_red:\n # Start from original velocities\n self.restore_velocities(waypoint_idx)\n try:\n red_idx = waypoint_idx.index(self.red_light_waypoint)\n self.decelerate(final_waypoints, red_idx, self.stop_distance)\n except ValueError:\n # No red light available: self.red_light_waypoint is None or not in final_waypoints\n red_idx = None\n if debugging:\n v = self.get_waypoint_velocity(final_waypoints, 0)\n rospy.loginfo(\"Target velocity: %.1f, RL:%s wps ahead\", v, str(red_idx))\n\n # 3b. If we are close to the end of the circuit, make sure that we stop there\n if self.force_stop_on_last_waypoint or self.base_wp_orig_v[-1] < 1e-5:\n try:\n last_wp_idx = waypoint_idx.index(last_base_wp)\n self.decelerate(final_waypoints, last_wp_idx, 0)\n except ValueError:\n # Last waypoint is not one of the next LOOKAHEAD_WPS\n pass\n\n # 4. 
Publish waypoints to \"/final_waypoints\"\n self.publish_msg(final_waypoints)", "def right(self, speed):\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(speed)", "def set_pin_pullup(self, pin, value):\n pin = pin - 1\n if pin < 8:\n self.__port_a_pullup = self.__helper.updatebyte(\n self.__port_a_pullup, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUA, self.__port_a_pullup)\n else:\n self.__port_b_pullup = self.__helper.updatebyte(\n self.__port_b_pullup, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUB, self.__port_b_pullup)\n return", "def pwm(self, index, on=None, off=None):\n raise NotImplementedError()", "def set_speed(self,speed):\n self.speed_p = speed", "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def UpdateForward(self, deltaT):\n self.position += self.velocity * deltaT\n self.velocity += self.acceleration * deltaT", "def motors__Direction(self, speed_l, speed_r):\n\n if speed_l >= 0:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n\n if speed_r >= 0:\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n else :\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)", "def _get_v0x04_speed(self):\n fts = self.features\n pfts = PortFeatures04\n if fts and fts & pfts.OFPPF_1TB_FD:\n return 10**12 / 8\n if fts and fts & pfts.OFPPF_100GB_FD:\n return 100 * 10**9 / 8\n if fts and fts & pfts.OFPPF_40GB_FD:\n return 40 * 10**9 / 8\n return None", "def step(self):\n if self.change_rate != 0:\n self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()\n\n if self.speed < 0.5 * self._initial_speed:\n self.speed = 0.5 * self._initial_speed\n if self.speed > 2.0 * self._initial_speed:\n self.speed = 2.0 * self._initial_speed\n else:\n pass", "def __init__(self, forward):\n self.forward = forward\n self.kp = 0.0\n self.ki = 0.0\n self.kd = 0.0\n self.p_on_e = False\n self.out_min = 0.0\n self.out_max = 0.0\n self.iterm = 0.0\n self.output = 0.0\n self.set_point = 0.0\n self.last_time = 0.0\n self.last_input = 0.0\n self.init_input = 0.0", "def increase_car_speed(self):\r\n self.car_speed += 5", "def left_forward(self, state, speed):\n if state:\n self.left_motor.run_forever(speed_sp=speed)\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n else:\n self.left_motor.stop()\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.BLACK)", "def move_forward(self, distance):\n quad_offset = self.quad_offset_mapping['forward']\n client.moveByVelocityAsync(self.velocity * quad_offset[0], self.velocity * quad_offset[1],\n 0.15, distance/self.velocity).join()\n # if self.logging:\n # self.log_arr.append(\"forward\")", "def settle(self):\n if (self.angle >= self.max_angle) or (\n self.angle <= -self.max_angle\n ): # time to reverse\n print(\"reverse\", self.angle, self.max_angle)\n self.speed *= -0.9 # damped\n self.max_angle *= 0.9\n if self.speed > 0:\n self.angle = self.max_angle\n else:\n self.angle = -self.max_angle\n\n self.angle += radians(self.speed)\n print(self.angle, self.max_angle, self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)", "def updatePWM(self):\n v_dc = self.dcmotorSpeed * self.dcmotor_sgn # changed \"vr\" to \"v_dc\", \"rightSpeed\" to \"dcmotorSpeed\" and \"right_sgn\" to dcmotor_sgn\", RFMH_2019_02_26\n pwm_dc = self.PWMvalue(v_dc, self.DC_MOTOR_MIN_PWM,\n self.DC_MOTOR_MAX_PWM) # changed \"pwmr\" to 
\"pwm_dc\" and \"vr\" to \"v_dc\" and adjusted both orange constants to \"DC_MOTOR_MIN_PWM\" AND \"DC_MOTOR_MAX_PWM\", RFMH_2019_02_26\n\n # TODO: Fix this debug message. I am trying to port this code over from an old version, and I do not know\n # what v and u are supposed to be here. Timothy Scott, 5.11.2019\n # if self.debug: # where the duck does the \"u\" come from?!?, RFMH_2019_02_26\n # print(\"v = %5.3f, u = %5.3f, v_dc = %5.3f, pwm_dc = %3d\" % (\n # v, u, v_dc, pwm_dc)) # deleted \"vl\" and \"pwml\" and adjust \"vr\" to \"v_dc\" to \"pwm_dc\"\n\n if math.fabs(v_dc) < self.SPEED_TOLERANCE: # changed v_r to v_dc in if loop , RFMH_2019_02_28\n DcMotorMode = Adafruit_MotorHAT.RELEASE\n pwm_dc = 0\n elif v_dc > 0:\n DcMotorMode = Adafruit_MotorHAT.FORWARD\n elif v_dc < 0:\n DcMotorMode = Adafruit_MotorHAT.BACKWARD\n\n if not self.old_pwm_dc == pwm_dc:\n self.DcMotor.setSpeed(pwm_dc) # changed rightMotor to DcMotor and pwmr to pwm_dc , RFMH_2019_02_28\n self.DcMotor.run(DcMotorMode)\n\n self.old_pwm_dc = pwm_dc", "def rotate_servo_rel(pi, pin, pct):\n try:\n pw_old = pi.get_servo_pulsewidth(pin)\n except:\n pw_old = 0 # no PWM has been set yet, so assume 0 \n pct_old = pulsewidth2pct(pw_old)\n if pct_old == -25: # no PWM output commanded, go to center first to get a reference point\n pi.set_servo_pulsewidth(pin, pct2pulsewidth(50))\n pct_old = pulsewidth2pct(pi.get_servo_pulsewidth(pin))\n pct_cmd = pct_old + pct\n # saturate input to protect servo \n if pct_cmd < 10:\n pct_cmd = 10\n elif pct_cmd > 90:\n pct_cmd = 90\n pi.set_servo_pulsewidth(pin, pct2pulsewidth(pct_cmd))", "def reverse(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, False)\n GPIO.output(11, True)\n GPIO.output(13, False)\n GPIO.output(15, True)\n # time.sleep(sec)\n motor_direction = 'Reverse'\n return motor_direction", "def set_led(self, pin, value=0):\n value = self.int_lim(lower=PWM_MIN, upper=PWM_MAX, value=value) #Standardise the value to our correct range\n if self.iface.connected:\n try:\n self.iface.set_PWM_dutycycle(pin, value)\n except (AttributeError, IOError):\n logging.error(\" Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n else:\n logging.error(\" Interface not connected. Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n return value" ]
[ "0.7267332", "0.7267332", "0.66788316", "0.66588324", "0.6522946", "0.64650464", "0.6370248", "0.6297182", "0.6286759", "0.6234336", "0.6129071", "0.61207354", "0.60830605", "0.6021252", "0.6019744", "0.5922687", "0.58570683", "0.5856794", "0.57674026", "0.5759413", "0.5737956", "0.5714141", "0.5692802", "0.5678496", "0.5654111", "0.56484044", "0.5645786", "0.55621254", "0.5547726", "0.55245554", "0.5504462", "0.5490348", "0.5474369", "0.5472551", "0.5462932", "0.54566246", "0.5440891", "0.54375577", "0.54267484", "0.5412307", "0.5404999", "0.53960466", "0.53960466", "0.5362945", "0.5351662", "0.534471", "0.5337986", "0.5325374", "0.532454", "0.53181136", "0.5314115", "0.52759933", "0.5275445", "0.52611953", "0.52493405", "0.524873", "0.52356386", "0.5229099", "0.52234954", "0.52215767", "0.52128935", "0.52096397", "0.520367", "0.5196193", "0.5194782", "0.51924723", "0.519103", "0.5190705", "0.51802284", "0.51778376", "0.51657194", "0.51654845", "0.51650554", "0.5162503", "0.51498044", "0.51491714", "0.5147926", "0.5141137", "0.51373327", "0.5132007", "0.5127466", "0.512286", "0.51197535", "0.5115778", "0.5098812", "0.50984097", "0.50931364", "0.50915813", "0.5089161", "0.5064066", "0.50609237", "0.5054922", "0.50457007", "0.5045043", "0.5032321", "0.50312126", "0.50299054", "0.5029683", "0.5025306", "0.5022861" ]
0.66452456
4
pinForward is the forward Pin, so we change its duty cycle according to speed.
def forward_right(self, speed):
    self.pwm_backward.ChangeDutyCycle(0)
    self.pwm_forward.ChangeDutyCycle(speed)
    self.pwm_left.ChangeDutyCycle(0)
    self.pwm_right.ChangeDutyCycle(100)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, True)\n GPIO.output(11, False)\n GPIO.output(13, True)\n GPIO.output(15, False)\n # time.sleep(sec)\n motor_direction = 'Forward'\n return motor_direction", "def __init__(self, pinForward, pinBackward, pinControl):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControl = pinControl\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControl, GPIO.OUT)\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n GPIO.output(self.pinControl,GPIO.HIGH)", "def forward_left(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed) \n self.pwm_right.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(100)", "def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)", "def forward(speed, bias, biasDir):\n\t# todo: check directions for me please\n\tif biasDir == 1:\n rightMotor.run_direct(duty_cycle_sp=speed+bias)\n leftMotor.run_direct(duty_cycle_sp=speed)\n elif biasDir == -1:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed+bias)\n else:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed)", "def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)", "def motorDirection(self, motorPin, direction):\n # print \"motorPin: \", motorPin\n # print \"direction: \", direction\n GPIO.output(motorPin, direction)", "def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n 
self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.controller.forward(speed)", "def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError", "def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)", "def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)", "def fwd(dist=0): #distance is in cm\n try:\n if dist>0:\n # this casting to int doesn't seem necessary\n pulse=int(PPR*(dist//WHEEL_CIRC) )\n enc_tgt(1,1,pulse)\n except Exception as e:\n print (\"gopigo fwd: {}\".format(e))\n pass\n return write_i2c_block(ADDRESS,motor_fwd_cmd+[0,0,0])", "def startAcceleratingForward(self,event):\n self.isAcceleratingForward=True", "def steer(direction):\n if direction == 1:\n steerMotor.run(Adafruit_MotorHAT.FORWARD)\n steerMotor.setSpeed(255)\n if direction == -1:\n steerMotor.run(Adafruit_MotorHAT.BACKWARD)\n steerMotor.setSpeed(255)\n if direction == 0:\n steerMotor.setSpeed(0)\n steerMotor.run(Adafruit_MotorHAT.RELEASE)", "def move_forward(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.anticlockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.clockwise_rotate(speed + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.anticlockwise_rotate(speed + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.clockwise_rotate(speed)", "def forward_button(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=int(left_speed))\n self.right_motor.run_forever(speed_sp=int(right_speed))", "def forward(self, speed):\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rl' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fl' + self.postfix], -speed,\n ONE_SHOT_MODE)", "def GET_forward(self):\n self.roomba.DriveStraight(pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(pyrobot.VELOCITY_FAST)", "def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)", "def turn_90degrees(self, direction):\n if direction == \"right\" or direction == 1:\n self.myspeedctrl.send_speed(0,1)\n elif direction == \"left\" or direction == 2:\n self.myspeedctrl.send_speed(0,-1)\n rospy.sleep(1.61) #value found by trail and error\n self.myspeedctrl.send_speed(0,0)", "def drive_forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def set_pin_direction(self, pin, direction):\n pin = pin - 1\n if pin < 8:\n self.__port_a_direction = self.__helper.updatebyte(\n self.__port_a_direction, pin, direction)\n self.__bus.write_byte_data(\n 
self.__ioaddress, self.IODIRA, self.__port_a_direction)\n else:\n self.__port_b_direction = self.__helper.updatebyte(\n self.__port_b_direction, pin - 8, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, self.__port_b_direction)\n return", "def input_forward(self, joy_input):\n if self.saved[joy_input]:\n value = self.saved[joy_input]\n else:\n value = self.inputs[joy_input]\n yaw_pwm = np.interp(value, [-1, 1], [0, Joystick.MAX_YAW_PWM])\n print(\"(input forward) setting yaw pwm to \" + str(yaw_pwm))\n self.publish(Topic.YAW_PWM, yaw_pwm)", "def increment_speed(self):\n self.speed += 0.0004", "def move_forward(self, speed):\n\n # Clamp the speed\n speed = clamp(delta_unit(speed), 0, delta_unit(Car.max_speed))\n\n # Appends the speed according to the direction\n rad = np.radians(self.direction)\n self.fx += speed * np.cos(rad)\n self.fy += speed * np.sin(rad)\n\n # Set marker to move\n self.moved = True", "def change_motor_speed(self, speed=0.0):\r\n if not self.enabled:\r\n self.set_neutral(braked=False)\r\n return\r\n\r\n # logging.info(\"{} Motor Speed: {}\".format(self.motor_name, speed))\r\n self.current_speed = speed # Store current set speed\r\n\r\n # If speed is < 0.0, we are driving in reverse.\r\n self.forward = True\r\n if speed < 0.0:\r\n # Normalise speed value to be in range [0, 100]\r\n speed = -speed\r\n # Store direction\r\n self.forward = False\r\n\r\n # Apply a factor to the speed to limit speed\r\n speed *= self.speed_factor\r\n\r\n # Set motor directional pins\r\n if self.forward:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 1)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 0)\r\n else:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 0)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 1)\r\n\r\n # Convert speed into PWM duty cycle\r\n # and clamp values to min/max ranges.\r\n dutycycle = speed\r\n if dutycycle < 0.0:\r\n dutycycle = 0.0\r\n elif dutycycle > self.max_speed:\r\n dutycycle = self.max_speed\r\n\r\n # Change the PWM duty cycle based on fabs() of speed value.\r\n self.PWM.ChangeDutyCycle(dutycycle)", "def skipp(self):\n for x in range(4):\n self.fwd(right=100, left=100)\n time.sleep(.5)\n self.servo(1000)\n time.sleep(.1)\n self.servo(2000)\n time.sleep(.1)\n self.fwd(right=-100, left=-100)\n time.sleep(.1)\n self.servo(-1000)\n self.stop()", "def drive(self,direction, speed=100) -> None:\n if direction == 1:\n driveMotor.run(Adafruit_MotorHAT.FORWARD)\n driveMotor.setSpeed(speed)\n if direction == -1:\n driveMotor.run(Adafruit_MotorHAT.BACKWARD)\n driveMotor.setSpeed(speed)\n if direction == 0:\n driveMotor.setSpeed(0)\n driveMotor.run(Adafruit_MotorHAT.RELEASE)", "def drive(self,direction, speed=100):\n if direction == 1:\n self.leftMotor.run(Adafruit_MotorHAT.FORWARD)\n self.rightMotor.run(Adafruit_MotorHAT.FORWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == -1:\n self.leftMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.rightMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == 0:\n self.leftMotor.setSpeed(0)\n self.rightMotor.setSpeed(0)\n self.leftMotor.run(Adafruit_MotorHAT.RELEASE)\n self.rightMotor.run(Adafruit_MotorHAT.RELEASE)", "def move_forward(self, val):\n val = val * 180 / math.pi\n print(\"gyro diff\", self.gyro - val)\n print(\"gyrof\", self.gyro)\n if math.fabs(self.gyro - val) > 0.6:\n if self.gyro - val > 0:\n self.om_right = self.om_right - 0.7\n self.om_left = 
self.om_left + 0.5\n self.set_speed(self.om_left, self.om_right)\n print(\"om_l\", self.om_left)\n print(\"om_r\", self.om_right)\n else:\n self.om_right = self.om_right + 0.3\n self.om_left = self.om_left - 0.5\n self.set_speed(self.om_left, self.om_right)\n print(\"om_l\", self.om_left)\n print(\"om_r\", self.om_right)\n else:\n self.om_right = 10\n self.om_left = 10", "def forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def right_forward(self):\n self.right_motor.run_forever(speed_sp=self.MAX_SPEED)", "def left(self, speed):\n self.pwm_right.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(speed)", "def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)", "def drive_forward(self):\n print(f\"{self.make.title()} is now driving forward.\")", "def __init__(self, pwm_pin, dir_pin_1, dir_pin_2, pwm_freq):\n\t\tself._pwm_pin = pwm_pin # PWM input pin.\n\t\tself._dir_pin_1 = dir_pin_1 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._dir_pin_2 = dir_pin_2 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._pwm_freq = pwm_freq # PWM cycle.\n\n\t\tself._last_dir = 's' # Last rotation direction of this wheel. 's' indicates stop.\n\t\tself._last_dc_val = 0 # Last duty cycle value.\n\t\tself._current_dc_val = 0 # Current duty cycle value.\n\n\t\tGPIO.setmode(GPIO.BOARD)\n\n\t\t# Set the direction control GPIO output mode.\n\t\tGPIO.setup(self._pwm_pin, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_1, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_2, GPIO.OUT)\n\n\t\t# Inits PWM pin.\n\t\tself._motor_pwm = GPIO.PWM(self._pwm_pin, self._pwm_freq) # pwm_freq: Hz\n\t\tself._motor_pwm.start(0) # Set duty cycle to 0.", "def right_forward(self, state, speed):\n if state:\n self.right_motor.run_forever(speed_sp=speed)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n else:\n self.right_motor.stop()\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.BLACK)", "def moveForward(self):\n if self.onGround:\n self.vx = 4", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])", "def __init__(self, pin1=24, pin2=28, pin3=25, pin4=33):\n self.GP = GPIOProcessor()\n self.pin1 = self.GP.getPin(pin1)\n self.pin2 = self.GP.getPin(pin2)\n self.pin3 = self.GP.getPin(pin3)\n self.pin4 = self.GP.getPin(pin4)\n self.pinl = [self.pin1, self.pin2, self.pin3, self.pin4]\n\n for k in range(4):\n self.pinl[k].out()\n\n self.speed = 100.0", "def forward(self, forward):\n\n self._forward = forward", "def __spur_on_if_needed(self):\n if len(self.waypoints) < 2:\n return\n next_speed = (get_waypoint_speed(self.waypoints[0]) +\n get_waypoint_speed(self.waypoints[1])) / 2.0\n set_waypoint_speed(self.waypoints[0], next_speed)", "def slither(self):\n #writedown where we started\n starting_direction = self.get_heading()\n #start driving forward\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.fwd()\n # throttl down the left motor\n for power in range(self.LEFT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n 
time.sleep(.5)\n #throttle up the left while lowring the right\n for power in range(60, self.LEFT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n time.sleep(.5)\n # throttl down the right motor\n for power in range(self.RIGHT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n #throttle up the right while lowring the right\n for power in range(60, self.RIGHT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n \n #straighten out\n while self.get_heading() != starting_direction:\n #if I need to veer right\n if self.get_heading() < starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 90)\n self.set_motor_power(self.MOTOR_RIGHT, 60)\n #if I need to veer left\n elif self.get_heading() > starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 60)\n self.set_motor_power(self.MOTOR_RIGHT, 90)\n \n time.sleep(.1)\n self.stop()", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "def toggleIpforward(v):\n file_path = \"/proc/sys/net/ipv4/ip_forward\"\n with open(file_path, \"w\") as f:\n if v.ipForward:\n print(0, file=f)\n v.ipForward = False\n else:\n print(1, file=f)\n v.ipForward = True\n return", "def set_duty_cycle(self, pin, dutycycle):\n raise NotImplementedError", "def update(self):\n bondState = self._bond.getDeviceState(self._deviceId)\n if 'power' in bondState:\n self._state = True if bondState['power'] == 1 else False\n if self._state and bondState['speed'] in self._speed_name_by_value:\n self._attributes['current_speed'] = self._speed_name_by_value[bondState['speed']]\n else:\n self._attributes['current_speed'] = SPEED_OFF\n\n if 'direction' in bondState:\n if bondState['direction'] == Directions.REVERSE:\n self._attributes['current_direction'] = \"reverse\"\n else:\n self._attributes['current_direction'] = \"forward\"", "def activatePinReading(self):\n\n for pin in self.pinsToMeasure:\n arduino.samplePinDuringCapture(self.f, self.pinMap[pin], self.wallClock)", "def _enable_pin(pin, direction):\n _write_value(pin, \"{}/export\".format(_path_prefix))\n _write_value(direction, \"{0}/gpio{1}/direction\".format(_path_prefix, pin))", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def togglePWMPinEnable(self, PWMpin):\n bitPos = PWMpin + 8\n mask = 1 << bitPos\n self._injectFault(\"PWM1PCR\",self.PCR,mask)", "def strut(self):\n self.fwd(left=50, right=50)\n for x in range(2):\n self.servo(1000)\n time.sleep(.1) \n self.servo(1500) # Look Straight\n time.sleep(1)\n self.servo(2000)\n time.sleep(.1)\n self.servo(1500)", "def _get_forward_speed(self):\n\n velocity = self._vehicle.get_velocity()\n transform = self._vehicle.get_transform()\n vel_np = np.array([velocity.x, velocity.y, velocity.z])\n pitch = np.deg2rad(transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n orientation = np.array([np.cos(pitch) * np.cos(yaw), np.cos(pitch) * np.sin(yaw), np.sin(pitch)])\n speed = np.dot(vel_np, orientation)\n return speed", "def left_forward(self):\n self.left_motor.run_forever(speed_sp=self.MAX_SPEED)", "def move_forward():\n twister = Twist(linear=Vector3(x=0.5,y=0,z=0),angular=Vector3(x=0,y=0,z=0))\n pub.publish(twister)", "def forward(self):\n self.position += 1", "def forward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(param * .3048)\n\t\telse:\n\t\t\tself.linear_move(riu.default_dist * .3048)", "def 
handle_go_forward(entry_box, mqtt_client):\n speed_string = entry_box.get()\n print('sending the go_forward message with speed', speed_string)\n mqtt_client.send_message('go_forward', [speed_string])\n # --------------------------------------------------------------------------", "def update_speed_input_step(self,curr_v):\n \n # update speed inputs \n self.speed_inputs_east*=0\n self.speed_inputs_west*=0\n self.speed_inputs_north*=0\n self.speed_inputs_south*=0\n\n if self.use_eight_directions is True: \n self.speed_inputs_north_east*=0\n self.speed_inputs_north_west*=0\n self.speed_inputs_south_east*=0\n self.speed_inputs_south_west*=0\n \n #speed_values=self.rr[:self.N_e,0] \n speed_values=np.ones((self.N_e,1))\n\n if curr_v[0]>0:\n \n # north-east\n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_east=speed_values \n \n # south-east \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_east=speed_values\n \n #east \n else:\n self.speed_inputs_east=speed_values\n\n\n elif curr_v[0]<0:\n\n # north-west \n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_west=speed_values\n\n # south-west \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_west=speed_values\n \n # west \n else:\n self.speed_inputs_west=speed_values\n\n else: \n # north\n if curr_v[1]>0:\n self.speed_inputs_north=speed_values\n\n # south\n elif curr_v[1]<0:\n self.speed_inputs_south=speed_values", "def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def _birdUpdateHandler(self, pin):\n\n # Update movement value from PIR pin status\n self.p.update(pin)\n\n if(self.p.movement == 1):\n #print(\"Motion detected\")\n self._distanceCheck()\n\n timeO = 0\n while(self.birdHere == 0 and self.p.movement == 1 and timeO < self.timeout):\n sleep(1)\n self._distanceCheck()\n timeO += 1\n\n else:\n #print(\"Motion ended\")\n self.birdHere = 0", "def motorSpeed(self, speedRPM_l, speedRPM_r):\n\n self.motors__Direction(speedRPM_l, speedRPM_r)\n\n speedRPM_l = abs(speedRPM_l)\n speedRPM_r = abs(speedRPM_r)\n\n speedRPM_l = self.constrainSpeed(speedRPM_l)\n speedRPM_r = self.constrainSpeed(speedRPM_r)\n\n# Left motor\n pwmDuration = 4095.0 * speedRPM_l / self.motorMaxRPM\n# print(\"MuleBot.motorSpeed Duration left float: \", pwmDuration)\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration left int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorLeftMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationLeft = pwmDuration\n\n# Right motor\n #Adjust for right motor being faster\n pwmDuration = 4095.0 * speedRPM_r / self.motorMaxRPM\n pwmDuration = pwmDuration * 9727 / 10000 # 98.519113 percent\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration right int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorRightMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationRight = pwmDuration", "def move_forward():\n pass", "def step(self, count, direction):\n for x in range(count):\n for bit in self.mode[::direction]:\n self.pin1.value(bit[0])\n self.pin2.value(bit[1])\n self.pin3.value(bit[2])\n self.pin4.value(bit[3])\n time.sleep(DELAY)\n self.reset()", "def motorsDirection(self, direction):\n\n print (direction)\n if direction == 'r' or direction == 'R':\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n 
self.motorDirection(self.motor2DirectionPin, self.motorReverse)\n print (\"Direction reverse\")\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n print (\"Direction forward\")", "def nine_punishment(self):\n self.direction_clock_wise = not self.direction_clock_wise", "def int_handle_encoder(self,pin):\n\t\t#print \"DEBUG: self.int_handle_encoder! for pin: {0}\".format(pin)\n\t\t\t\n\t\tdevice = self.get_device_config_by_pin(pin)\n\t\t\n\t\tencoder_pinA = device['clk']\n\t\tencoder_pinB = device['dt']\n\n\t\tSwitch_A = self.gpio.input(encoder_pinA)\n\t\tSwitch_B = self.gpio.input(encoder_pinB)\n\t\t\n\t\t# debounce\n\t\t#if 'debounce' in self.pins_config[pin]:\n\t\t#\tdebounce = self.pins_config[pin]['debounce'] / 1000\n\t\t#\tprint \"DEBUG: sleeping: {0}\".format(debounce)\n\t\t#\tsleep(debounce)\n\t\t#\t\n\t\t#sleep(0.02)\n\t\t#if not self.gpio.input(encoder_pinA) == self.pins_config[encoder_pinA]:\n\t\t#\treturn None\n\t\t#if not self.gpio.input(encoder_pinB) == self.pins_config[encoder_pinB]:\n\t\t#\treturn None\n\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# now check if state of A or B has changed\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# if not that means that bouncing caused it\t\n\t\tCurrent_A = self.pins_state[encoder_pinA]\n\t\tCurrent_B = self.pins_state[encoder_pinB]\n\t\tif Current_A == Switch_A and Current_B == Switch_B:\t\t# Same interrupt as before (Bouncing)?\n\t\t\treturn\t\t\t\t\t\t\t\t\t\t\t\t# ignore interrupt!\n\n\t\tself.pins_state[encoder_pinA] = Switch_A\t\t\t\t# remember new state\n\t\tself.pins_state[encoder_pinB] = Switch_B\t\t\t\t# for next bouncing check\n\t\t\n\t\t# -------------------------------\n\t\tfunction = self.get_encoder_function_by_pin(pin)\n\t\tself.__mode_reset()\t\t\t\t\t\t\t\t\t# Keep resetting as long as the mode is being used\n\n\t\t# TODO, check if possible to only reset affected timer: self.ms_all[fun['mode_cycle']].\n\t\tif function is not None:\n\t\t\tif (Switch_A and Switch_B):\t\t\t\t\t\t# Both one active? 
Yes -> end of sequence\n\t\t\t\tthis_chg = datetime.now()\n\t\t\t\tdelta = this_chg - self.encoder_last_chg\n\t\t\t\t#print \"diff: {0}\".format(delta.total_seconds())\n\t\t\t\t#print type(delta.total_seconds())\t#float\n\t\t\t\tif delta.total_seconds() < 0.1:\n\t\t\t\t\tself.encoder_fast_count += 1\n\t\t\t\t\t#if self.encoder_fast_count > 3:\n\t\t\t\t\t#\tprint \"FAST {0}\".format(self.encoder_fast_count)\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tprint \"Maybe.....\"\n\t\t\t\telse:\n\t\t\t\t\tself.encoder_fast_count = 0\n\t\t\t\n\t\t\t\t\"\"\" why do we do this?\n\t\t\t\tif self.modes.active_modes():\n\t\t\t\t\t#self.reset_mode_timer(self.modes_old[0]['reset'])\n\t\t\t\t\tif 'reset' in self.mode_sets[function['mode_cycle']]:\n\t\t\t\t\t\tself.reset_mode_timer(self.mode_sets[function['mode_cycle']]['reset'])\n\t\t\t\t\"\"\"\n\n\t\t\t\tf_args = None\n\t\t\t\tif pin == encoder_pinB:\t\t\t\t\t\t\t# Turning direction depends on \n\t\t\t\t\t#COUNTER CLOCKWISE (CCW) or DECREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_ccw' in function:\t\t\t\t\n\t\t\t\t\t\tkey = 'function_fast_ccw'\n\t\t\t\t\t\tkey_args = 'function_fast_ccw_args'\n\n\t\t\t\t\telif 'function_ccw' in function:\n\t\t\t\t\t\tkey = 'function_ccw'\n\t\t\t\t\t\tkey_args = 'function_ccw_args'\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t#CLOCKWISE (CW) or INCREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_cw' in function:\n\t\t\t\t\t\tkey = 'function_fast_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\t\t\t\t\t\n\t\t\t\t\telif 'function_cw' in function:\n\t\t\t\t\t\tkey = 'function_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\n\t\t\t\t# prepare arguments\n\t\t\t\tif key_args in function:\n\t\t\t\t\tif isinstance(function[key_args],str):\n\t\t\t\t\t\t#f_args = [function[key_args]]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\telse:\n\t\t\t\t\t\t#f_args = *function[key_args]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *function[key_args])\n\t\t\t\telse:\n\t\t\t\t\tself.__exec_function_by_code(function[key])\n\t\t\t\t\t\n\t\t\t\t# execute\n\t\t\t\t#self.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\t\t\n\t\t\t\tself.encoder_last_chg = this_chg\n\t\telse:\n\t\t\tself.__printer(\"Encoder, no function\",level=LL_DEBUG)\n\n\n\t\t\tpigpio.pi()", "def DriveMotor():\n\n # cnt overflows at 25KHz (approximately)\n cnt = intbv(0, min = 0, max = CNT_MAX + 1)\n\n # 10-bit duty cycle\n duty_cycle = intbv(0)[10:]\n\n while True:\n yield clk25.posedge, rst_n.negedge\n if rst_n == LOW:\n cnt[:] = 0\n duty_cycle[:] = 0\n dir.next = HIGH_OPTO\n pwm.next = LOW_OPTO\n en_n.next = LOW_OPTO\n else:\n # accept new consign at the beginning of a period\n if cnt == 0:\n # extract duty cycle and direction\n if speed >= 0:\n duty_cycle[:] = speed\n dir.next = HIGH_OPTO\n elif -speed >= CNT_MAX: # handle -1024 case\n duty_cycle[:] = CNT_MAX\n dir.next = LOW_OPTO\n else:\n duty_cycle[:] = -speed\n dir.next = LOW_OPTO\n\n # reached consign?\n if cnt >= duty_cycle:\n pwm.next = LOW_OPTO\n else:\n pwm.next = HIGH_OPTO\n\n if cnt == CNT_MAX:\n cnt[:] = 0\n else:\n cnt += 1\n\n en_n.next = LOW_OPTO", "def set_speed(self, om_left, om_right):\n analog_om_left = self.LEFT_CONST + om_left*4\n analog_om_right = self.RIGHT_CONST - om_right*4\n self.servoWriteMicroseconds(self.PIN_LEFT, analog_om_left)\n self.servoWriteMicroseconds(self.PIN_RIGHT, analog_om_right)", "def advanceTan():\n global tanBallX, speed\n tanBallX += speed\n if tanBallX <= -4:\n # Reached the 
bottom - switch directions\n tanBallX = -4\n speed = -speed\n elif tanBallX >= 2.8:\n # Reached the top - switch directions\n tanBallX = 2.8\n speed = -speed", "def go(self, position):\n if self._is_on:\n val = min(180.0, position)\n val = max(0.0, position)\n val = (val / 180.0) * (self._max_duty - self._min_duty) + self._min_duty\n val = val * 100.0\n self._pwms.set_duty(self._pin_index, val)\n else:\n raise Exception(\"You must turn the servo on by calling the `on()` method before you can tell the servo to `go()`!\")", "def forward(self, distance):\n self.logger.debug(\"forward \" + str(distance))", "def _get_v0x01_v0x04_speed(self):\n fts = self.features\n pfts = PortFeatures01\n if fts and fts & pfts.OFPPF_10GB_FD:\n return 10 * 10**9 / 8\n if fts and fts & (pfts.OFPPF_1GB_HD | pfts.OFPPF_1GB_FD):\n return 10**9 / 8\n if fts and fts & (pfts.OFPPF_100MB_HD | pfts.OFPPF_100MB_FD):\n return 100 * 10**6 / 8\n if fts and fts & (pfts.OFPPF_10MB_HD | pfts.OFPPF_10MB_FD):\n return 10 * 10**6 / 8\n return None", "def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)", "def stream_function(self, X, Y):\n self.psi = (self.strength / (2 * np.pi) *\n np.arctan2((Y - self.yc), (X - self.xc)))", "def stopAcceleratingForward(self,event):\n self.isAcceleratingForward=False", "def update_and_publish(self):\n # 1. Find next_waypoint based on ego position & orientation\n if self._update_next_waypoint():\n\n # 2. Generate the list of next LOOKAHEAD_WPS waypoints\n num_base_wp = len(self.base_waypoints)\n last_base_wp = num_base_wp-1\n waypoint_idx = [idx % num_base_wp for idx in range(self.next_waypoint,self.next_waypoint+LOOKAHEAD_WPS)]\n final_waypoints = [self.base_waypoints[wp] for wp in waypoint_idx]\n\n # 3. If there is a red light ahead, update velocity for them\n if self.stop_on_red:\n # Start from original velocities\n self.restore_velocities(waypoint_idx)\n try:\n red_idx = waypoint_idx.index(self.red_light_waypoint)\n self.decelerate(final_waypoints, red_idx, self.stop_distance)\n except ValueError:\n # No red light available: self.red_light_waypoint is None or not in final_waypoints\n red_idx = None\n if debugging:\n v = self.get_waypoint_velocity(final_waypoints, 0)\n rospy.loginfo(\"Target velocity: %.1f, RL:%s wps ahead\", v, str(red_idx))\n\n # 3b. If we are close to the end of the circuit, make sure that we stop there\n if self.force_stop_on_last_waypoint or self.base_wp_orig_v[-1] < 1e-5:\n try:\n last_wp_idx = waypoint_idx.index(last_base_wp)\n self.decelerate(final_waypoints, last_wp_idx, 0)\n except ValueError:\n # Last waypoint is not one of the next LOOKAHEAD_WPS\n pass\n\n # 4. 
Publish waypoints to \"/final_waypoints\"\n self.publish_msg(final_waypoints)", "def right(self, speed):\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(speed)", "def set_pin_pullup(self, pin, value):\n pin = pin - 1\n if pin < 8:\n self.__port_a_pullup = self.__helper.updatebyte(\n self.__port_a_pullup, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUA, self.__port_a_pullup)\n else:\n self.__port_b_pullup = self.__helper.updatebyte(\n self.__port_b_pullup, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUB, self.__port_b_pullup)\n return", "def set_speed(self,speed):\n self.speed_p = speed", "def pwm(self, index, on=None, off=None):\n raise NotImplementedError()", "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def UpdateForward(self, deltaT):\n self.position += self.velocity * deltaT\n self.velocity += self.acceleration * deltaT", "def motors__Direction(self, speed_l, speed_r):\n\n if speed_l >= 0:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n\n if speed_r >= 0:\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n else :\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)", "def _get_v0x04_speed(self):\n fts = self.features\n pfts = PortFeatures04\n if fts and fts & pfts.OFPPF_1TB_FD:\n return 10**12 / 8\n if fts and fts & pfts.OFPPF_100GB_FD:\n return 100 * 10**9 / 8\n if fts and fts & pfts.OFPPF_40GB_FD:\n return 40 * 10**9 / 8\n return None", "def step(self):\n if self.change_rate != 0:\n self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()\n\n if self.speed < 0.5 * self._initial_speed:\n self.speed = 0.5 * self._initial_speed\n if self.speed > 2.0 * self._initial_speed:\n self.speed = 2.0 * self._initial_speed\n else:\n pass", "def __init__(self, forward):\n self.forward = forward\n self.kp = 0.0\n self.ki = 0.0\n self.kd = 0.0\n self.p_on_e = False\n self.out_min = 0.0\n self.out_max = 0.0\n self.iterm = 0.0\n self.output = 0.0\n self.set_point = 0.0\n self.last_time = 0.0\n self.last_input = 0.0\n self.init_input = 0.0", "def increase_car_speed(self):\r\n self.car_speed += 5", "def left_forward(self, state, speed):\n if state:\n self.left_motor.run_forever(speed_sp=speed)\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n else:\n self.left_motor.stop()\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.BLACK)", "def settle(self):\n if (self.angle >= self.max_angle) or (\n self.angle <= -self.max_angle\n ): # time to reverse\n print(\"reverse\", self.angle, self.max_angle)\n self.speed *= -0.9 # damped\n self.max_angle *= 0.9\n if self.speed > 0:\n self.angle = self.max_angle\n else:\n self.angle = -self.max_angle\n\n self.angle += radians(self.speed)\n print(self.angle, self.max_angle, self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)", "def move_forward(self, distance):\n quad_offset = self.quad_offset_mapping['forward']\n client.moveByVelocityAsync(self.velocity * quad_offset[0], self.velocity * quad_offset[1],\n 0.15, distance/self.velocity).join()\n # if self.logging:\n # self.log_arr.append(\"forward\")", "def updatePWM(self):\n v_dc = self.dcmotorSpeed * self.dcmotor_sgn # changed \"vr\" to \"v_dc\", \"rightSpeed\" to \"dcmotorSpeed\" and \"right_sgn\" to dcmotor_sgn\", RFMH_2019_02_26\n pwm_dc = self.PWMvalue(v_dc, self.DC_MOTOR_MIN_PWM,\n self.DC_MOTOR_MAX_PWM) # changed \"pwmr\" to 
\"pwm_dc\" and \"vr\" to \"v_dc\" and adjusted both orange constants to \"DC_MOTOR_MIN_PWM\" AND \"DC_MOTOR_MAX_PWM\", RFMH_2019_02_26\n\n # TODO: Fix this debug message. I am trying to port this code over from an old version, and I do not know\n # what v and u are supposed to be here. Timothy Scott, 5.11.2019\n # if self.debug: # where the duck does the \"u\" come from?!?, RFMH_2019_02_26\n # print(\"v = %5.3f, u = %5.3f, v_dc = %5.3f, pwm_dc = %3d\" % (\n # v, u, v_dc, pwm_dc)) # deleted \"vl\" and \"pwml\" and adjust \"vr\" to \"v_dc\" to \"pwm_dc\"\n\n if math.fabs(v_dc) < self.SPEED_TOLERANCE: # changed v_r to v_dc in if loop , RFMH_2019_02_28\n DcMotorMode = Adafruit_MotorHAT.RELEASE\n pwm_dc = 0\n elif v_dc > 0:\n DcMotorMode = Adafruit_MotorHAT.FORWARD\n elif v_dc < 0:\n DcMotorMode = Adafruit_MotorHAT.BACKWARD\n\n if not self.old_pwm_dc == pwm_dc:\n self.DcMotor.setSpeed(pwm_dc) # changed rightMotor to DcMotor and pwmr to pwm_dc , RFMH_2019_02_28\n self.DcMotor.run(DcMotorMode)\n\n self.old_pwm_dc = pwm_dc", "def rotate_servo_rel(pi, pin, pct):\n try:\n pw_old = pi.get_servo_pulsewidth(pin)\n except:\n pw_old = 0 # no PWM has been set yet, so assume 0 \n pct_old = pulsewidth2pct(pw_old)\n if pct_old == -25: # no PWM output commanded, go to center first to get a reference point\n pi.set_servo_pulsewidth(pin, pct2pulsewidth(50))\n pct_old = pulsewidth2pct(pi.get_servo_pulsewidth(pin))\n pct_cmd = pct_old + pct\n # saturate input to protect servo \n if pct_cmd < 10:\n pct_cmd = 10\n elif pct_cmd > 90:\n pct_cmd = 90\n pi.set_servo_pulsewidth(pin, pct2pulsewidth(pct_cmd))", "def reverse(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, False)\n GPIO.output(11, True)\n GPIO.output(13, False)\n GPIO.output(15, True)\n # time.sleep(sec)\n motor_direction = 'Reverse'\n return motor_direction", "def set_led(self, pin, value=0):\n value = self.int_lim(lower=PWM_MIN, upper=PWM_MAX, value=value) #Standardise the value to our correct range\n if self.iface.connected:\n try:\n self.iface.set_PWM_dutycycle(pin, value)\n except (AttributeError, IOError):\n logging.error(\" Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n else:\n logging.error(\" Interface not connected. Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n return value" ]
[ "0.72670037", "0.72670037", "0.6678285", "0.6659141", "0.6645468", "0.6523574", "0.64648247", "0.6370868", "0.62966657", "0.62861395", "0.62338525", "0.6129374", "0.6120421", "0.60226107", "0.60188246", "0.59220994", "0.5857952", "0.58566344", "0.57671356", "0.5759647", "0.57380575", "0.5716455", "0.56941694", "0.567874", "0.5655273", "0.5648542", "0.56480867", "0.5561641", "0.55479556", "0.55259126", "0.5504126", "0.54901326", "0.54758346", "0.5472976", "0.5463375", "0.54572964", "0.54435754", "0.54371816", "0.54255325", "0.54118466", "0.5405514", "0.5396413", "0.5396413", "0.53645813", "0.5353437", "0.5345385", "0.5338472", "0.53275245", "0.53254545", "0.5317953", "0.5315464", "0.52779675", "0.52769554", "0.5261894", "0.5249734", "0.52492833", "0.52375966", "0.52278274", "0.52236336", "0.5221164", "0.52139586", "0.521052", "0.5201662", "0.51968944", "0.5196833", "0.51941615", "0.5191823", "0.5191024", "0.5180057", "0.5179845", "0.5168158", "0.5167053", "0.51663625", "0.51655257", "0.51499426", "0.5149399", "0.51491445", "0.5140831", "0.5139521", "0.51325774", "0.5127085", "0.5123759", "0.51210093", "0.5118684", "0.5100055", "0.50999796", "0.50942725", "0.5092208", "0.5088572", "0.50635856", "0.5063495", "0.50560147", "0.5048559", "0.504423", "0.5033717", "0.5031926", "0.5031017", "0.50306606", "0.502635", "0.5023168" ]
0.6083617
13
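The adjacent records pair one-line docstrings with RPi.GPIO methods that steer a motor purely by duty cycle: the forward and backward pins each carry a PWM channel, and only one of them is driven at a time. A self-contained sketch of that pattern, assuming RPi.GPIO is available on the target board; the BOARD pin numbers and the 100 Hz frequency are illustrative choices, not taken from the records.

```python
import RPi.GPIO as GPIO

class DutyCycleMotor:
    """One motor, two PWM pins: one pin for forward drive, one for backward."""

    def __init__(self, pin_forward=16, pin_backward=18, freq=100):
        # Pin numbers and PWM frequency are assumptions made for this example.
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(pin_forward, GPIO.OUT)
        GPIO.setup(pin_backward, GPIO.OUT)
        self.pwm_forward = GPIO.PWM(pin_forward, freq)
        self.pwm_backward = GPIO.PWM(pin_backward, freq)
        self.pwm_forward.start(0)
        self.pwm_backward.start(0)

    def forward(self, speed):
        # speed is a duty cycle in percent (0-100); the backward pin idles at 0 %.
        self.pwm_backward.ChangeDutyCycle(0)
        self.pwm_forward.ChangeDutyCycle(speed)

    def backward(self, speed):
        # Mirror image of forward(): the duty cycle moves to the backward pin.
        self.pwm_forward.ChangeDutyCycle(0)
        self.pwm_backward.ChangeDutyCycle(speed)

    def stop(self):
        self.pwm_forward.ChangeDutyCycle(0)
        self.pwm_backward.ChangeDutyCycle(0)
```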
pinBackward is the backward Pin, so we change its duty cycle according to speed.
def backward(self, speed):\n        self.pwm_forward.ChangeDutyCycle(0)\n        self.pwm_backward.ChangeDutyCycle(speed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)", "def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)", "def right_backward(self):\n self.right_motor.run_forever(speed_sp=-self.MAX_SPEED)", "def stop(self):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(0)", "def __init__(self, pinForward, pinBackward, pinControl):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControl = pinControl\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControl, GPIO.OUT)\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n GPIO.output(self.pinControl,GPIO.HIGH)", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)", "def forward_right(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(100)", "def right_backward(self, state, speed):\n if state:\n self.right_motor.run_forever(speed_sp=-speed)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.RED)\n else:\n self.right_motor.stop()\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.BLACK)", "def forward_backward(self, x):\n raise NotImplementedError()", "def reverse(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, False)\n GPIO.output(11, True)\n GPIO.output(13, False)\n GPIO.output(15, True)\n # time.sleep(sec)\n motor_direction = 'Reverse'\n return motor_direction", "def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n 
self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)", "def left_backward(self):\n self.left_motor.run_forever(speed_sp=-self.MAX_SPEED)", "def stop(self):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(0)", "def backward(self, speed):\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rr' + self.postfix], speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rl' + self.postfix], speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fr' + self.postfix], speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fl' + self.postfix], speed,\n ONE_SHOT_MODE)", "def move_down(self):\n self.pitch_motor.step_forward()", "def turn_off(self):\n self.set_pin(0, -1)\n self.set_pin(1, -1)\n self.set_pin(2, -1)", "def backward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=-left_speed)\n self.right_motor.run_forever(speed_sp=-right_speed)", "def forward(speed, bias, biasDir):\n\t# todo: check directions for me please\n\tif biasDir == 1:\n rightMotor.run_direct(duty_cycle_sp=speed+bias)\n leftMotor.run_direct(duty_cycle_sp=speed)\n elif biasDir == -1:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed+bias)\n else:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed)", "def backward(self, speed):\n self.controller.reverse(speed)", "def drive_backward(self):\n\n print(f\"{self.make.title()} driving backward.\")", "def invert_pin(self, pin, polarity):\n\n pin = pin - 1\n if pin < 8:\n self.__port_a_polarity = self.__helper.updatebyte(\n self.__port_a_polarity,\n pin,\n polarity)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IPOLA, self.__port_a_polarity)\n else:\n self.__port_b_polarity = self.__helper.updatebyte(\n self.__port_b_polarity,\n pin - 8,\n polarity)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IPOLB, self.__port_b_polarity)\n return", "def stop(self):\n\t\tGPIO.output(self._dir_pin_1, GPIO.HIGH)\n\t\tGPIO.output(self._dir_pin_2, GPIO.HIGH)\n\t\tself._last_dir = 's'\n\t\t# self._motor_pwm.ChangeDutyCycle(0)", "def stop_motor(self):\n self.output(self.steering_pin, 0)\n self.pi.set_servo_pulsewidth(self.steering_pin, 0)", "def motorDirection(self, motorPin, direction):\n # print \"motorPin: \", motorPin\n # print \"direction: \", direction\n GPIO.output(motorPin, direction)", "def slither(self):\n #writedown where we started\n starting_direction = self.get_heading()\n #start driving forward\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.fwd()\n # throttl down the left motor\n for power in range(self.LEFT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n time.sleep(.5)\n #throttle up the left while lowring the right\n for power in range(60, self.LEFT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n time.sleep(.5)\n # throttl down the right motor\n for power in range(self.RIGHT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n #throttle up the right while 
lowring the right\n for power in range(60, self.RIGHT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n \n #straighten out\n while self.get_heading() != starting_direction:\n #if I need to veer right\n if self.get_heading() < starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 90)\n self.set_motor_power(self.MOTOR_RIGHT, 60)\n #if I need to veer left\n elif self.get_heading() > starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 60)\n self.set_motor_power(self.MOTOR_RIGHT, 90)\n \n time.sleep(.1)\n self.stop()", "def backward(self, x_out, x_target):\r\n return 2*(x_out - x_target)", "def forward(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, True)\n GPIO.output(11, False)\n GPIO.output(13, True)\n GPIO.output(15, False)\n # time.sleep(sec)\n motor_direction = 'Forward'\n return motor_direction", "def forward_left(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed) \n self.pwm_right.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(100)", "def right(self, speed):\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(speed)", "def backward(self, duration):\n self.set_motor(self.left_motor, 'right', 0.5)\n self.set_motor(self.right_motor, 'left', 0.5)\n time.sleep(duration)", "def _backward(self):\n if self.units[0].value > 0:\n self.units[0].gradient += 1 * self.utop.gradient\n else:\n self.units[0].gradient += 0 * self.utop.gradient", "def moveBackward(self):\n if self.onGround:\n self.vx = -4", "def GET_reverse(self):\n self.roomba.DriveStraight(-pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(-pyrobot.VELOCITY_FAST)", "def reverse_button(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=-int(left_speed))\n self.right_motor.run_forever(speed_sp=-int(right_speed))", "def move_backward():\n pass", "def ramp_down(self):\n value = self.current_event[\"ramp_down\"][\"value\"]\n self.current_value.append(self.current_value[-1] - value)", "def backward(self, y):\n pass", "def backward_shimmey(self):\n for x in range(6):\n self.right(primary=-70, counter=-30)\n time.sleep(.5)\n self.left(primary=-70, counter=-30)\n time.sleep(.5)\n self.stop()", "def backward_shimmey(self):\n for x in range(6):\n self.right(primary=-70, counter=-30)\n time.sleep(.5)\n self.left(primary=-70, counter=-30)\n time.sleep(.5)\n self.stop()", "def turn_off(self, **kwargs):\n self._attributes['current_speed'] = SPEED_OFF\n self._bond.turnOff(self._deviceId)", "def skipp(self):\n for x in range(4):\n self.fwd(right=100, left=100)\n time.sleep(.5)\n self.servo(1000)\n time.sleep(.1)\n self.servo(2000)\n time.sleep(.1)\n self.fwd(right=-100, left=-100)\n time.sleep(.1)\n self.servo(-1000)\n self.stop()", "def left_backward(self, state, speed):\n if state:\n self.left_motor.run_forever(speed_sp=-speed)\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.RED)\n else:\n self.left_motor.stop()\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.BLACK)", "def backward(self, dout):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)-1,-1,-1):\n act_dout = self.activations[l].backward(dout)\n dout = self.layers[l].backward(act_dout)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return", "def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n self.pwm_A.start(100)\n 
self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)", "async def skip_backward(self) -> None:\n return await self.relay(\"skip_backward\")()", "def turn_biases_off(self, reverse = False):\n if not reverse:\n self.b_copy = self.b.get_value()\n self.b.set_value(np.float32(0. * self.b_copy))\n else:\n self.b.set_value(self.b_copy)", "def backward(self):\n #print('backward\\r')\n self.linearVector = Vector3(x=-1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def backward(self,x, y):\n # TODO\n self.delta[self.L-1]=self.a[self.L-1]-y\n le=len(self.delta)\n for i in range(le-2,-1,-1):\n cx= self.w[i+1].T@self.delta[i+1]\n self.delta[i]=self.phi_d(self.z[i])*cx\n for i in range(1,self.L):\n self.dw[i]=np.asmatrix(self.delta[i]).T@np.asmatrix(self.a[i-1])\n self.db[i]=self.delta[i]", "def pin_pulldown(self, pin):\n port_num = self._convert_pin_port(pin)\n gpio.pullup(port_num, gpio.PULLDOWN)", "def backward(self, top, propagate_down, bottom):\r\n pass", "def backward_pass(self, grad):\n pass", "def move_reverse(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.clockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.anticlockwise_rotate(speed + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.clockwise_rotate(speed + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.anticlockwise_rotate(speed)", "def move_up(self):\n self.pitch_motor.step_backward()", "def move_backward(self, distance):\n quad_offset = self.quad_offset_mapping['backward']\n client.moveByVelocityAsync(self.velocity * quad_offset[0], self.velocity * quad_offset[1],\n 0.15, distance/self.velocity).join()\n # if self.logging:\n # self.log_arr.append(\"backward\")", "def arm_down(self):\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(position_sp=-arm_revolutions_for_full_range)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n ev3.Sound.beep().wait()", "def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)", "def backward(self):\n raise NotImplementedError", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def deactivate(self):\n self.game.paddle.transition(NormalState(self.game.paddle))\n for ball in self.game.balls:\n ball.base_speed -= 1", "def deactivate(self):\n self.game.paddle.transition(NormalState(self.game.paddle))\n for ball in self.game.balls:\n ball.base_speed -= 1", "def backward(cls, grad_out, activated_out):\n raise Exception(\"Unimplemented\")", "def backward(cls, grad_out, activated_out):\n raise Exception(\"Unimplemented\")", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, 
top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def set_pin_direction(self, pin, direction):\n pin = pin - 1\n if pin < 8:\n self.__port_a_direction = self.__helper.updatebyte(\n self.__port_a_direction, pin, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, self.__port_a_direction)\n else:\n self.__port_b_direction = self.__helper.updatebyte(\n self.__port_b_direction, pin - 8, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, self.__port_b_direction)\n return", "def switch_off(self):\n if threading.current_thread() != self._blinking_thread:\n self._blinking_thread.unregister(self)\n GPIO.output(self.pin, GPIO.LOW)", "def backward(self, top, propagate_down, bottom):\n\t\tpass", "def backward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(-1 * param * .3048)\n\t\telse:\n\t\t\tself.linear_move(-1 * riu.default_dist * .3048)", "def sidebounce(self):\r\n self.dx=-self.dx", "def stopAcceleratingForward(self,event):\n self.isAcceleratingForward=False", "def motor_rotate_deg(power,deg,port,sampling_time=.01,delay_when_stopping=.05): \n debug = False\n num_motor=len(power) #Number of motors being used\n #print num_motor\n init_val=[0]*num_motor\n curr_val=[0]*num_motor\n final_val=[0]*num_motor\n last_encod=[0]*num_motor\n \n delta=0\n gain=0.005\n idelta=0.0\n alpha=10\n smulti=0\n BrickPiUpdateValues()\n for i in range(num_motor):\n BrickPi.MotorEnable[port[i]] = 1 #Enable the Motors\n power[i]=abs(power[i])\n \n init_val[i]=BrickPi.Encoder[port[i]] #Initial reading of the encoder \n \n final_val[i]=init_val[i]+(deg[i]*2) #Final value when the motor has to be stopped;One encoder value counts for 0.5 degrees\n \n #For running clockwise and anticlockwise\n if deg[i]>0:\n BrickPi.MotorSpeed[port[i]] = power[i]\n elif deg[i]<0:\n BrickPi.MotorSpeed[port[i]] = -power[i]\n else:\n BrickPi.MotorSpeed[port[i]] = 0\n \n \n run_stat=[0]*num_motor\n\n time_start = time.time()\n time_end = time.time()\n time_total = time_end - time_start\n \n while True:\n time_end = time.time()\n time_total = time_end - time_start\n if time_total >= ROTATE_DEG_TIMEOUT:\n break\n \n result = BrickPiUpdateValues() #Ask BrickPi to update values for sensors/motors\n time.sleep(sampling_time) #sleep for the sampling time given (default:10 ms)\n i = 0\n #if debug:\n #print \"Result of Update Values: \" + `result`\n if not result :\n for i in range(num_motor): #Do for each of the motors\n #The FIRST thing we should do is check our encoders!\n curr_val[i]=BrickPi.Encoder[port[i]]\n if debug :\n print \"Motor \" + `i` + \" encoder: \" + `curr_val[i]`\n \n if run_stat[i]==1:\n continue\n # Check if final value reached for each of the motors\n if(deg[i]>0 and final_val[i]<=curr_val[i]) or (deg[i]<0 and final_val[i]>=curr_val[i]) :\n #This motor has reached its goal\n run_stat[i]=1\n \n #Now let's hit the breaks by going in reverse for a VERY quick amount of time.\n if deg[i]>0:\n BrickPi.MotorSpeed[port[i]] = -power[i]\n elif deg[i]<0:\n BrickPi.MotorSpeed[port[i]] = power[i]\n else:\n BrickPi.MotorSpeed[port[i]] = 0 \n BrickPiUpdateValues()\n time.sleep(delay_when_stopping)\n #Now let's turn the motor off all together\n BrickPi.MotorEnable[port[i]] = 0\n BrickPiUpdateValues()\n \n if(all(e==1 for e in run_stat)): #If all the motors have already completed their rotation, then stop\n break\n \n #Let's use Proportional Integral Control on the Motors to keep them in Sync\n 
if i == 1 :\n if curr_val[0] <> 0 and curr_val[1] <>0 : \n if last_encod[0]<>0 and last_encod[1] <>1 :\n if abs(last_encod[0] - init_val[0]) < abs(last_encod[1] - init_val[1]) :\n #Motor 1 is going faster\n delta = abs(curr_val[1]-last_encod[1]) - abs(curr_val[0]-last_encod[0])\n idelta = (abs(curr_val[1]-init_val[1]) - abs(curr_val[0]-init_val[0]))/alpha\n if debug:\n print \"Motor 1 is faster by \" + `delta`\n print \"last_encod = \" + `last_encod[0]` + \" , \" + `last_encod[1]`\n print \"idelta = \" + `idelta`\n print \"Current Encode = \" + `curr_val[0]` + \" , \" + `curr_val[1]`\n\n if int(abs(BrickPi.MotorSpeed[port[0]])) == 255 :\n #Motor 0 CANNOT be sped up\n smulti=BrickPi.MotorSpeed[port[0]]*delta*gain+idelta*gain\n #Speed Multiplier: the amount we want to slow down Motor 1\n if int(abs(BrickPi.MotorSpeed[port[1]]-smulti)) <= 255 : \n #Target speed is inside the bounds of Motor speed\n BrickPi.MotorSpeed[port[1]] = int (BrickPi.MotorSpeed[port[1]]-smulti)\n elif int (BrickPi.MotorSpeed[port[1]]-smulti) < 0 :\n #Target speed is outside the bounds of -255 to 255\n BrickPi.MotorSpeed[port[1]] = -255\n else :\n BrickPi.MotorSpeed[port[1]] = 255\n BrickPiUpdateValues()\n if debug : \n print \"Motor 1 speed : \" + `BrickPi.MotorSpeed[port[1]]`\n print \"Speed Multiplier : \" + `smulti`\n\n else :\n #Motor 0 CAN be sped up\n smulti=BrickPi.MotorSpeed[port[0]]*delta*gain+idelta*gain\n #Speed Multiplier: the amount we want to speed up Motor 0\n if int(abs(BrickPi.MotorSpeed[port[0]]+smulti)) <= 255 : \n #Target speed is inside the bounds of Motor speed\n BrickPi.MotorSpeed[port[0]] = int (BrickPi.MotorSpeed[port[0]]+smulti)\n elif int (BrickPi.MotorSpeed[port[0]]+smulti) < 0 :\n #Target speed is outside the bounds of -255 to 255\n BrickPi.MotorSpeed[port[0]] = -255 \n else :\n BrickPi.MotorSpeed[port[0]] = 255\n BrickPiUpdateValues()\n if debug : \n print \"Motor 0 speed : \" + `BrickPi.MotorSpeed[port[0]]`\n print \"Speed Multiplier : \" + `smulti`\n\n\n elif (last_encod[0] - curr_val[0]) > abs(last_encod[1] - curr_val[1]) :\n #Motor 0 is going faster\n delta= abs(curr_val[0]-last_encod[0])- abs(curr_val[1]-last_encod[1]) \n idelta = (abs(curr_val[0]-init_val[0]) - abs(curr_val[1]-init_val[1]))/alpha\n if debug :\n print \"Motor 0 is faster by \" + `delta`\n print \"last_encod = \" + `last_encod[0]` + \" , \" + `last_encod[1]`\n print \"idelta = \" + `idelta`\n print \"Current Encode = \" + `curr_val[0]` + \" , \" + `curr_val[1]`\n\n if abs(BrickPi.MotorSpeed[port[1]]) == 255 :\n #Motor 1 CANNOT be sped up, SLOW DOWN Motor 0\n smulti=BrickPi.MotorSpeed[port[0]]*delta*gain+idelta*gain\n #Speed Multiplier: the amount we want to slow down Motor 0\n if int(abs(BrickPi.MotorSpeed[port[0]]-smulti)) <= 255 :\n #Target speed is inside the bounds of Motor\n BrickPi.MotorSpeed[port[0]] = int (BrickPi.MotorSpeed[port[0]]-smulti)\n elif int (BrickPi.MotorSpeed[port[0]]-smulti) < 0 :\n #Target speed is outside the -255 to 255 bounds\n BrickPi.MotorSpeed[port[0]] = -255\n else : \n BrickPi.MotorSpeed[port[0]] = 255\n BrickPiUpdateValues()\n if debug : \n print \"Motor 0 speed : \" + `BrickPi.MotorSpeed[port[0]]`\n print \"Speed Multiplier : \" + `smulti`\n\n else :\n #Motor 1 CAN be sped up SPEED UP Motor 1\n smulti=BrickPi.MotorSpeed[port[0]]*delta*gain+idelta*gain\n #Speed Multiplier: the amount we want to speed up Motor 1\n if int(abs (BrickPi.MotorSpeed[port[1]]+smulti)) <= 255 :\n #Target speed is inside the bounds of Motor\n BrickPi.MotorSpeed[port[1]] = int 
(BrickPi.MotorSpeed[port[1]]+smulti)\n elif int (BrickPi.MotorSpeed[port[1]]+smulti) < 0 :\n #Target speed is outside the -255 to 255 bounds\n BrickPi.MotorSpeed[port[1]] = -255\n else :\n BrickPi.MotorSpeed[port[1]] = 255\n BrickPiUpdateValues()\n if debug : \n print \"Motor 1 speed : \" + `BrickPi.MotorSpeed[port[1]]`\n print \"Speed Multiplier : \" + `smulti`\n \n last_encod[0] = curr_val[0]\n last_encod[1] = curr_val[1]\n BrickPi.MotorEnable[MOTOR1] = 1\n BrickPi.MotorEnable[MOTOR2] = 1\n return 0", "def set_duty_cycle(self, pin, dutycycle):\n raise NotImplementedError", "def backward(x, pi, A, B):\n # TODO: Write this function.\n #Beta_index = np.ones(2)\n #ret = np.zeros((x.shape[0], pi.shape[0]))\n #ret[x.shape[0]-1] = Beta_index\n #for i in range(x.shape[0]-2, -1, -1):\n # B_col = B[:, x[i+1]]\n # sum_term = np.dot(B_col, A)\n # Beta_index = np.multiply(sum_term, Beta_index)\n # ret[i] = Beta_index\n #return ret\n Beta_index = np.ones(2)\n ret = np.zeros((x.shape[0], pi.shape[0]))\n ret[x.shape[0]-1] = Beta_index\n for i in range(x.shape[0]-2, -1, -1):\n B_col = B[:, x[i+1]]\n sum_term = np.multiply(Beta_index, B_col)\n Beta_index = np.dot(A, sum_term)\n ret[i] = Beta_index\n return ret", "def set_pin_pullup(self, pin, value):\n pin = pin - 1\n if pin < 8:\n self.__port_a_pullup = self.__helper.updatebyte(\n self.__port_a_pullup, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUA, self.__port_a_pullup)\n else:\n self.__port_b_pullup = self.__helper.updatebyte(\n self.__port_b_pullup, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUB, self.__port_b_pullup)\n return", "def bulb_blink_call():\n times = blinkSlider.get()\n bulb_blink(times)", "async def blink(my_board, pin):\n\n # set the pin mode\n await my_board.set_pin_mode_digital_output(pin)\n\n # toggle the pin 4 times and exit\n for x in range(4):\n print('ON')\n await my_board.digital_write(pin, 1)\n await asyncio.sleep(1)\n print('OFF')\n await my_board.digital_write(pin, 0)\n await asyncio.sleep(1)", "def bounce(self):\n self.y_dir *= -1 # Reverse vertical direction of travel", "def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError", "def right_forward(self):\n self.right_motor.run_forever(speed_sp=self.MAX_SPEED)", "def nine_punishment(self):\n self.direction_clock_wise = not self.direction_clock_wise", "def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)", "def __update_speed_stop(self):\n if self.velocidade > SERVO_DUTY_CYCLE_MEIO:\n self.velocidade -= self.incremento_veloc\n \n # Para mesmo que haja arredondamento de float\n if self.velocidade <= SERVO_DUTY_CYCLE_MEIO:\n self.velocidade = SERVO_DUTY_CYCLE_MEIO\n self.servo.set_duty_cycle(0.0)\n else:\n self.servo.set_duty_cycle(self.velocidade)\n elif self.velocidade < SERVO_DUTY_CYCLE_MEIO:\n self.velocidade += self.incremento_veloc\n \n # Para mesmo que haja arredondamento de float\n if self.velocidade >= SERVO_DUTY_CYCLE_MEIO:\n self.velocidade = SERVO_DUTY_CYCLE_MEIO\n self.servo.set_duty_cycle(0.0)\n else:\n self.servo.set_duty_cycle(self.velocidade)\n else:\n self.servo.set_duty_cycle(0.0)", "def backward(self):\n gradient = blah\n return gradient", "def backward(self):\n gradient = blah\n return gradient" ]
[ "0.6781866", "0.6440657", "0.63590693", "0.6158023", "0.6145336", "0.6108258", "0.6108258", "0.6104545", "0.6029256", "0.6004843", "0.5955468", "0.59549665", "0.5944324", "0.5896161", "0.58845234", "0.5840875", "0.5830278", "0.5827748", "0.5796934", "0.5789884", "0.5785566", "0.57631195", "0.57624656", "0.56926507", "0.5689765", "0.5684283", "0.56448644", "0.56429464", "0.5642259", "0.56351006", "0.5598567", "0.55742705", "0.5565872", "0.5562474", "0.5534167", "0.55169404", "0.5503302", "0.5483962", "0.5432986", "0.54145426", "0.54145426", "0.5411786", "0.5411379", "0.54111516", "0.5399164", "0.5391273", "0.53728014", "0.53529835", "0.53525794", "0.5339184", "0.53282446", "0.5322998", "0.5320157", "0.5309519", "0.52944404", "0.5291785", "0.5284103", "0.5282795", "0.52819574", "0.52684116", "0.52684116", "0.52684116", "0.52623296", "0.52623296", "0.52608365", "0.52608365", "0.52581537", "0.52581537", "0.52581537", "0.52581537", "0.52581537", "0.52581537", "0.52581537", "0.52581537", "0.52581537", "0.52581537", "0.52581537", "0.52581537", "0.52581537", "0.52581054", "0.5255168", "0.52483666", "0.5247986", "0.52424455", "0.52386254", "0.5227132", "0.5223854", "0.5222647", "0.5214137", "0.5199451", "0.51994276", "0.51943815", "0.51929307", "0.51907593", "0.5184249", "0.5182085", "0.51662946", "0.5163338", "0.5163338" ]
0.67496103
2
pinForward is the forward Pin, so we change its duty cycle according to speed.
def left(self, speed):
    self.pwm_right.ChangeDutyCycle(0)
    self.pwm_left.ChangeDutyCycle(speed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, True)\n GPIO.output(11, False)\n GPIO.output(13, True)\n GPIO.output(15, False)\n # time.sleep(sec)\n motor_direction = 'Forward'\n return motor_direction", "def __init__(self, pinForward, pinBackward, pinControl):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControl = pinControl\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControl, GPIO.OUT)\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n GPIO.output(self.pinControl,GPIO.HIGH)", "def forward_left(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed) \n self.pwm_right.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(100)", "def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)", "def forward(speed, bias, biasDir):\n\t# todo: check directions for me please\n\tif biasDir == 1:\n rightMotor.run_direct(duty_cycle_sp=speed+bias)\n leftMotor.run_direct(duty_cycle_sp=speed)\n elif biasDir == -1:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed+bias)\n else:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed)", "def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)", "def motorDirection(self, motorPin, direction):\n # print \"motorPin: \", motorPin\n # print \"direction: \", direction\n GPIO.output(motorPin, direction)", "def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n 
self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.controller.forward(speed)", "def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError", "def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)", "def forward_right(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(100)", "def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)", "def fwd(dist=0): #distance is in cm\n try:\n if dist>0:\n # this casting to int doesn't seem necessary\n pulse=int(PPR*(dist//WHEEL_CIRC) )\n enc_tgt(1,1,pulse)\n except Exception as e:\n print (\"gopigo fwd: {}\".format(e))\n pass\n return write_i2c_block(ADDRESS,motor_fwd_cmd+[0,0,0])", "def startAcceleratingForward(self,event):\n self.isAcceleratingForward=True", "def steer(direction):\n if direction == 1:\n steerMotor.run(Adafruit_MotorHAT.FORWARD)\n steerMotor.setSpeed(255)\n if direction == -1:\n steerMotor.run(Adafruit_MotorHAT.BACKWARD)\n steerMotor.setSpeed(255)\n if direction == 0:\n steerMotor.setSpeed(0)\n steerMotor.run(Adafruit_MotorHAT.RELEASE)", "def move_forward(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.anticlockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.clockwise_rotate(speed + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.anticlockwise_rotate(speed + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.clockwise_rotate(speed)", "def forward_button(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=int(left_speed))\n self.right_motor.run_forever(speed_sp=int(right_speed))", "def forward(self, speed):\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rl' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fl' + self.postfix], -speed,\n ONE_SHOT_MODE)", "def GET_forward(self):\n self.roomba.DriveStraight(pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(pyrobot.VELOCITY_FAST)", "def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)", "def turn_90degrees(self, direction):\n if direction == \"right\" or direction == 1:\n self.myspeedctrl.send_speed(0,1)\n elif direction == \"left\" or direction == 2:\n self.myspeedctrl.send_speed(0,-1)\n rospy.sleep(1.61) #value found by trail and error\n self.myspeedctrl.send_speed(0,0)", "def drive_forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def 
set_pin_direction(self, pin, direction):\n pin = pin - 1\n if pin < 8:\n self.__port_a_direction = self.__helper.updatebyte(\n self.__port_a_direction, pin, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, self.__port_a_direction)\n else:\n self.__port_b_direction = self.__helper.updatebyte(\n self.__port_b_direction, pin - 8, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, self.__port_b_direction)\n return", "def input_forward(self, joy_input):\n if self.saved[joy_input]:\n value = self.saved[joy_input]\n else:\n value = self.inputs[joy_input]\n yaw_pwm = np.interp(value, [-1, 1], [0, Joystick.MAX_YAW_PWM])\n print(\"(input forward) setting yaw pwm to \" + str(yaw_pwm))\n self.publish(Topic.YAW_PWM, yaw_pwm)", "def increment_speed(self):\n self.speed += 0.0004", "def move_forward(self, speed):\n\n # Clamp the speed\n speed = clamp(delta_unit(speed), 0, delta_unit(Car.max_speed))\n\n # Appends the speed according to the direction\n rad = np.radians(self.direction)\n self.fx += speed * np.cos(rad)\n self.fy += speed * np.sin(rad)\n\n # Set marker to move\n self.moved = True", "def change_motor_speed(self, speed=0.0):\r\n if not self.enabled:\r\n self.set_neutral(braked=False)\r\n return\r\n\r\n # logging.info(\"{} Motor Speed: {}\".format(self.motor_name, speed))\r\n self.current_speed = speed # Store current set speed\r\n\r\n # If speed is < 0.0, we are driving in reverse.\r\n self.forward = True\r\n if speed < 0.0:\r\n # Normalise speed value to be in range [0, 100]\r\n speed = -speed\r\n # Store direction\r\n self.forward = False\r\n\r\n # Apply a factor to the speed to limit speed\r\n speed *= self.speed_factor\r\n\r\n # Set motor directional pins\r\n if self.forward:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 1)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 0)\r\n else:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 0)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 1)\r\n\r\n # Convert speed into PWM duty cycle\r\n # and clamp values to min/max ranges.\r\n dutycycle = speed\r\n if dutycycle < 0.0:\r\n dutycycle = 0.0\r\n elif dutycycle > self.max_speed:\r\n dutycycle = self.max_speed\r\n\r\n # Change the PWM duty cycle based on fabs() of speed value.\r\n self.PWM.ChangeDutyCycle(dutycycle)", "def skipp(self):\n for x in range(4):\n self.fwd(right=100, left=100)\n time.sleep(.5)\n self.servo(1000)\n time.sleep(.1)\n self.servo(2000)\n time.sleep(.1)\n self.fwd(right=-100, left=-100)\n time.sleep(.1)\n self.servo(-1000)\n self.stop()", "def drive(self,direction, speed=100) -> None:\n if direction == 1:\n driveMotor.run(Adafruit_MotorHAT.FORWARD)\n driveMotor.setSpeed(speed)\n if direction == -1:\n driveMotor.run(Adafruit_MotorHAT.BACKWARD)\n driveMotor.setSpeed(speed)\n if direction == 0:\n driveMotor.setSpeed(0)\n driveMotor.run(Adafruit_MotorHAT.RELEASE)", "def drive(self,direction, speed=100):\n if direction == 1:\n self.leftMotor.run(Adafruit_MotorHAT.FORWARD)\n self.rightMotor.run(Adafruit_MotorHAT.FORWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == -1:\n self.leftMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.rightMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == 0:\n self.leftMotor.setSpeed(0)\n self.rightMotor.setSpeed(0)\n self.leftMotor.run(Adafruit_MotorHAT.RELEASE)\n self.rightMotor.run(Adafruit_MotorHAT.RELEASE)", "def move_forward(self, val):\n val = val * 180 / 
math.pi\n print(\"gyro diff\", self.gyro - val)\n print(\"gyrof\", self.gyro)\n if math.fabs(self.gyro - val) > 0.6:\n if self.gyro - val > 0:\n self.om_right = self.om_right - 0.7\n self.om_left = self.om_left + 0.5\n self.set_speed(self.om_left, self.om_right)\n print(\"om_l\", self.om_left)\n print(\"om_r\", self.om_right)\n else:\n self.om_right = self.om_right + 0.3\n self.om_left = self.om_left - 0.5\n self.set_speed(self.om_left, self.om_right)\n print(\"om_l\", self.om_left)\n print(\"om_r\", self.om_right)\n else:\n self.om_right = 10\n self.om_left = 10", "def forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def right_forward(self):\n self.right_motor.run_forever(speed_sp=self.MAX_SPEED)", "def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)", "def drive_forward(self):\n print(f\"{self.make.title()} is now driving forward.\")", "def __init__(self, pwm_pin, dir_pin_1, dir_pin_2, pwm_freq):\n\t\tself._pwm_pin = pwm_pin # PWM input pin.\n\t\tself._dir_pin_1 = dir_pin_1 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._dir_pin_2 = dir_pin_2 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._pwm_freq = pwm_freq # PWM cycle.\n\n\t\tself._last_dir = 's' # Last rotation direction of this wheel. 's' indicates stop.\n\t\tself._last_dc_val = 0 # Last duty cycle value.\n\t\tself._current_dc_val = 0 # Current duty cycle value.\n\n\t\tGPIO.setmode(GPIO.BOARD)\n\n\t\t# Set the direction control GPIO output mode.\n\t\tGPIO.setup(self._pwm_pin, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_1, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_2, GPIO.OUT)\n\n\t\t# Inits PWM pin.\n\t\tself._motor_pwm = GPIO.PWM(self._pwm_pin, self._pwm_freq) # pwm_freq: Hz\n\t\tself._motor_pwm.start(0) # Set duty cycle to 0.", "def right_forward(self, state, speed):\n if state:\n self.right_motor.run_forever(speed_sp=speed)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n else:\n self.right_motor.stop()\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.BLACK)", "def moveForward(self):\n if self.onGround:\n self.vx = 4", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])", "def __init__(self, pin1=24, pin2=28, pin3=25, pin4=33):\n self.GP = GPIOProcessor()\n self.pin1 = self.GP.getPin(pin1)\n self.pin2 = self.GP.getPin(pin2)\n self.pin3 = self.GP.getPin(pin3)\n self.pin4 = self.GP.getPin(pin4)\n self.pinl = [self.pin1, self.pin2, self.pin3, self.pin4]\n\n for k in range(4):\n self.pinl[k].out()\n\n self.speed = 100.0", "def forward(self, forward):\n\n self._forward = forward", "def __spur_on_if_needed(self):\n if len(self.waypoints) < 2:\n return\n next_speed = (get_waypoint_speed(self.waypoints[0]) +\n get_waypoint_speed(self.waypoints[1])) / 2.0\n set_waypoint_speed(self.waypoints[0], next_speed)", "def slither(self):\n #writedown where we started\n starting_direction = self.get_heading()\n #start driving forward\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.fwd()\n # throttl down the left motor\n 
for power in range(self.LEFT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n time.sleep(.5)\n #throttle up the left while lowring the right\n for power in range(60, self.LEFT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n time.sleep(.5)\n # throttl down the right motor\n for power in range(self.RIGHT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n #throttle up the right while lowring the right\n for power in range(60, self.RIGHT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n \n #straighten out\n while self.get_heading() != starting_direction:\n #if I need to veer right\n if self.get_heading() < starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 90)\n self.set_motor_power(self.MOTOR_RIGHT, 60)\n #if I need to veer left\n elif self.get_heading() > starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 60)\n self.set_motor_power(self.MOTOR_RIGHT, 90)\n \n time.sleep(.1)\n self.stop()", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "def toggleIpforward(v):\n file_path = \"/proc/sys/net/ipv4/ip_forward\"\n with open(file_path, \"w\") as f:\n if v.ipForward:\n print(0, file=f)\n v.ipForward = False\n else:\n print(1, file=f)\n v.ipForward = True\n return", "def set_duty_cycle(self, pin, dutycycle):\n raise NotImplementedError", "def activatePinReading(self):\n\n for pin in self.pinsToMeasure:\n arduino.samplePinDuringCapture(self.f, self.pinMap[pin], self.wallClock)", "def update(self):\n bondState = self._bond.getDeviceState(self._deviceId)\n if 'power' in bondState:\n self._state = True if bondState['power'] == 1 else False\n if self._state and bondState['speed'] in self._speed_name_by_value:\n self._attributes['current_speed'] = self._speed_name_by_value[bondState['speed']]\n else:\n self._attributes['current_speed'] = SPEED_OFF\n\n if 'direction' in bondState:\n if bondState['direction'] == Directions.REVERSE:\n self._attributes['current_direction'] = \"reverse\"\n else:\n self._attributes['current_direction'] = \"forward\"", "def _enable_pin(pin, direction):\n _write_value(pin, \"{}/export\".format(_path_prefix))\n _write_value(direction, \"{0}/gpio{1}/direction\".format(_path_prefix, pin))", "def togglePWMPinEnable(self, PWMpin):\n bitPos = PWMpin + 8\n mask = 1 << bitPos\n self._injectFault(\"PWM1PCR\",self.PCR,mask)", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def strut(self):\n self.fwd(left=50, right=50)\n for x in range(2):\n self.servo(1000)\n time.sleep(.1) \n self.servo(1500) # Look Straight\n time.sleep(1)\n self.servo(2000)\n time.sleep(.1)\n self.servo(1500)", "def _get_forward_speed(self):\n\n velocity = self._vehicle.get_velocity()\n transform = self._vehicle.get_transform()\n vel_np = np.array([velocity.x, velocity.y, velocity.z])\n pitch = np.deg2rad(transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n orientation = np.array([np.cos(pitch) * np.cos(yaw), np.cos(pitch) * np.sin(yaw), np.sin(pitch)])\n speed = np.dot(vel_np, orientation)\n return speed", "def left_forward(self):\n self.left_motor.run_forever(speed_sp=self.MAX_SPEED)", "def move_forward():\n twister = Twist(linear=Vector3(x=0.5,y=0,z=0),angular=Vector3(x=0,y=0,z=0))\n pub.publish(twister)", "def forward(self):\n self.position += 1", "def forward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(param 
* .3048)\n\t\telse:\n\t\t\tself.linear_move(riu.default_dist * .3048)", "def handle_go_forward(entry_box, mqtt_client):\n speed_string = entry_box.get()\n print('sending the go_forward message with speed', speed_string)\n mqtt_client.send_message('go_forward', [speed_string])\n # --------------------------------------------------------------------------", "def update_speed_input_step(self,curr_v):\n \n # update speed inputs \n self.speed_inputs_east*=0\n self.speed_inputs_west*=0\n self.speed_inputs_north*=0\n self.speed_inputs_south*=0\n\n if self.use_eight_directions is True: \n self.speed_inputs_north_east*=0\n self.speed_inputs_north_west*=0\n self.speed_inputs_south_east*=0\n self.speed_inputs_south_west*=0\n \n #speed_values=self.rr[:self.N_e,0] \n speed_values=np.ones((self.N_e,1))\n\n if curr_v[0]>0:\n \n # north-east\n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_east=speed_values \n \n # south-east \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_east=speed_values\n \n #east \n else:\n self.speed_inputs_east=speed_values\n\n\n elif curr_v[0]<0:\n\n # north-west \n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_west=speed_values\n\n # south-west \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_west=speed_values\n \n # west \n else:\n self.speed_inputs_west=speed_values\n\n else: \n # north\n if curr_v[1]>0:\n self.speed_inputs_north=speed_values\n\n # south\n elif curr_v[1]<0:\n self.speed_inputs_south=speed_values", "def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def _birdUpdateHandler(self, pin):\n\n # Update movement value from PIR pin status\n self.p.update(pin)\n\n if(self.p.movement == 1):\n #print(\"Motion detected\")\n self._distanceCheck()\n\n timeO = 0\n while(self.birdHere == 0 and self.p.movement == 1 and timeO < self.timeout):\n sleep(1)\n self._distanceCheck()\n timeO += 1\n\n else:\n #print(\"Motion ended\")\n self.birdHere = 0", "def motorSpeed(self, speedRPM_l, speedRPM_r):\n\n self.motors__Direction(speedRPM_l, speedRPM_r)\n\n speedRPM_l = abs(speedRPM_l)\n speedRPM_r = abs(speedRPM_r)\n\n speedRPM_l = self.constrainSpeed(speedRPM_l)\n speedRPM_r = self.constrainSpeed(speedRPM_r)\n\n# Left motor\n pwmDuration = 4095.0 * speedRPM_l / self.motorMaxRPM\n# print(\"MuleBot.motorSpeed Duration left float: \", pwmDuration)\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration left int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorLeftMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationLeft = pwmDuration\n\n# Right motor\n #Adjust for right motor being faster\n pwmDuration = 4095.0 * speedRPM_r / self.motorMaxRPM\n pwmDuration = pwmDuration * 9727 / 10000 # 98.519113 percent\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration right int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorRightMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationRight = pwmDuration", "def move_forward():\n pass", "def motorsDirection(self, direction):\n\n print (direction)\n if direction == 'r' or direction == 'R':\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)\n print (\"Direction reverse\")\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n 
self.motorDirection(self.motor2DirectionPin, self.motorForward)\n print (\"Direction forward\")", "def step(self, count, direction):\n for x in range(count):\n for bit in self.mode[::direction]:\n self.pin1.value(bit[0])\n self.pin2.value(bit[1])\n self.pin3.value(bit[2])\n self.pin4.value(bit[3])\n time.sleep(DELAY)\n self.reset()", "def nine_punishment(self):\n self.direction_clock_wise = not self.direction_clock_wise", "def int_handle_encoder(self,pin):\n\t\t#print \"DEBUG: self.int_handle_encoder! for pin: {0}\".format(pin)\n\t\t\t\n\t\tdevice = self.get_device_config_by_pin(pin)\n\t\t\n\t\tencoder_pinA = device['clk']\n\t\tencoder_pinB = device['dt']\n\n\t\tSwitch_A = self.gpio.input(encoder_pinA)\n\t\tSwitch_B = self.gpio.input(encoder_pinB)\n\t\t\n\t\t# debounce\n\t\t#if 'debounce' in self.pins_config[pin]:\n\t\t#\tdebounce = self.pins_config[pin]['debounce'] / 1000\n\t\t#\tprint \"DEBUG: sleeping: {0}\".format(debounce)\n\t\t#\tsleep(debounce)\n\t\t#\t\n\t\t#sleep(0.02)\n\t\t#if not self.gpio.input(encoder_pinA) == self.pins_config[encoder_pinA]:\n\t\t#\treturn None\n\t\t#if not self.gpio.input(encoder_pinB) == self.pins_config[encoder_pinB]:\n\t\t#\treturn None\n\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# now check if state of A or B has changed\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# if not that means that bouncing caused it\t\n\t\tCurrent_A = self.pins_state[encoder_pinA]\n\t\tCurrent_B = self.pins_state[encoder_pinB]\n\t\tif Current_A == Switch_A and Current_B == Switch_B:\t\t# Same interrupt as before (Bouncing)?\n\t\t\treturn\t\t\t\t\t\t\t\t\t\t\t\t# ignore interrupt!\n\n\t\tself.pins_state[encoder_pinA] = Switch_A\t\t\t\t# remember new state\n\t\tself.pins_state[encoder_pinB] = Switch_B\t\t\t\t# for next bouncing check\n\t\t\n\t\t# -------------------------------\n\t\tfunction = self.get_encoder_function_by_pin(pin)\n\t\tself.__mode_reset()\t\t\t\t\t\t\t\t\t# Keep resetting as long as the mode is being used\n\n\t\t# TODO, check if possible to only reset affected timer: self.ms_all[fun['mode_cycle']].\n\t\tif function is not None:\n\t\t\tif (Switch_A and Switch_B):\t\t\t\t\t\t# Both one active? 
Yes -> end of sequence\n\t\t\t\tthis_chg = datetime.now()\n\t\t\t\tdelta = this_chg - self.encoder_last_chg\n\t\t\t\t#print \"diff: {0}\".format(delta.total_seconds())\n\t\t\t\t#print type(delta.total_seconds())\t#float\n\t\t\t\tif delta.total_seconds() < 0.1:\n\t\t\t\t\tself.encoder_fast_count += 1\n\t\t\t\t\t#if self.encoder_fast_count > 3:\n\t\t\t\t\t#\tprint \"FAST {0}\".format(self.encoder_fast_count)\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tprint \"Maybe.....\"\n\t\t\t\telse:\n\t\t\t\t\tself.encoder_fast_count = 0\n\t\t\t\n\t\t\t\t\"\"\" why do we do this?\n\t\t\t\tif self.modes.active_modes():\n\t\t\t\t\t#self.reset_mode_timer(self.modes_old[0]['reset'])\n\t\t\t\t\tif 'reset' in self.mode_sets[function['mode_cycle']]:\n\t\t\t\t\t\tself.reset_mode_timer(self.mode_sets[function['mode_cycle']]['reset'])\n\t\t\t\t\"\"\"\n\n\t\t\t\tf_args = None\n\t\t\t\tif pin == encoder_pinB:\t\t\t\t\t\t\t# Turning direction depends on \n\t\t\t\t\t#COUNTER CLOCKWISE (CCW) or DECREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_ccw' in function:\t\t\t\t\n\t\t\t\t\t\tkey = 'function_fast_ccw'\n\t\t\t\t\t\tkey_args = 'function_fast_ccw_args'\n\n\t\t\t\t\telif 'function_ccw' in function:\n\t\t\t\t\t\tkey = 'function_ccw'\n\t\t\t\t\t\tkey_args = 'function_ccw_args'\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t#CLOCKWISE (CW) or INCREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_cw' in function:\n\t\t\t\t\t\tkey = 'function_fast_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\t\t\t\t\t\n\t\t\t\t\telif 'function_cw' in function:\n\t\t\t\t\t\tkey = 'function_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\n\t\t\t\t# prepare arguments\n\t\t\t\tif key_args in function:\n\t\t\t\t\tif isinstance(function[key_args],str):\n\t\t\t\t\t\t#f_args = [function[key_args]]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\telse:\n\t\t\t\t\t\t#f_args = *function[key_args]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *function[key_args])\n\t\t\t\telse:\n\t\t\t\t\tself.__exec_function_by_code(function[key])\n\t\t\t\t\t\n\t\t\t\t# execute\n\t\t\t\t#self.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\t\t\n\t\t\t\tself.encoder_last_chg = this_chg\n\t\telse:\n\t\t\tself.__printer(\"Encoder, no function\",level=LL_DEBUG)\n\n\n\t\t\tpigpio.pi()", "def DriveMotor():\n\n # cnt overflows at 25KHz (approximately)\n cnt = intbv(0, min = 0, max = CNT_MAX + 1)\n\n # 10-bit duty cycle\n duty_cycle = intbv(0)[10:]\n\n while True:\n yield clk25.posedge, rst_n.negedge\n if rst_n == LOW:\n cnt[:] = 0\n duty_cycle[:] = 0\n dir.next = HIGH_OPTO\n pwm.next = LOW_OPTO\n en_n.next = LOW_OPTO\n else:\n # accept new consign at the beginning of a period\n if cnt == 0:\n # extract duty cycle and direction\n if speed >= 0:\n duty_cycle[:] = speed\n dir.next = HIGH_OPTO\n elif -speed >= CNT_MAX: # handle -1024 case\n duty_cycle[:] = CNT_MAX\n dir.next = LOW_OPTO\n else:\n duty_cycle[:] = -speed\n dir.next = LOW_OPTO\n\n # reached consign?\n if cnt >= duty_cycle:\n pwm.next = LOW_OPTO\n else:\n pwm.next = HIGH_OPTO\n\n if cnt == CNT_MAX:\n cnt[:] = 0\n else:\n cnt += 1\n\n en_n.next = LOW_OPTO", "def set_speed(self, om_left, om_right):\n analog_om_left = self.LEFT_CONST + om_left*4\n analog_om_right = self.RIGHT_CONST - om_right*4\n self.servoWriteMicroseconds(self.PIN_LEFT, analog_om_left)\n self.servoWriteMicroseconds(self.PIN_RIGHT, analog_om_right)", "def go(self, position):\n if self._is_on:\n val = min(180.0, position)\n val = max(0.0, position)\n val = 
(val / 180.0) * (self._max_duty - self._min_duty) + self._min_duty\n val = val * 100.0\n self._pwms.set_duty(self._pin_index, val)\n else:\n raise Exception(\"You must turn the servo on by calling the `on()` method before you can tell the servo to `go()`!\")", "def advanceTan():\n global tanBallX, speed\n tanBallX += speed\n if tanBallX <= -4:\n # Reached the bottom - switch directions\n tanBallX = -4\n speed = -speed\n elif tanBallX >= 2.8:\n # Reached the top - switch directions\n tanBallX = 2.8\n speed = -speed", "def forward(self, distance):\n self.logger.debug(\"forward \" + str(distance))", "def _get_v0x01_v0x04_speed(self):\n fts = self.features\n pfts = PortFeatures01\n if fts and fts & pfts.OFPPF_10GB_FD:\n return 10 * 10**9 / 8\n if fts and fts & (pfts.OFPPF_1GB_HD | pfts.OFPPF_1GB_FD):\n return 10**9 / 8\n if fts and fts & (pfts.OFPPF_100MB_HD | pfts.OFPPF_100MB_FD):\n return 100 * 10**6 / 8\n if fts and fts & (pfts.OFPPF_10MB_HD | pfts.OFPPF_10MB_FD):\n return 10 * 10**6 / 8\n return None", "def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)", "def stream_function(self, X, Y):\n self.psi = (self.strength / (2 * np.pi) *\n np.arctan2((Y - self.yc), (X - self.xc)))", "def stopAcceleratingForward(self,event):\n self.isAcceleratingForward=False", "def update_and_publish(self):\n # 1. Find next_waypoint based on ego position & orientation\n if self._update_next_waypoint():\n\n # 2. Generate the list of next LOOKAHEAD_WPS waypoints\n num_base_wp = len(self.base_waypoints)\n last_base_wp = num_base_wp-1\n waypoint_idx = [idx % num_base_wp for idx in range(self.next_waypoint,self.next_waypoint+LOOKAHEAD_WPS)]\n final_waypoints = [self.base_waypoints[wp] for wp in waypoint_idx]\n\n # 3. If there is a red light ahead, update velocity for them\n if self.stop_on_red:\n # Start from original velocities\n self.restore_velocities(waypoint_idx)\n try:\n red_idx = waypoint_idx.index(self.red_light_waypoint)\n self.decelerate(final_waypoints, red_idx, self.stop_distance)\n except ValueError:\n # No red light available: self.red_light_waypoint is None or not in final_waypoints\n red_idx = None\n if debugging:\n v = self.get_waypoint_velocity(final_waypoints, 0)\n rospy.loginfo(\"Target velocity: %.1f, RL:%s wps ahead\", v, str(red_idx))\n\n # 3b. If we are close to the end of the circuit, make sure that we stop there\n if self.force_stop_on_last_waypoint or self.base_wp_orig_v[-1] < 1e-5:\n try:\n last_wp_idx = waypoint_idx.index(last_base_wp)\n self.decelerate(final_waypoints, last_wp_idx, 0)\n except ValueError:\n # Last waypoint is not one of the next LOOKAHEAD_WPS\n pass\n\n # 4. 
Publish waypoints to \"/final_waypoints\"\n self.publish_msg(final_waypoints)", "def right(self, speed):\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(speed)", "def set_pin_pullup(self, pin, value):\n pin = pin - 1\n if pin < 8:\n self.__port_a_pullup = self.__helper.updatebyte(\n self.__port_a_pullup, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUA, self.__port_a_pullup)\n else:\n self.__port_b_pullup = self.__helper.updatebyte(\n self.__port_b_pullup, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUB, self.__port_b_pullup)\n return", "def set_speed(self,speed):\n self.speed_p = speed", "def pwm(self, index, on=None, off=None):\n raise NotImplementedError()", "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def UpdateForward(self, deltaT):\n self.position += self.velocity * deltaT\n self.velocity += self.acceleration * deltaT", "def motors__Direction(self, speed_l, speed_r):\n\n if speed_l >= 0:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n\n if speed_r >= 0:\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n else :\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)", "def _get_v0x04_speed(self):\n fts = self.features\n pfts = PortFeatures04\n if fts and fts & pfts.OFPPF_1TB_FD:\n return 10**12 / 8\n if fts and fts & pfts.OFPPF_100GB_FD:\n return 100 * 10**9 / 8\n if fts and fts & pfts.OFPPF_40GB_FD:\n return 40 * 10**9 / 8\n return None", "def step(self):\n if self.change_rate != 0:\n self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()\n\n if self.speed < 0.5 * self._initial_speed:\n self.speed = 0.5 * self._initial_speed\n if self.speed > 2.0 * self._initial_speed:\n self.speed = 2.0 * self._initial_speed\n else:\n pass", "def __init__(self, forward):\n self.forward = forward\n self.kp = 0.0\n self.ki = 0.0\n self.kd = 0.0\n self.p_on_e = False\n self.out_min = 0.0\n self.out_max = 0.0\n self.iterm = 0.0\n self.output = 0.0\n self.set_point = 0.0\n self.last_time = 0.0\n self.last_input = 0.0\n self.init_input = 0.0", "def increase_car_speed(self):\r\n self.car_speed += 5", "def left_forward(self, state, speed):\n if state:\n self.left_motor.run_forever(speed_sp=speed)\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n else:\n self.left_motor.stop()\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.BLACK)", "def settle(self):\n if (self.angle >= self.max_angle) or (\n self.angle <= -self.max_angle\n ): # time to reverse\n print(\"reverse\", self.angle, self.max_angle)\n self.speed *= -0.9 # damped\n self.max_angle *= 0.9\n if self.speed > 0:\n self.angle = self.max_angle\n else:\n self.angle = -self.max_angle\n\n self.angle += radians(self.speed)\n print(self.angle, self.max_angle, self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)", "def rotate_servo_rel(pi, pin, pct):\n try:\n pw_old = pi.get_servo_pulsewidth(pin)\n except:\n pw_old = 0 # no PWM has been set yet, so assume 0 \n pct_old = pulsewidth2pct(pw_old)\n if pct_old == -25: # no PWM output commanded, go to center first to get a reference point\n pi.set_servo_pulsewidth(pin, pct2pulsewidth(50))\n pct_old = pulsewidth2pct(pi.get_servo_pulsewidth(pin))\n pct_cmd = pct_old + pct\n # saturate input to protect servo \n if pct_cmd < 10:\n pct_cmd = 10\n elif pct_cmd > 90:\n pct_cmd = 90\n pi.set_servo_pulsewidth(pin, 
pct2pulsewidth(pct_cmd))", "def move_forward(self, distance):\n quad_offset = self.quad_offset_mapping['forward']\n client.moveByVelocityAsync(self.velocity * quad_offset[0], self.velocity * quad_offset[1],\n 0.15, distance/self.velocity).join()\n # if self.logging:\n # self.log_arr.append(\"forward\")", "def updatePWM(self):\n v_dc = self.dcmotorSpeed * self.dcmotor_sgn # changed \"vr\" to \"v_dc\", \"rightSpeed\" to \"dcmotorSpeed\" and \"right_sgn\" to dcmotor_sgn\", RFMH_2019_02_26\n pwm_dc = self.PWMvalue(v_dc, self.DC_MOTOR_MIN_PWM,\n self.DC_MOTOR_MAX_PWM) # changed \"pwmr\" to \"pwm_dc\" and \"vr\" to \"v_dc\" and adjusted both orange constants to \"DC_MOTOR_MIN_PWM\" AND \"DC_MOTOR_MAX_PWM\", RFMH_2019_02_26\n\n # TODO: Fix this debug message. I am trying to port this code over from an old version, and I do not know\n # what v and u are supposed to be here. Timothy Scott, 5.11.2019\n # if self.debug: # where the duck does the \"u\" come from?!?, RFMH_2019_02_26\n # print(\"v = %5.3f, u = %5.3f, v_dc = %5.3f, pwm_dc = %3d\" % (\n # v, u, v_dc, pwm_dc)) # deleted \"vl\" and \"pwml\" and adjust \"vr\" to \"v_dc\" to \"pwm_dc\"\n\n if math.fabs(v_dc) < self.SPEED_TOLERANCE: # changed v_r to v_dc in if loop , RFMH_2019_02_28\n DcMotorMode = Adafruit_MotorHAT.RELEASE\n pwm_dc = 0\n elif v_dc > 0:\n DcMotorMode = Adafruit_MotorHAT.FORWARD\n elif v_dc < 0:\n DcMotorMode = Adafruit_MotorHAT.BACKWARD\n\n if not self.old_pwm_dc == pwm_dc:\n self.DcMotor.setSpeed(pwm_dc) # changed rightMotor to DcMotor and pwmr to pwm_dc , RFMH_2019_02_28\n self.DcMotor.run(DcMotorMode)\n\n self.old_pwm_dc = pwm_dc", "def reverse(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, False)\n GPIO.output(11, True)\n GPIO.output(13, False)\n GPIO.output(15, True)\n # time.sleep(sec)\n motor_direction = 'Reverse'\n return motor_direction", "def set_led(self, pin, value=0):\n value = self.int_lim(lower=PWM_MIN, upper=PWM_MAX, value=value) #Standardise the value to our correct range\n if self.iface.connected:\n try:\n self.iface.set_PWM_dutycycle(pin, value)\n except (AttributeError, IOError):\n logging.error(\" Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n else:\n logging.error(\" Interface not connected. Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n return value" ]
[ "0.726632", "0.726632", "0.66786534", "0.66583955", "0.66447824", "0.6522863", "0.6463652", "0.6370594", "0.62973475", "0.6285964", "0.6233026", "0.6130106", "0.6119961", "0.6082686", "0.60227823", "0.6020035", "0.5921472", "0.5857334", "0.58558404", "0.57667553", "0.5758451", "0.5737794", "0.571628", "0.5694123", "0.56780034", "0.56552285", "0.5648163", "0.5646829", "0.55609435", "0.5547905", "0.5525594", "0.5504033", "0.5489867", "0.5474994", "0.5471796", "0.546215", "0.5442292", "0.54369867", "0.5426305", "0.54116637", "0.54039836", "0.53955567", "0.53955567", "0.53632617", "0.53522843", "0.5342727", "0.5337775", "0.5326285", "0.5326135", "0.53181064", "0.53155833", "0.5276137", "0.5275573", "0.5262687", "0.52505773", "0.5247028", "0.52371836", "0.52270997", "0.5222567", "0.52201855", "0.5211416", "0.52087164", "0.5201736", "0.51958144", "0.5193997", "0.5192996", "0.5191104", "0.5190375", "0.51799774", "0.5179109", "0.5167561", "0.5166904", "0.51662576", "0.5164349", "0.5150207", "0.5149646", "0.5148688", "0.5140731", "0.51379544", "0.5132469", "0.51266366", "0.5122312", "0.51202166", "0.5118013", "0.509914", "0.50988287", "0.50923795", "0.5090297", "0.5089257", "0.50637174", "0.50617045", "0.5054418", "0.5047024", "0.5044288", "0.5032554", "0.5031725", "0.50315034", "0.5030493", "0.5026359", "0.50234956" ]
0.545681
36
pinForward is the forward Pin, so we change its duty cycle according to speed.
def right(self, speed):
    self.pwm_left.ChangeDutyCycle(0)
    self.pwm_right.ChangeDutyCycle(speed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, True)\n GPIO.output(11, False)\n GPIO.output(13, True)\n GPIO.output(15, False)\n # time.sleep(sec)\n motor_direction = 'Forward'\n return motor_direction", "def __init__(self, pinForward, pinBackward, pinControl):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControl = pinControl\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControl, GPIO.OUT)\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n GPIO.output(self.pinControl,GPIO.HIGH)", "def forward_left(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed) \n self.pwm_right.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(100)", "def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)", "def forward(speed, bias, biasDir):\n\t# todo: check directions for me please\n\tif biasDir == 1:\n rightMotor.run_direct(duty_cycle_sp=speed+bias)\n leftMotor.run_direct(duty_cycle_sp=speed)\n elif biasDir == -1:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed+bias)\n else:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed)", "def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)", "def motorDirection(self, motorPin, direction):\n # print \"motorPin: \", motorPin\n # print \"direction: \", direction\n GPIO.output(motorPin, direction)", "def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n 
self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.controller.forward(speed)", "def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError", "def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)", "def forward_right(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(100)", "def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)", "def fwd(dist=0): #distance is in cm\n try:\n if dist>0:\n # this casting to int doesn't seem necessary\n pulse=int(PPR*(dist//WHEEL_CIRC) )\n enc_tgt(1,1,pulse)\n except Exception as e:\n print (\"gopigo fwd: {}\".format(e))\n pass\n return write_i2c_block(ADDRESS,motor_fwd_cmd+[0,0,0])", "def startAcceleratingForward(self,event):\n self.isAcceleratingForward=True", "def move_forward(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.anticlockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.clockwise_rotate(speed + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.anticlockwise_rotate(speed + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.clockwise_rotate(speed)", "def steer(direction):\n if direction == 1:\n steerMotor.run(Adafruit_MotorHAT.FORWARD)\n steerMotor.setSpeed(255)\n if direction == -1:\n steerMotor.run(Adafruit_MotorHAT.BACKWARD)\n steerMotor.setSpeed(255)\n if direction == 0:\n steerMotor.setSpeed(0)\n steerMotor.run(Adafruit_MotorHAT.RELEASE)", "def forward_button(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=int(left_speed))\n self.right_motor.run_forever(speed_sp=int(right_speed))", "def forward(self, speed):\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rl' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fl' + self.postfix], -speed,\n ONE_SHOT_MODE)", "def GET_forward(self):\n self.roomba.DriveStraight(pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(pyrobot.VELOCITY_FAST)", "def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)", "def turn_90degrees(self, direction):\n if direction == \"right\" or direction == 1:\n self.myspeedctrl.send_speed(0,1)\n elif direction == \"left\" or direction == 2:\n self.myspeedctrl.send_speed(0,-1)\n rospy.sleep(1.61) #value found by trail and error\n self.myspeedctrl.send_speed(0,0)", "def drive_forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def 
set_pin_direction(self, pin, direction):\n pin = pin - 1\n if pin < 8:\n self.__port_a_direction = self.__helper.updatebyte(\n self.__port_a_direction, pin, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, self.__port_a_direction)\n else:\n self.__port_b_direction = self.__helper.updatebyte(\n self.__port_b_direction, pin - 8, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, self.__port_b_direction)\n return", "def input_forward(self, joy_input):\n if self.saved[joy_input]:\n value = self.saved[joy_input]\n else:\n value = self.inputs[joy_input]\n yaw_pwm = np.interp(value, [-1, 1], [0, Joystick.MAX_YAW_PWM])\n print(\"(input forward) setting yaw pwm to \" + str(yaw_pwm))\n self.publish(Topic.YAW_PWM, yaw_pwm)", "def increment_speed(self):\n self.speed += 0.0004", "def move_forward(self, speed):\n\n # Clamp the speed\n speed = clamp(delta_unit(speed), 0, delta_unit(Car.max_speed))\n\n # Appends the speed according to the direction\n rad = np.radians(self.direction)\n self.fx += speed * np.cos(rad)\n self.fy += speed * np.sin(rad)\n\n # Set marker to move\n self.moved = True", "def change_motor_speed(self, speed=0.0):\r\n if not self.enabled:\r\n self.set_neutral(braked=False)\r\n return\r\n\r\n # logging.info(\"{} Motor Speed: {}\".format(self.motor_name, speed))\r\n self.current_speed = speed # Store current set speed\r\n\r\n # If speed is < 0.0, we are driving in reverse.\r\n self.forward = True\r\n if speed < 0.0:\r\n # Normalise speed value to be in range [0, 100]\r\n speed = -speed\r\n # Store direction\r\n self.forward = False\r\n\r\n # Apply a factor to the speed to limit speed\r\n speed *= self.speed_factor\r\n\r\n # Set motor directional pins\r\n if self.forward:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 1)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 0)\r\n else:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 0)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 1)\r\n\r\n # Convert speed into PWM duty cycle\r\n # and clamp values to min/max ranges.\r\n dutycycle = speed\r\n if dutycycle < 0.0:\r\n dutycycle = 0.0\r\n elif dutycycle > self.max_speed:\r\n dutycycle = self.max_speed\r\n\r\n # Change the PWM duty cycle based on fabs() of speed value.\r\n self.PWM.ChangeDutyCycle(dutycycle)", "def skipp(self):\n for x in range(4):\n self.fwd(right=100, left=100)\n time.sleep(.5)\n self.servo(1000)\n time.sleep(.1)\n self.servo(2000)\n time.sleep(.1)\n self.fwd(right=-100, left=-100)\n time.sleep(.1)\n self.servo(-1000)\n self.stop()", "def drive(self,direction, speed=100) -> None:\n if direction == 1:\n driveMotor.run(Adafruit_MotorHAT.FORWARD)\n driveMotor.setSpeed(speed)\n if direction == -1:\n driveMotor.run(Adafruit_MotorHAT.BACKWARD)\n driveMotor.setSpeed(speed)\n if direction == 0:\n driveMotor.setSpeed(0)\n driveMotor.run(Adafruit_MotorHAT.RELEASE)", "def drive(self,direction, speed=100):\n if direction == 1:\n self.leftMotor.run(Adafruit_MotorHAT.FORWARD)\n self.rightMotor.run(Adafruit_MotorHAT.FORWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == -1:\n self.leftMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.rightMotor.run(Adafruit_MotorHAT.BACKWARD)\n self.leftMotor.setSpeed(speed)\n self.rightMotor.setSpeed(speed)\n if direction == 0:\n self.leftMotor.setSpeed(0)\n self.rightMotor.setSpeed(0)\n self.leftMotor.run(Adafruit_MotorHAT.RELEASE)\n self.rightMotor.run(Adafruit_MotorHAT.RELEASE)", "def move_forward(self, val):\n val = val * 180 / 
math.pi\n print(\"gyro diff\", self.gyro - val)\n print(\"gyrof\", self.gyro)\n if math.fabs(self.gyro - val) > 0.6:\n if self.gyro - val > 0:\n self.om_right = self.om_right - 0.7\n self.om_left = self.om_left + 0.5\n self.set_speed(self.om_left, self.om_right)\n print(\"om_l\", self.om_left)\n print(\"om_r\", self.om_right)\n else:\n self.om_right = self.om_right + 0.3\n self.om_left = self.om_left - 0.5\n self.set_speed(self.om_left, self.om_right)\n print(\"om_l\", self.om_left)\n print(\"om_r\", self.om_right)\n else:\n self.om_right = 10\n self.om_left = 10", "def forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def right_forward(self):\n self.right_motor.run_forever(speed_sp=self.MAX_SPEED)", "def left(self, speed):\n self.pwm_right.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(speed)", "def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)", "def drive_forward(self):\n print(f\"{self.make.title()} is now driving forward.\")", "def __init__(self, pwm_pin, dir_pin_1, dir_pin_2, pwm_freq):\n\t\tself._pwm_pin = pwm_pin # PWM input pin.\n\t\tself._dir_pin_1 = dir_pin_1 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._dir_pin_2 = dir_pin_2 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._pwm_freq = pwm_freq # PWM cycle.\n\n\t\tself._last_dir = 's' # Last rotation direction of this wheel. 's' indicates stop.\n\t\tself._last_dc_val = 0 # Last duty cycle value.\n\t\tself._current_dc_val = 0 # Current duty cycle value.\n\n\t\tGPIO.setmode(GPIO.BOARD)\n\n\t\t# Set the direction control GPIO output mode.\n\t\tGPIO.setup(self._pwm_pin, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_1, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_2, GPIO.OUT)\n\n\t\t# Inits PWM pin.\n\t\tself._motor_pwm = GPIO.PWM(self._pwm_pin, self._pwm_freq) # pwm_freq: Hz\n\t\tself._motor_pwm.start(0) # Set duty cycle to 0.", "def right_forward(self, state, speed):\n if state:\n self.right_motor.run_forever(speed_sp=speed)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n else:\n self.right_motor.stop()\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.BLACK)", "def moveForward(self):\n if self.onGround:\n self.vx = 4", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])", "def __init__(self, pin1=24, pin2=28, pin3=25, pin4=33):\n self.GP = GPIOProcessor()\n self.pin1 = self.GP.getPin(pin1)\n self.pin2 = self.GP.getPin(pin2)\n self.pin3 = self.GP.getPin(pin3)\n self.pin4 = self.GP.getPin(pin4)\n self.pinl = [self.pin1, self.pin2, self.pin3, self.pin4]\n\n for k in range(4):\n self.pinl[k].out()\n\n self.speed = 100.0", "def forward(self, forward):\n\n self._forward = forward", "def __spur_on_if_needed(self):\n if len(self.waypoints) < 2:\n return\n next_speed = (get_waypoint_speed(self.waypoints[0]) +\n get_waypoint_speed(self.waypoints[1])) / 2.0\n set_waypoint_speed(self.waypoints[0], next_speed)", "def slither(self):\n #writedown where we started\n starting_direction = self.get_heading()\n #start driving forward\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n 
self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.fwd()\n # throttl down the left motor\n for power in range(self.LEFT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n time.sleep(.5)\n #throttle up the left while lowring the right\n for power in range(60, self.LEFT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_LEFT, power)\n time.sleep(.5)\n # throttl down the right motor\n for power in range(self.RIGHT_DEFAULT, 60,-10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n #throttle up the right while lowring the right\n for power in range(60, self.RIGHT_DEFAULT +1, 10):\n self.set_motor_power(self.MOTOR_RIGHT, power)\n time.sleep(.5)\n \n #straighten out\n while self.get_heading() != starting_direction:\n #if I need to veer right\n if self.get_heading() < starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 90)\n self.set_motor_power(self.MOTOR_RIGHT, 60)\n #if I need to veer left\n elif self.get_heading() > starting_direction:\n self.set_motor_power(self.MOTOR_LEFT, 60)\n self.set_motor_power(self.MOTOR_RIGHT, 90)\n \n time.sleep(.1)\n self.stop()", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "def toggleIpforward(v):\n file_path = \"/proc/sys/net/ipv4/ip_forward\"\n with open(file_path, \"w\") as f:\n if v.ipForward:\n print(0, file=f)\n v.ipForward = False\n else:\n print(1, file=f)\n v.ipForward = True\n return", "def set_duty_cycle(self, pin, dutycycle):\n raise NotImplementedError", "def activatePinReading(self):\n\n for pin in self.pinsToMeasure:\n arduino.samplePinDuringCapture(self.f, self.pinMap[pin], self.wallClock)", "def update(self):\n bondState = self._bond.getDeviceState(self._deviceId)\n if 'power' in bondState:\n self._state = True if bondState['power'] == 1 else False\n if self._state and bondState['speed'] in self._speed_name_by_value:\n self._attributes['current_speed'] = self._speed_name_by_value[bondState['speed']]\n else:\n self._attributes['current_speed'] = SPEED_OFF\n\n if 'direction' in bondState:\n if bondState['direction'] == Directions.REVERSE:\n self._attributes['current_direction'] = \"reverse\"\n else:\n self._attributes['current_direction'] = \"forward\"", "def _enable_pin(pin, direction):\n _write_value(pin, \"{}/export\".format(_path_prefix))\n _write_value(direction, \"{0}/gpio{1}/direction\".format(_path_prefix, pin))", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def togglePWMPinEnable(self, PWMpin):\n bitPos = PWMpin + 8\n mask = 1 << bitPos\n self._injectFault(\"PWM1PCR\",self.PCR,mask)", "def strut(self):\n self.fwd(left=50, right=50)\n for x in range(2):\n self.servo(1000)\n time.sleep(.1) \n self.servo(1500) # Look Straight\n time.sleep(1)\n self.servo(2000)\n time.sleep(.1)\n self.servo(1500)", "def _get_forward_speed(self):\n\n velocity = self._vehicle.get_velocity()\n transform = self._vehicle.get_transform()\n vel_np = np.array([velocity.x, velocity.y, velocity.z])\n pitch = np.deg2rad(transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n orientation = np.array([np.cos(pitch) * np.cos(yaw), np.cos(pitch) * np.sin(yaw), np.sin(pitch)])\n speed = np.dot(vel_np, orientation)\n return speed", "def left_forward(self):\n self.left_motor.run_forever(speed_sp=self.MAX_SPEED)", "def move_forward():\n twister = Twist(linear=Vector3(x=0.5,y=0,z=0),angular=Vector3(x=0,y=0,z=0))\n pub.publish(twister)", "def 
forward(self):\n self.position += 1", "def forward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(param * .3048)\n\t\telse:\n\t\t\tself.linear_move(riu.default_dist * .3048)", "def handle_go_forward(entry_box, mqtt_client):\n speed_string = entry_box.get()\n print('sending the go_forward message with speed', speed_string)\n mqtt_client.send_message('go_forward', [speed_string])\n # --------------------------------------------------------------------------", "def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def update_speed_input_step(self,curr_v):\n \n # update speed inputs \n self.speed_inputs_east*=0\n self.speed_inputs_west*=0\n self.speed_inputs_north*=0\n self.speed_inputs_south*=0\n\n if self.use_eight_directions is True: \n self.speed_inputs_north_east*=0\n self.speed_inputs_north_west*=0\n self.speed_inputs_south_east*=0\n self.speed_inputs_south_west*=0\n \n #speed_values=self.rr[:self.N_e,0] \n speed_values=np.ones((self.N_e,1))\n\n if curr_v[0]>0:\n \n # north-east\n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_east=speed_values \n \n # south-east \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_east=speed_values\n \n #east \n else:\n self.speed_inputs_east=speed_values\n\n\n elif curr_v[0]<0:\n\n # north-west \n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_west=speed_values\n\n # south-west \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_west=speed_values\n \n # west \n else:\n self.speed_inputs_west=speed_values\n\n else: \n # north\n if curr_v[1]>0:\n self.speed_inputs_north=speed_values\n\n # south\n elif curr_v[1]<0:\n self.speed_inputs_south=speed_values", "def _birdUpdateHandler(self, pin):\n\n # Update movement value from PIR pin status\n self.p.update(pin)\n\n if(self.p.movement == 1):\n #print(\"Motion detected\")\n self._distanceCheck()\n\n timeO = 0\n while(self.birdHere == 0 and self.p.movement == 1 and timeO < self.timeout):\n sleep(1)\n self._distanceCheck()\n timeO += 1\n\n else:\n #print(\"Motion ended\")\n self.birdHere = 0", "def move_forward():\n pass", "def motorSpeed(self, speedRPM_l, speedRPM_r):\n\n self.motors__Direction(speedRPM_l, speedRPM_r)\n\n speedRPM_l = abs(speedRPM_l)\n speedRPM_r = abs(speedRPM_r)\n\n speedRPM_l = self.constrainSpeed(speedRPM_l)\n speedRPM_r = self.constrainSpeed(speedRPM_r)\n\n# Left motor\n pwmDuration = 4095.0 * speedRPM_l / self.motorMaxRPM\n# print(\"MuleBot.motorSpeed Duration left float: \", pwmDuration)\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration left int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorLeftMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationLeft = pwmDuration\n\n# Right motor\n #Adjust for right motor being faster\n pwmDuration = 4095.0 * speedRPM_r / self.motorMaxRPM\n pwmDuration = pwmDuration * 9727 / 10000 # 98.519113 percent\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration right int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorRightMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationRight = pwmDuration", "def motorsDirection(self, direction):\n\n print (direction)\n if direction == 'r' or direction == 'R':\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)\n 
print (\"Direction reverse\")\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n print (\"Direction forward\")", "def step(self, count, direction):\n for x in range(count):\n for bit in self.mode[::direction]:\n self.pin1.value(bit[0])\n self.pin2.value(bit[1])\n self.pin3.value(bit[2])\n self.pin4.value(bit[3])\n time.sleep(DELAY)\n self.reset()", "def DriveMotor():\n\n # cnt overflows at 25KHz (approximately)\n cnt = intbv(0, min = 0, max = CNT_MAX + 1)\n\n # 10-bit duty cycle\n duty_cycle = intbv(0)[10:]\n\n while True:\n yield clk25.posedge, rst_n.negedge\n if rst_n == LOW:\n cnt[:] = 0\n duty_cycle[:] = 0\n dir.next = HIGH_OPTO\n pwm.next = LOW_OPTO\n en_n.next = LOW_OPTO\n else:\n # accept new consign at the beginning of a period\n if cnt == 0:\n # extract duty cycle and direction\n if speed >= 0:\n duty_cycle[:] = speed\n dir.next = HIGH_OPTO\n elif -speed >= CNT_MAX: # handle -1024 case\n duty_cycle[:] = CNT_MAX\n dir.next = LOW_OPTO\n else:\n duty_cycle[:] = -speed\n dir.next = LOW_OPTO\n\n # reached consign?\n if cnt >= duty_cycle:\n pwm.next = LOW_OPTO\n else:\n pwm.next = HIGH_OPTO\n\n if cnt == CNT_MAX:\n cnt[:] = 0\n else:\n cnt += 1\n\n en_n.next = LOW_OPTO", "def nine_punishment(self):\n self.direction_clock_wise = not self.direction_clock_wise", "def int_handle_encoder(self,pin):\n\t\t#print \"DEBUG: self.int_handle_encoder! for pin: {0}\".format(pin)\n\t\t\t\n\t\tdevice = self.get_device_config_by_pin(pin)\n\t\t\n\t\tencoder_pinA = device['clk']\n\t\tencoder_pinB = device['dt']\n\n\t\tSwitch_A = self.gpio.input(encoder_pinA)\n\t\tSwitch_B = self.gpio.input(encoder_pinB)\n\t\t\n\t\t# debounce\n\t\t#if 'debounce' in self.pins_config[pin]:\n\t\t#\tdebounce = self.pins_config[pin]['debounce'] / 1000\n\t\t#\tprint \"DEBUG: sleeping: {0}\".format(debounce)\n\t\t#\tsleep(debounce)\n\t\t#\t\n\t\t#sleep(0.02)\n\t\t#if not self.gpio.input(encoder_pinA) == self.pins_config[encoder_pinA]:\n\t\t#\treturn None\n\t\t#if not self.gpio.input(encoder_pinB) == self.pins_config[encoder_pinB]:\n\t\t#\treturn None\n\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# now check if state of A or B has changed\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# if not that means that bouncing caused it\t\n\t\tCurrent_A = self.pins_state[encoder_pinA]\n\t\tCurrent_B = self.pins_state[encoder_pinB]\n\t\tif Current_A == Switch_A and Current_B == Switch_B:\t\t# Same interrupt as before (Bouncing)?\n\t\t\treturn\t\t\t\t\t\t\t\t\t\t\t\t# ignore interrupt!\n\n\t\tself.pins_state[encoder_pinA] = Switch_A\t\t\t\t# remember new state\n\t\tself.pins_state[encoder_pinB] = Switch_B\t\t\t\t# for next bouncing check\n\t\t\n\t\t# -------------------------------\n\t\tfunction = self.get_encoder_function_by_pin(pin)\n\t\tself.__mode_reset()\t\t\t\t\t\t\t\t\t# Keep resetting as long as the mode is being used\n\n\t\t# TODO, check if possible to only reset affected timer: self.ms_all[fun['mode_cycle']].\n\t\tif function is not None:\n\t\t\tif (Switch_A and Switch_B):\t\t\t\t\t\t# Both one active? 
Yes -> end of sequence\n\t\t\t\tthis_chg = datetime.now()\n\t\t\t\tdelta = this_chg - self.encoder_last_chg\n\t\t\t\t#print \"diff: {0}\".format(delta.total_seconds())\n\t\t\t\t#print type(delta.total_seconds())\t#float\n\t\t\t\tif delta.total_seconds() < 0.1:\n\t\t\t\t\tself.encoder_fast_count += 1\n\t\t\t\t\t#if self.encoder_fast_count > 3:\n\t\t\t\t\t#\tprint \"FAST {0}\".format(self.encoder_fast_count)\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tprint \"Maybe.....\"\n\t\t\t\telse:\n\t\t\t\t\tself.encoder_fast_count = 0\n\t\t\t\n\t\t\t\t\"\"\" why do we do this?\n\t\t\t\tif self.modes.active_modes():\n\t\t\t\t\t#self.reset_mode_timer(self.modes_old[0]['reset'])\n\t\t\t\t\tif 'reset' in self.mode_sets[function['mode_cycle']]:\n\t\t\t\t\t\tself.reset_mode_timer(self.mode_sets[function['mode_cycle']]['reset'])\n\t\t\t\t\"\"\"\n\n\t\t\t\tf_args = None\n\t\t\t\tif pin == encoder_pinB:\t\t\t\t\t\t\t# Turning direction depends on \n\t\t\t\t\t#COUNTER CLOCKWISE (CCW) or DECREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_ccw' in function:\t\t\t\t\n\t\t\t\t\t\tkey = 'function_fast_ccw'\n\t\t\t\t\t\tkey_args = 'function_fast_ccw_args'\n\n\t\t\t\t\telif 'function_ccw' in function:\n\t\t\t\t\t\tkey = 'function_ccw'\n\t\t\t\t\t\tkey_args = 'function_ccw_args'\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t#CLOCKWISE (CW) or INCREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_cw' in function:\n\t\t\t\t\t\tkey = 'function_fast_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\t\t\t\t\t\n\t\t\t\t\telif 'function_cw' in function:\n\t\t\t\t\t\tkey = 'function_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\n\t\t\t\t# prepare arguments\n\t\t\t\tif key_args in function:\n\t\t\t\t\tif isinstance(function[key_args],str):\n\t\t\t\t\t\t#f_args = [function[key_args]]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\telse:\n\t\t\t\t\t\t#f_args = *function[key_args]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *function[key_args])\n\t\t\t\telse:\n\t\t\t\t\tself.__exec_function_by_code(function[key])\n\t\t\t\t\t\n\t\t\t\t# execute\n\t\t\t\t#self.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\t\t\n\t\t\t\tself.encoder_last_chg = this_chg\n\t\telse:\n\t\t\tself.__printer(\"Encoder, no function\",level=LL_DEBUG)\n\n\n\t\t\tpigpio.pi()", "def set_speed(self, om_left, om_right):\n analog_om_left = self.LEFT_CONST + om_left*4\n analog_om_right = self.RIGHT_CONST - om_right*4\n self.servoWriteMicroseconds(self.PIN_LEFT, analog_om_left)\n self.servoWriteMicroseconds(self.PIN_RIGHT, analog_om_right)", "def forward(self, distance):\n self.logger.debug(\"forward \" + str(distance))", "def go(self, position):\n if self._is_on:\n val = min(180.0, position)\n val = max(0.0, position)\n val = (val / 180.0) * (self._max_duty - self._min_duty) + self._min_duty\n val = val * 100.0\n self._pwms.set_duty(self._pin_index, val)\n else:\n raise Exception(\"You must turn the servo on by calling the `on()` method before you can tell the servo to `go()`!\")", "def advanceTan():\n global tanBallX, speed\n tanBallX += speed\n if tanBallX <= -4:\n # Reached the bottom - switch directions\n tanBallX = -4\n speed = -speed\n elif tanBallX >= 2.8:\n # Reached the top - switch directions\n tanBallX = 2.8\n speed = -speed", "def _get_v0x01_v0x04_speed(self):\n fts = self.features\n pfts = PortFeatures01\n if fts and fts & pfts.OFPPF_10GB_FD:\n return 10 * 10**9 / 8\n if fts and fts & (pfts.OFPPF_1GB_HD | pfts.OFPPF_1GB_FD):\n return 10**9 / 8\n if fts and 
fts & (pfts.OFPPF_100MB_HD | pfts.OFPPF_100MB_FD):\n return 100 * 10**6 / 8\n if fts and fts & (pfts.OFPPF_10MB_HD | pfts.OFPPF_10MB_FD):\n return 10 * 10**6 / 8\n return None", "def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)", "def stream_function(self, X, Y):\n self.psi = (self.strength / (2 * np.pi) *\n np.arctan2((Y - self.yc), (X - self.xc)))", "def stopAcceleratingForward(self,event):\n self.isAcceleratingForward=False", "def update_and_publish(self):\n # 1. Find next_waypoint based on ego position & orientation\n if self._update_next_waypoint():\n\n # 2. Generate the list of next LOOKAHEAD_WPS waypoints\n num_base_wp = len(self.base_waypoints)\n last_base_wp = num_base_wp-1\n waypoint_idx = [idx % num_base_wp for idx in range(self.next_waypoint,self.next_waypoint+LOOKAHEAD_WPS)]\n final_waypoints = [self.base_waypoints[wp] for wp in waypoint_idx]\n\n # 3. If there is a red light ahead, update velocity for them\n if self.stop_on_red:\n # Start from original velocities\n self.restore_velocities(waypoint_idx)\n try:\n red_idx = waypoint_idx.index(self.red_light_waypoint)\n self.decelerate(final_waypoints, red_idx, self.stop_distance)\n except ValueError:\n # No red light available: self.red_light_waypoint is None or not in final_waypoints\n red_idx = None\n if debugging:\n v = self.get_waypoint_velocity(final_waypoints, 0)\n rospy.loginfo(\"Target velocity: %.1f, RL:%s wps ahead\", v, str(red_idx))\n\n # 3b. If we are close to the end of the circuit, make sure that we stop there\n if self.force_stop_on_last_waypoint or self.base_wp_orig_v[-1] < 1e-5:\n try:\n last_wp_idx = waypoint_idx.index(last_base_wp)\n self.decelerate(final_waypoints, last_wp_idx, 0)\n except ValueError:\n # Last waypoint is not one of the next LOOKAHEAD_WPS\n pass\n\n # 4. 
Publish waypoints to \"/final_waypoints\"\n self.publish_msg(final_waypoints)", "def set_pin_pullup(self, pin, value):\n pin = pin - 1\n if pin < 8:\n self.__port_a_pullup = self.__helper.updatebyte(\n self.__port_a_pullup, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUA, self.__port_a_pullup)\n else:\n self.__port_b_pullup = self.__helper.updatebyte(\n self.__port_b_pullup, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUB, self.__port_b_pullup)\n return", "def pwm(self, index, on=None, off=None):\n raise NotImplementedError()", "def set_speed(self,speed):\n self.speed_p = speed", "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def UpdateForward(self, deltaT):\n self.position += self.velocity * deltaT\n self.velocity += self.acceleration * deltaT", "def motors__Direction(self, speed_l, speed_r):\n\n if speed_l >= 0:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n\n if speed_r >= 0:\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n else :\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)", "def _get_v0x04_speed(self):\n fts = self.features\n pfts = PortFeatures04\n if fts and fts & pfts.OFPPF_1TB_FD:\n return 10**12 / 8\n if fts and fts & pfts.OFPPF_100GB_FD:\n return 100 * 10**9 / 8\n if fts and fts & pfts.OFPPF_40GB_FD:\n return 40 * 10**9 / 8\n return None", "def step(self):\n if self.change_rate != 0:\n self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()\n\n if self.speed < 0.5 * self._initial_speed:\n self.speed = 0.5 * self._initial_speed\n if self.speed > 2.0 * self._initial_speed:\n self.speed = 2.0 * self._initial_speed\n else:\n pass", "def __init__(self, forward):\n self.forward = forward\n self.kp = 0.0\n self.ki = 0.0\n self.kd = 0.0\n self.p_on_e = False\n self.out_min = 0.0\n self.out_max = 0.0\n self.iterm = 0.0\n self.output = 0.0\n self.set_point = 0.0\n self.last_time = 0.0\n self.last_input = 0.0\n self.init_input = 0.0", "def increase_car_speed(self):\r\n self.car_speed += 5", "def left_forward(self, state, speed):\n if state:\n self.left_motor.run_forever(speed_sp=speed)\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n else:\n self.left_motor.stop()\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.BLACK)", "def move_forward(self, distance):\n quad_offset = self.quad_offset_mapping['forward']\n client.moveByVelocityAsync(self.velocity * quad_offset[0], self.velocity * quad_offset[1],\n 0.15, distance/self.velocity).join()\n # if self.logging:\n # self.log_arr.append(\"forward\")", "def settle(self):\n if (self.angle >= self.max_angle) or (\n self.angle <= -self.max_angle\n ): # time to reverse\n print(\"reverse\", self.angle, self.max_angle)\n self.speed *= -0.9 # damped\n self.max_angle *= 0.9\n if self.speed > 0:\n self.angle = self.max_angle\n else:\n self.angle = -self.max_angle\n\n self.angle += radians(self.speed)\n print(self.angle, self.max_angle, self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)", "def updatePWM(self):\n v_dc = self.dcmotorSpeed * self.dcmotor_sgn # changed \"vr\" to \"v_dc\", \"rightSpeed\" to \"dcmotorSpeed\" and \"right_sgn\" to dcmotor_sgn\", RFMH_2019_02_26\n pwm_dc = self.PWMvalue(v_dc, self.DC_MOTOR_MIN_PWM,\n self.DC_MOTOR_MAX_PWM) # changed \"pwmr\" to \"pwm_dc\" and \"vr\" to \"v_dc\" and adjusted both orange constants to \"DC_MOTOR_MIN_PWM\" AND 
\"DC_MOTOR_MAX_PWM\", RFMH_2019_02_26\n\n # TODO: Fix this debug message. I am trying to port this code over from an old version, and I do not know\n # what v and u are supposed to be here. Timothy Scott, 5.11.2019\n # if self.debug: # where the duck does the \"u\" come from?!?, RFMH_2019_02_26\n # print(\"v = %5.3f, u = %5.3f, v_dc = %5.3f, pwm_dc = %3d\" % (\n # v, u, v_dc, pwm_dc)) # deleted \"vl\" and \"pwml\" and adjust \"vr\" to \"v_dc\" to \"pwm_dc\"\n\n if math.fabs(v_dc) < self.SPEED_TOLERANCE: # changed v_r to v_dc in if loop , RFMH_2019_02_28\n DcMotorMode = Adafruit_MotorHAT.RELEASE\n pwm_dc = 0\n elif v_dc > 0:\n DcMotorMode = Adafruit_MotorHAT.FORWARD\n elif v_dc < 0:\n DcMotorMode = Adafruit_MotorHAT.BACKWARD\n\n if not self.old_pwm_dc == pwm_dc:\n self.DcMotor.setSpeed(pwm_dc) # changed rightMotor to DcMotor and pwmr to pwm_dc , RFMH_2019_02_28\n self.DcMotor.run(DcMotorMode)\n\n self.old_pwm_dc = pwm_dc", "def rotate_servo_rel(pi, pin, pct):\n try:\n pw_old = pi.get_servo_pulsewidth(pin)\n except:\n pw_old = 0 # no PWM has been set yet, so assume 0 \n pct_old = pulsewidth2pct(pw_old)\n if pct_old == -25: # no PWM output commanded, go to center first to get a reference point\n pi.set_servo_pulsewidth(pin, pct2pulsewidth(50))\n pct_old = pulsewidth2pct(pi.get_servo_pulsewidth(pin))\n pct_cmd = pct_old + pct\n # saturate input to protect servo \n if pct_cmd < 10:\n pct_cmd = 10\n elif pct_cmd > 90:\n pct_cmd = 90\n pi.set_servo_pulsewidth(pin, pct2pulsewidth(pct_cmd))", "def reverse(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, False)\n GPIO.output(11, True)\n GPIO.output(13, False)\n GPIO.output(15, True)\n # time.sleep(sec)\n motor_direction = 'Reverse'\n return motor_direction", "def set_led(self, pin, value=0):\n value = self.int_lim(lower=PWM_MIN, upper=PWM_MAX, value=value) #Standardise the value to our correct range\n if self.iface.connected:\n try:\n self.iface.set_PWM_dutycycle(pin, value)\n except (AttributeError, IOError):\n logging.error(\" Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n else:\n logging.error(\" Interface not connected. Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n return value" ]
[ "0.7267332", "0.7267332", "0.66788316", "0.66588324", "0.66452456", "0.6522946", "0.64650464", "0.6370248", "0.6297182", "0.6286759", "0.6234336", "0.6129071", "0.61207354", "0.60830605", "0.6021252", "0.6019744", "0.5922687", "0.58570683", "0.5856794", "0.57674026", "0.5759413", "0.5737956", "0.5714141", "0.5692802", "0.5678496", "0.5654111", "0.56484044", "0.5645786", "0.55621254", "0.5547726", "0.55245554", "0.5504462", "0.5490348", "0.5474369", "0.5472551", "0.5462932", "0.54566246", "0.5440891", "0.54375577", "0.54267484", "0.5412307", "0.5404999", "0.53960466", "0.53960466", "0.5362945", "0.5351662", "0.534471", "0.5337986", "0.5325374", "0.532454", "0.53181136", "0.5314115", "0.52759933", "0.5275445", "0.52611953", "0.52493405", "0.524873", "0.52356386", "0.5229099", "0.52234954", "0.52215767", "0.52128935", "0.52096397", "0.520367", "0.5196193", "0.5194782", "0.51924723", "0.519103", "0.5190705", "0.51802284", "0.51778376", "0.51657194", "0.51654845", "0.51650554", "0.5162503", "0.51498044", "0.51491714", "0.5147926", "0.5141137", "0.51373327", "0.5132007", "0.5127466", "0.512286", "0.5115778", "0.5098812", "0.50984097", "0.50931364", "0.50915813", "0.5089161", "0.5064066", "0.50609237", "0.5054922", "0.50457007", "0.5045043", "0.5032321", "0.50312126", "0.50299054", "0.5029683", "0.5025306", "0.5022861" ]
0.51197535
83
Set the duty cycle of both control pins to zero to stop the motor.
def stop(self):
    self.pwm_forward.ChangeDutyCycle(0)
    self.pwm_backward.ChangeDutyCycle(0)
    self.pwm_left.ChangeDutyCycle(0)
    self.pwm_right.ChangeDutyCycle(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(0)", "def stop_motor(self):\n self.output(self.steering_pin, 0)\n self.pi.set_servo_pulsewidth(self.steering_pin, 0)", "def stop(self):\n\t\tGPIO.output(self._dir_pin_1, GPIO.HIGH)\n\t\tGPIO.output(self._dir_pin_2, GPIO.HIGH)\n\t\tself._last_dir = 's'\n\t\t# self._motor_pwm.ChangeDutyCycle(0)", "def turnOffMotors(self) -> None:\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)", "def stop(self):\n self.left_motor.stop()\n self.right_motor.stop()", "def turnOffMotors(self):\n self.mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)", "def servo_off(self):\n self.logger.info('Setting servo OFF')\n self.electronics.move_servo(0)\n self.config['servo']['status'] = 0", "def turn_off(self):\n self.set_pin(0, -1)\n self.set_pin(1, -1)\n self.set_pin(2, -1)", "def servo_off(self):\n msg = b'\\x0C\\x00'\n self.__bt.write(msg)", "def emitters_off(self):\n self.wp.digitalWrite(self.LEDON_PIN, self.wp.LOW)\n self.wp.delayMicroseconds(20)", "def turn_off(self, **kwargs):\n self._attributes['current_speed'] = SPEED_OFF\n self._bond.turnOff(self._deviceId)", "def stop(self):\n self.motor.stop()", "def disable_relays(self):\n #ensure clock low and data high\n self.e.clear_bit(7)\n self.e.set_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)\n\n #clear the data line\n self.e.clear_bit(5)", "def right(self, speed):\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(speed)", "def stop_motors(self) -> None:\n motor_1 = self.robot.all_services.get('motor_1')\n motor_2 = self.robot.all_services.get('motor_2')\n if motor_1 is not None or motor_1 is not None:\n motor_1.stop_motor()\n motor_2.stop_motor()\n log.info(\"Motors stopped\")\n else:\n log.warning(\"One of the motors is not enabled!\")", "def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)", "def stop(self):\n self.turnOffMotors()", "def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)", "def set_duty_cycle(self, pin, dutycycle):\n raise NotImplementedError", "def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)", "def stop(self):\n self.right_motor.stop(stop_action='brake')\n self.left_motor.stop(stop_action='brake')", "def turn_off(self, **kwargs: Any) -> None:\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands = [{\"code\": DPCODE_LIGHT, \"value\": False}]\n else:\n commands 
= [{\"code\": DPCODE_SWITCH, \"value\": False}]\n self._send_command(commands)", "def poweron(self) -> None:\n self.servo_reset()", "def stop(self) -> None:\n turnOffMotors()", "def _on_stop_cycle(self, kwargs: dict) -> None:\n self._cancel_automation()\n self.toggle(state=\"off\")", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)", "def zeroMotor(self):\n\t\tpass", "def power_down (self, DAC_A =1, DAC_B=1):\n try :\n bus.write_i2c_block_data(self.address, self.__pointer_register( power_down, DAC_A, DAC_B), [0,0])\n \n except IOError:\n print (\"Device is not connected\")", "def set_pwm(self, duty_cycle):\n PWM.set_duty_cycle(self.pwm_pin, duty_cycle)", "def stop(self):\n\n self.active = False\n angle_pwm = self.angle_to_pmw(const.Driving.NEUTRAL_STEERING_ANGLE)\n self.pwm.set_pwm(0, 0, angle_pwm)", "def shutdown(self):\n self.running = False\n ev3.Leds.all_off()\n self.left_motor.stop()\n self.right_motor.stop()", "def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])", "async def stop(self, *_) -> None:\n logger.debug(\"Stopping motors...\")\n await self.rmotor.stop()\n await self.lmotor.stop()", "def led_duty_cycle(val):\n set_tmr_ocr(TMR1, OCRxB, val)", "def stop(self):\n self.change_power(0)", "def stop():\n set_power(0)", "def stop(self):\n status=self.objdll.USBIO_GPIOWrite(self.id, c_byte(0b000), c_byte(0)) #ENA=0, DIR=0, bit0=0\n print(f\"Set all ports to LOW and stopped the step-motor:{status}\")\n\n return status", "def reset_and_stop(self):\n self.enabled = False\n self.start_time = None", "def set_duty_cycle(self, value):\n self._mixer.duty_cycle = value", "def stop():\n status = write_i2c_block(ADDRESS,stop_cmd+[0,0,0])\n set_left_speed(0)\n set_right_speed(0)\n return status", "def __update_speed_stop(self):\n if self.velocidade > SERVO_DUTY_CYCLE_MEIO:\n self.velocidade -= self.incremento_veloc\n \n # Para mesmo que haja arredondamento de float\n if self.velocidade <= SERVO_DUTY_CYCLE_MEIO:\n self.velocidade = SERVO_DUTY_CYCLE_MEIO\n self.servo.set_duty_cycle(0.0)\n else:\n self.servo.set_duty_cycle(self.velocidade)\n elif self.velocidade < SERVO_DUTY_CYCLE_MEIO:\n self.velocidade += self.incremento_veloc\n \n # Para mesmo que haja arredondamento de float\n if self.velocidade >= SERVO_DUTY_CYCLE_MEIO:\n self.velocidade = SERVO_DUTY_CYCLE_MEIO\n self.servo.set_duty_cycle(0.0)\n else:\n self.servo.set_duty_cycle(self.velocidade)\n else:\n self.servo.set_duty_cycle(0.0)", "def analogDaSetZero(self, Startchannel, Stopchannel=None, Debug=0): \n self.bib.DapiSpecialCommand.argtypes = \\\n [c_ulong, c_ulong, c_ulong, c_ulong, c_ulong]\n self.bib.DapiSpecialCommand.restype = None\n if Stopchannel == None or Stopchannel == Startchannel:\n #Stopchannel = \n for Startchannel in range(Startchannel, Startchannel+1):\n self.bib.DapiSpecialCommand(self.handle, self.DAPI_SPECIAL_CMD_DA, \\\n self.DAPI_SPECIAL_DA_PAR_DA_LOAD_DEFAULT, Startchannel, 0) \n print(\"D/A Channel\",Startchannel,\"set to zero volts.\")\n else:\n for Startchannel in range(Startchannel, Stopchannel+1):\n self.bib.DapiSpecialCommand(self.handle, self.DAPI_SPECIAL_CMD_DA, \\\n self.DAPI_SPECIAL_DA_PAR_DA_LOAD_DEFAULT, Startchannel, 0) \n print(\"D/A Channel\",Startchannel,\"set to zero volts.\")", "def turn_off(self) -> None:\n self._monoprice.set_power(self._zone_id, 
False)", "def _on_stop_cycle(self, kwargs: dict) -> None:\n for handle in (HANDLE_TOGGLE_IN_WINDOW, HANDLE_TOGGLE_OUT_WINDOW):\n if handle not in self.handles:\n continue\n name = self.handles.pop(handle)\n self.cancel_timer(name)\n\n self.toggle(opposite_of=self.properties[CONF_STATE])", "def daemonControlStop (self):\n self.stop()", "def turn_off(self):\n self.write(\"OUT0\\n\")", "def rc_off(self):\n # reset control values\n channels = [1500] * 8\n controlout = OverrideRCIn(channels=channels)\n self.contolp.publish(controlout)\n self.rate.sleep()\n # send twice to make sure\n controlout = OverrideRCIn(channels=channels)\n self.contolp.publish(controlout)", "def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)", "def turn_off(self, **kwargs):\n self._is_on = False", "def turn_off_motors():\n MOTOR_HAT.release_motors()", "def setOff(self, command):\r\n self.setDriver('ST', 0)", "def turn_off(self, **kwargs: Any) -> None:\n self._device.power_on = False\n _LOGGER.debug(\"Turn off light %s\", self._device.ip)", "def left(self, speed):\n self.pwm_right.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(speed)", "def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)", "def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)", "def _stop_all(self):\n # LEDs\n self.cam_led.off\n self.analysis_led[0].off\n self.analysis_led[1].off\n self.error.off\n \n # motors\n self.motor.stop()\n self.wash.stop()", "def led_off(args):\n _check_mcu()\n led_on_bytes = CMD_MODULE_ID_LEDS | 0x01\n i2c.write_bytes_to_address(MCU_MOUTH_ADDRESS, led_on_bytes)", "def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")", "def turn_off(self):\n self._interrupt_flash()\n if self.on:\n GPIO.output(self.pin, GPIO.LOW)\n self.on = False", "def stop(self, **kwargs):\n self.turn_off()", "def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = 
GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)", "def turn_off(self, **kwargs):\n self.smartplug.turn_off()", "def set_throttle(self, val):\n\n # Set the motor to stop mode.\n if val is None:\n self.in1.off()\n self.in2.off()\n self.pwm.value = 1.0\n\n else:\n # Determine the orientation of the motor.\n if val > 0.0:\n self.in1.off()\n self.in2.on()\n else:\n self.in1.on()\n self.in2.off()\n\n # Clamp the pwm signal (throttle) to [0, 1].\n pwm = max(0.0, min(abs(val), 1.0))\n\n # Note that setting PWM to low will brake the motor no matter what\n # in1 and in2 input is.\n self.pwm.value = pwm", "def turn_off(self, **kwargs):\n set_sonoff_state(self._host, \"off\")\n self._state = False", "def zeroing(self):\n x_zeroed, y_zeroed, z_zeroed = False, False, False\n self._stepper_x.set_stepper(defines.STEPPER_X_MAX_HZ / 2, -defines.BOARD_X_LENGTH)\n self._stepper_y_left.set_stepper(defines.STEPPER_Y_MAX_HZ / 2, -defines.BOARD_Y_LENGTH)\n self._stepper_y_right.set_stepper(defines.STEPPER_Y_MAX_HZ / 2, -defines.BOARD_Y_LENGTH)\n self._stepper_z.set_stepper(defines.STEPPER_Z_MAX_HZ / 2, -defines.BOARD_Z_LENGTH)\n\n while x_zeroed is False or y_zeroed is False or z_zeroed is False:\n if x_zeroed is False and self._switch_reset_x.get_state() is True:\n self._stepper_x.set_stepper(0, 0)\n x_zeroed = True\n\n if y_zeroed is False and self._switch_reset_y.get_state() is True:\n self._stepper_y_left.set_stepper(0, 0)\n self._stepper_y_right.set_stepper(0, 0)\n y_zeroed = True\n\n if z_zeroed is False and self._switch_reset_z.get_state() is True:\n self._stepper_z.set_stepper(0, 0)\n z_zeroed = True", "def turn_off(self):\n print(\"Turning the lights off\")\n self.led.all_off()\n self.client.publish(STATE_TOPIC, OFF) #publish", "def turn_off(self, **kwargs):\n if self.is_on:\n _LOGGER.debug(\"Sending STOP command to: %s\", self._name)\n self._api.control('STOP')\n self._mower_status = STATUS_EXECUTING_STOP\n self.schedule_update_ha_state()", "def turn_off(self):\n GPIO.output(self.gpio, False) # turn off light", "def pswitchoff(chan) :\n s.phaseSwitching(False, chan)", "def turn_off(self, **kwargs):\n self.robot.pause_cleaning()\n time.sleep(1)\n self.robot.send_to_base()", "def togglePWMEnable(self):\n mask = 1 << 3\n self._injectFault(\"PWM1TCR\", self.TCR, mask)", "def set_PWM_dutycycle(user_gpio, dutycycle):\n return _u2i(_pigpio_command(_control, _PI_CMD_PWM, user_gpio, dutycycle))", "def disable_cl2(self):\n self.write_versa5(0x31,0x80) ## Disable divider output for clock2\n self.write_versa5(0x63,0x00) ## Disable clock2 output", "def off(self):\n if self._is_on:\n self._pwms.disable(self._pin_index)\n self._is_on = False", "def turnOff(self):\n self.write(\"E;O0;E;\")\n return self.output()", "def set_dacs_zero(self):\n # First set all \"parameters\" to zero.\n # this ensures that the safe slow rampdown is used and that the\n # correct values are known to the instrument.\n for ch in self.channel_map:\n self.set(ch, 0)\n\n # \"brute-set\" all sources in known modules to zero, this is because\n # this is also a safety method that should ensure we are in an all\n # zero state.\n for s in self.current_sources.values():\n for dac in range(4):\n s.set_current(dac, 0.0)", "def __init__(self, pinForward, pinBackward, pinControl):\n\n 
self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControl = pinControl\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControl, GPIO.OUT)\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n GPIO.output(self.pinControl,GPIO.HIGH)", "def set_off(self, channel: int, tf: bool = True):\n oldmode = self._device.readU8(LED0_OFF_H+4*channel)\n if tf == 1:\n mode = oldmode | 0x10\n logger.info('Setting servo on channel %d to OFF', channel)\n else:\n mode = oldmode & 0xEF\n logger.info('Setting servo on channel %d to PWM', channel)\n self._device.write8(LED0_OFF_H+4*channel, mode)", "def switch_off(self):\n if threading.current_thread() != self._blinking_thread:\n self._blinking_thread.unregister(self)\n GPIO.output(self.pin, GPIO.LOW)", "def turn_off(self, **kwargs: Any) -> None:\n self._set_light(OFF_STATE)", "def init():\n print(\"initializing...\")\n print(\"setting relays off\")\n for pin in PINS:\n GPIO.setup(pin, GPIO.OUT)\n GPIO.output(pin, RELAYOFF)", "def power_off(self):\n return self.inst.write(':OUTP OFF')", "def resetservo(self):\n debug('ControllerStartup.resetservo()')\n if self.servostates is not None:\n setservo(self.pidevice, self.servostates)\n elif self._databuf['servobuf']:\n setservo(self.pidevice, self._databuf['servobuf'])", "def systemOff():\n # Updated 11/19/16\n I2C.write_byte_data(Valve_bus, pinOut_O, 0x00 )\n I2C.write_byte_data(Pump_Mag_bus, pinOut_O, 0x00)", "def idle(self):\n self.pi.set_servo_pulsewidth(self.gpio, 0)", "def stop(self):\n\n command = [0x00, 0x00, 0x00, 0x00]\n self.send_command(command)", "async def async_turn_off(self, **kwargs: Any) -> None:\n self._is_on = False\n await self.disable_rain_delay()", "def turn_off(self, **kwargs):\n self._client.set_brightness(self._id, 0)", "def set_right(self, spd):\n self.r_motor.set(spd)", "def stop_step_sweep(self):\n self.write(\":SOUR:SWE:CONT:STAT OFF\")", "def forward_right(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(100)", "def off_switch(self):\n self._switch_callback = None", "def light_off(self, pin='D13'):\n self.light_set(pin, '0')", "def reset_DIO_outputs(self):\n self.seti('dios/0/output', 0)\n self.log.info(\"Set all DIO outputs to low.\")", "def turn_off(self, **kwargs):\n #self._light.turn_off()\n self._brightness = 0\n self._state = 'off'\n _LOGGER.info(\"turn_off() is called\")", "def off(self):\n for light in self.all:\n GPIO.output(light, 0)", "def toggleCounterEnable(self):\n mask = 1\n self._injectFault(\"PWM1TCR\", self.TCR, mask)", "def stop_driving(self):\n\n self.velocity = const.Driving.STOP_VELOCITY\n self.angle = const.Driving.NEUTRAL_STEERING_ANGLE\n\n if self.drive_thread is not None:\n self.drive_thread.stop()\n self.drive_thread = None", "def set_speed(self, om_left, om_right):\n analog_om_left = self.LEFT_CONST + om_left*4\n analog_om_right = self.RIGHT_CONST - om_right*4\n self.servoWriteMicroseconds(self.PIN_LEFT, analog_om_left)\n self.servoWriteMicroseconds(self.PIN_RIGHT, analog_om_right)" ]
[ "0.7451313", "0.71744835", "0.70240444", "0.64211655", "0.6414792", "0.6408484", "0.6395715", "0.63024884", "0.6269145", "0.62610817", "0.6226628", "0.6201555", "0.614517", "0.61346424", "0.6122426", "0.6099769", "0.6093762", "0.60788894", "0.6051263", "0.60405594", "0.6035325", "0.6029236", "0.6002926", "0.6000895", "0.5982927", "0.5968207", "0.5968207", "0.5968001", "0.58806914", "0.58674943", "0.5861014", "0.58578366", "0.58571935", "0.57988536", "0.5773956", "0.5772313", "0.57713807", "0.57652795", "0.57535595", "0.57423955", "0.5741846", "0.57378244", "0.57322496", "0.5729331", "0.5729215", "0.57129353", "0.5707614", "0.5697022", "0.56866926", "0.5675994", "0.5661568", "0.5631182", "0.56280637", "0.56271577", "0.5624825", "0.56140107", "0.56110317", "0.56034136", "0.5602348", "0.5578185", "0.5576176", "0.5575219", "0.55603933", "0.5555871", "0.55502564", "0.55455244", "0.55411166", "0.5531297", "0.5518628", "0.55060554", "0.55048347", "0.55039406", "0.550129", "0.549923", "0.5492521", "0.5487077", "0.54709655", "0.5469659", "0.545463", "0.5452964", "0.5452558", "0.542993", "0.5422215", "0.54193026", "0.54159087", "0.54102856", "0.5409801", "0.5408677", "0.5396296", "0.53916115", "0.5388178", "0.53879434", "0.5387868", "0.5385943", "0.5385008", "0.5384572", "0.5384523", "0.53823096", "0.5380344", "0.53793657" ]
0.736179
1
There are 4 dimensions: simulation id (600), physical variables (4), runs (3) and timesteps (500). For every scene, we need to pull up the data for all timesteps of the selected physical variables for a given simulation id.
def onestatfile():
    with hp.File('StatsFile.h5', 'w') as onefile:
        alldata = np.empty((600, 4, 3, 500), dtype=np.float32)
        for j in range(600):
            for i in range(3):
                msd, vol, rms, asp = getstats(i, j+1)
                alldata[j, 0, i, :] = msd
                alldata[j, 1, i, :] = vol
                alldata[j, 2, i, :] = rms
                alldata[j, 3, i, :] = asp
        onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),
                               compression='gzip', compression_opts=9)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def general_simulation_data(self):\n iterations = {}\n nstates = {}\n natoms = {}\n for phase in self.phases:\n positions = self.ncfiles[phase].variables['positions']\n iterations[phase], nstates[phase], natoms[phase], spatial = positions.shape\n\n leniter = max(len('Iterations'), *[len(str(i)) for i in iterations.values()]) + 2\n lenstates = max(len('States'), *[len(str(i)) for i in nstates.values()]) + 2\n lennatoms = max(len('Num Atoms'), *[len(str(i)) for i in natoms.values()]) + 2\n lenleftcol = max(len('Phase'), *[len(phase) for phase in self.phases]) + 2\n\n lines = []\n headstring = ''\n headstring += ('{:^' + '{}'.format(lenleftcol) + '}').format('Phase') + '|'\n headstring += ('{:^' + '{}'.format(leniter) + '}').format('Iterations') + '|'\n headstring += ('{:^' + '{}'.format(lenstates) + '}').format('States') + '|'\n headstring += ('{:^' + '{}'.format(lennatoms) + '}').format('Num Atoms')\n lines.append(headstring)\n lenline = len(headstring)\n topdiv = '=' * lenline\n lines.append(topdiv)\n for phase in self.phases:\n phasestring = ''\n phasestring += ('{:^' + '{}'.format(lenleftcol) + '}').format(phase) + '|'\n phasestring += ('{:^' + '{}'.format(leniter) + '}').format(iterations[phase]) + '|'\n phasestring += ('{:^' + '{}'.format(lenstates) + '}').format(nstates[phase]) + '|'\n phasestring += ('{:^' + '{}'.format(lennatoms) + '}').format(natoms[phase])\n lines.append(phasestring)\n lines.append('-' * lenline)\n\n for line in lines:\n print(line)\n self.iterations = iterations\n self._general_run = True", "def VarData(i, sim_dict, data_folder=\".\", iter_list = []):\n for num in iter_list: # now num is the variable\n temp_dict = _fix_vars(sim_dict, num)\n\n ReMapSim(temp_dict)\n sim = temp_dict['Simulation']\n\n file_name = \"{}/data{}.txt\".format(data_folder, i)\n sim.runVar(\n temp_dict['horizon'],\n temp_dict['cycles'],\n file_name\n )\n\n return None", "def AllindividualRuns():\n #800 nm\n RunData(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'), out='I800nm')\n RunData(getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), folder='data/31Jul/'), out='I800nm5k')\n RunData(getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/'), out='I800nm10k')\n RunData(getFiles(mintime=(15, 43, 24), maxtime=(15, 51, 47), folder='data/31Jul/'), out='I800nm20k')\n RunData(getFiles(mintime=(15, 56, 11), maxtime=(16, 02, 58), folder='data/31Jul/'), out='I800nm30k')\n RunData(getFiles(mintime=(16, 12, 39), maxtime=(16, 18, 25), folder='data/31Jul/'), out='I800nm38k')\n RunData(getFiles(mintime=(16, 21, 52), maxtime=(16, 26, 16), folder='data/31Jul/'), out='I800nm50k')\n RunData(getFiles(mintime=(16, 32, 02), maxtime=(16, 35, 23), folder='data/31Jul/'), out='I800nm54k')\n #700 nm\n RunData(getFiles(mintime=(17, 20, 17), maxtime=(17, 33, 17), folder='data/30Jul/'), out='I700nm5k')\n RunData(getFiles(mintime=(17, 37, 35), maxtime=(17, 46, 51), folder='data/30Jul/'), out='I700nm9k')\n RunData(getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/'), out='I700nm52k')\n RunData(getFiles(mintime=(17, 58, 18), maxtime=(17, 59, 31), folder='data/30Jul/'), out='I700nm32k')\n #600 nm\n RunData(getFiles(mintime=(15, 22, 00), maxtime=(15, 36, 32), folder='data/30Jul/'), out='I600nm5k')\n RunData(getFiles(mintime=(15, 39, 58), maxtime=(15, 47, 58), folder='data/30Jul/'), out='I600nm54k')\n RunData(getFiles(mintime=(15, 52, 07), maxtime=(16, 06, 32), folder='data/30Jul/'), out='I600nm10k')\n #890 nm\n RunData(getFiles(mintime=(13, 37, 37), 
maxtime=(13, 50, 58), folder='data/01Aug/'), out='I890nm5k')\n RunData(getFiles(mintime=(14, 00, 58), maxtime=(14, 11, 54), folder='data/01Aug/'), out='I890nm10k')\n RunData(getFiles(mintime=(14, 17, 57), maxtime=(14, 25, 49), folder='data/01Aug/'), out='I890nm30k')\n RunData(getFiles(mintime=(14, 30, 03), maxtime=(14, 34, 37), folder='data/01Aug/'), out='I890nm50k')", "def stageData(self,m):\n obs = Variable(filename = self.source,\n variable_name = self.variable,\n alternate_vars = self.alternate_vars)\n if obs.time is None: raise il.NotTemporalVariable()\n self.pruneRegions(obs)\n \n # Try to extract a commensurate quantity from the model\n mod = m.extractTimeSeries(self.variable,\n alt_vars = self.alternate_vars,\n expression = self.derived,\n initial_time = obs.time_bnds[ 0,0],\n final_time = obs.time_bnds[-1,1],\n lats = None if obs.spatial else obs.lat,\n lons = None if obs.spatial else obs.lon)\n obs,mod = il.MakeComparable(obs,mod,\n mask_ref = True,\n clip_ref = True,\n extents = self.extents,\n logstring = \"[%s][%s]\" % (self.longname,m.name))\n \n # Check the order of magnitude of the data and convert to help avoid roundoff errors\n def _reduceRoundoffErrors(var):\n if \"s-1\" in var.unit: return var.convert(var.unit.replace(\"s-1\",\"d-1\"))\n if \"kg\" in var.unit: return var.convert(var.unit.replace(\"kg\" ,\"g\" ))\n return var\n def _getOrder(var):\n return np.log10(np.abs(var.data).clip(1e-16)).mean()\n order = _getOrder(obs)\n count = 0\n while order < -2 and count < 2:\n obs = _reduceRoundoffErrors(obs)\n order = _getOrder(obs)\n count += 1\n \n # convert the model data to the same unit\n mod = mod.convert(obs.unit)\n\n return obs,mod", "def get_data(self,path):\n\t\twith open(path,'r') as file:\n\t\t\tdata = file.readlines()\n\t\t\tself.objects = int(data[0].split()[1])#getting num of object\n\t\t\tdata = data[2:]#Cutting header lines from data file\n\t\t\tself.positions = [[] for i in range(self.objects)]#creating array, each row represents all positions for agiven planet\n\t\t\tself.steps = int((len(data))/self.objects)#Cal steps\n\t\t\tself.center = [[] for i in range(self.objects)]\n\t\t\tprint \"steps = \", self.steps\n\t\t\tprint \"objects = \",self.objects \n\t\t\tprint(self.steps)\n\t\t\tfor el in range(self.objects):#iterator = all planets\n\t\t\t\tself.times =[]\n\t\t\t\tfor step in range(self.steps):#iterator = all simulation frames\n\t\t\t\t\ttemp_data = data[step*self.objects+el].split()#Separating t x y z from data lines\n\t\t\t\t\ttemp_data2 = [float(i) for i in temp_data[1:4]]#converting to float without t parameter\n\t\t\t\t\ttemp_center = [float(i) for i in temp_data[4:]]\n\t\t\t\t\tself.times.append(float(temp_data[0])) \n\t\t\t\t\tself.positions[el].append(temp_data2)#adding to self.positions list\n\t\t\t\t\tself.center[el].append(temp_center)\n\t\t\t\t\n\t\t\t\tself.add_planet(self.positions[el][0][0], self.positions[el][0][1], self.positions[el][0][2])#Inicialization planet with intial positions\n\t\t\tself.p = vis.points(pos=tuple(self.center[0][0]), size=3,color=color.black)#mass center \n\t\t\tself.p2 = vis.points(pos=(0,0,0),size=3,color=color.red)#sun center\n\t\t\tself.sun = vis.sphere(radius=0.2, pos = (0,0,0), opacity = 0.8, material=materials.emissive)\n\t\t\tself.dt= int(self.times[self.objects+1])", "def simulation():\n\n return {\n \"type\": \"class\",\n \"base\": \"iso.process_step\",\n \"is_abstract\": False,\n \"is_document\": True,\n \"pstr\": (\"({}/{}/{})\", (\"used\", \"ran_for_experiments\", \"ensemble_id\")),\n 
\"properties\": [\n (\n \"part_of_project\",\n \"linked_to(designing.project)\",\n \"1.N\",\n \"Project or projects for which simulation was run\",\n ),\n (\n \"ran_for_experiments\",\n \"linked_to(designing.numerical_experiment)\",\n \"1.N\",\n \"One or more experiments with which the simulation is \"\n \"associated\",\n ),\n (\n \"sub_experiment\",\n \"linked_to(designing.numerical_experiment)\",\n \"0.1\",\n \"For start-date ensembles, this will indicate the beginning \"\n \"year; for offline models driven by output from another \"\n \"model, this will provide the source_id and variant_label \"\n \"for the 'driving' model.\",\n ),\n (\n \"used\",\n \"linked_to(science.model)\",\n \"1.1\",\n \"The model used to run the simulation\",\n ),\n (\n \"primary_ensemble\",\n \"linked_to(activity.ensemble)\",\n \"0.1\",\n \"Primary Ensemble (ensemble for which this simulation was \"\n \"first run).\",\n ),\n (\n \"institution\",\n \"linked_to(shared.party)\",\n \"0.1\",\n \"institution which carried out the simulation\",\n ),\n (\n \"parent_of\",\n \"linked_to(activity.child_simulation)\",\n \"0.N\",\n \"If appropriate, links to simulations which branched from \"\n \"this one\",\n ),\n (\n \"produced\",\n \"linked_to(data.dataset)\",\n \"0.N\",\n \"Products of the simulation\",\n ),\n (\n \"had_performance\",\n \"linked_to(platform.performance)\",\n \"0.1\",\n \"Performance of the simulation.\",\n ),\n (\n \"ran_on\",\n \"linked_to(platform.machine)\",\n \"0.1\",\n \"The machine on which the simulation was run.\",\n ),\n (\n \"errata\",\n \"shared.online_resource\",\n \"0.1\",\n \"Link to errata associated with this simulation.\",\n ),\n (\n \"ensemble_id\",\n \"activity.axis_member\",\n \"0.N\",\n \"Identification within ensemble axes via axis member. \"\n \"(Multiple axis members within a simulation cannot share the \"\n \"same ensemble_axis.) (There must be an axis_member instance \"\n \"for each ensemble axis in a parent ensemble.)\",\n ),\n # Time\n (\n \"start_time\",\n \"time.date_time\",\n \"0.1\",\n \"The start date-time of the simulation. e.g. \"\n \"2012-04-01 00:00:00\",\n ),\n (\n \"end_time\",\n \"time.date_time\",\n \"0.1\",\n \"The end date-time of the simulation. e.g. 
\"\n \"2087-11-30 12:00:00\",\n ),\n (\n \"calendar\",\n \"time.calendar\",\n \"0.1\",\n \"The calendar used in the simulation\",\n ),\n # Further Info URL\n (\n \"documentation\",\n \"shared.online_resource\",\n \"0.1\",\n \"On-line location of additional documentation\",\n ),\n # Extra attributes\n (\n \"extra_attributes\",\n \"shared.extra_attribute\",\n \"0.N\",\n \"Additional attributes provided with simulation.\",\n ),\n ],\n \"constraints\": [\n (\"cardinality\", \"rationale\", \"0.0\"),\n ],\n }", "def build_experiments(self):\n\n # width=500, height=350, pos_x= 2.0, pos_y=0.0, pos_z= 1.4, angle=-30.0\n cameraRGB = Camera('Camera', PostProcessing='SceneFinal')\n cameraRGB.set_image_size(500, 350)\n cameraRGB.set_position(2.0, 0.0, 1.4)\n cameraRGB.set_rotation(-30.0, 0.0, 0.)\n cameraRGB.set(FOV=100)\n\n camera = Camera('CameraSem', PostProcessing='SemanticSegmentation')\n camera.set_image_size(320, 180)\n camera.set_position(2.0, 0.0, 1.4)\n camera.set_rotation(-30.0, 0.0, 0.)\n camera.set(FOV=100)\n\n if self._city_name == 'Town01':\n poses_tasks = self._poses_town01()\n vehicles_tasks = []\n pedestrians_tasks = []\n for i in range(len(poses_tasks)):\n vehicles_tasks.append(0)\n pedestrians_tasks.append(0)\n\n experiment_vector = []\n\n for weather in self.weathers:\n\n for iteration in range(len(poses_tasks)):\n poses = poses_tasks[iteration]\n vehicles = vehicles_tasks[iteration]\n pedestrians = pedestrians_tasks[iteration]\n\n conditions = CarlaSettings()\n conditions.set(\n SendNonPlayerAgentsInfo=True,\n NumberOfVehicles=vehicles,\n NumberOfPedestrians=pedestrians,\n WeatherId=weather,\n QualityLevel=1\n )\n\n conditions.set(SynchronousMode=True)\n conditions.set(DisableTwoWheeledVehicles=True)\n\n conditions.add_sensor(camera)\n conditions.add_sensor(cameraRGB)\n\n experiment = Experiment()\n experiment.set(\n Conditions=conditions,\n Poses=poses,\n Task=iteration,\n Repetitions=1\n )\n\n experiment_vector.append(experiment)\n\n return experiment_vector", "def getMeasures(unique_name=None):", "def extractQuantities(path, run, t0, t1):\n data = pyLTR.Models.MIX(path, run)\n\n # hard-coded input for testing & debugging:\n #data = pyLTR.Models.LFM('/hao/aim2/schmitt/data/LTR-2_0_1b/r1432/March1995/LR/single', 'LRs')\n \n #Make sure variables are defined in the model.\n modelVars = data.getVarNames()\n for v in ['Grid X', 'Grid Y', \n 'Potential North [V]', 'Potential South [V]', \n 'FAC North [A/m^2]', 'FAC South [A/m^2]',\n 'Pedersen conductance North [S]', 'Pedersen conductance South [S]', \n 'Hall conductance North [S]', 'Hall conductance South [S]', \n 'Average energy North [keV]', 'Average energy South [keV]',\n 'Number flux North [1/cm^2 s]', 'Number flux South [1/cm^2 s]']:\n assert( v in modelVars )\n\n timeRange = data.getTimeRange()\n if len(timeRange) == 0:\n raise Exception(('No data files found. Are you pointing to the correct run directory?'))\n\n index0 = 0\n if t0:\n for i,t in enumerate(timeRange):\n if t0 >= t:\n index0 = i\n\n index1 = len(timeRange)-1\n if t1:\n for i,t in enumerate(timeRange):\n if t1 >= t:\n index1 = i \n\n print(( 'Extracting MIX quantities for time series over %d time steps.' 
% (index1-index0) ))\n \n # Output a status bar displaying how far along the computation is.\n progress = pyLTR.StatusBar(0, index1-index0)\n progress.start()\n\n t_doy = []\n cpcpNorth = []\n cpcpSouth = []\n hpNorth = []\n hpSouth = []\n ipfacNorth = []\n ipfacSouth = []\n\n # Pre-compute area of the grid.\n x = data.read('Grid X', timeRange[index0])\n y = data.read('Grid Y', timeRange[index0])\n # Fix singularity at the pole\n x[:,0] = 0.0\n y[:,0] = 0.0\n z = numpy.sqrt(1.0-x**2-y**2)\n ri = 6500.0e3 # Radius of ionosphere\n areaMixGrid = pyLTR.math.integrate.calcFaceAreas(x,y,z)*ri*ri\n\n for i,time in enumerate(timeRange[index0:index1]):\n try:\n # -- Day of Year\n tt = time.timetuple()\n t_doy.append(tt.tm_yday+tt.tm_hour/24.0+tt.tm_min/1440.0+tt.tm_sec/86400.0)\n\n # --- Cross Polar Cap Potential\n psiNorth = data.read('Potential North [V]', time) / 1000.0\n cpcpNorth.append(psiNorth.max() - psiNorth.min())\n\n psiSouth = data.read('Potential South [V]', time) / 1000.0\n cpcpSouth.append(psiSouth.max() - psiSouth.min())\n \n # --- Hemispheric Power\n energy = data.read('Average energy North [keV]', time)\n flux = data.read('Number flux North [1/cm^2 s]', time)\n hp = areaMixGrid*energy[:-1,:-1] * flux[:-1,:-1]\n # KeV/cm^2s to mW/m^2 to GW\n hpNorth.append(hp.sum() * 1.6e-21) \n\n energy = data.read('Average energy South [keV]', time)\n flux = data.read('Number flux South [1/cm^2 s]', time)\n hp = areaMixGrid*energy[:-1,:-1] * flux[:-1,:-1]\n # KeV/cm^2s to mW/m^2 to GW\n hpSouth.append(hp.sum() * 1.6e-21)\n\n # --- Positive current density\n fac = data.read('FAC North [A/m^2]', time)\n fac[fac <= 0] = 0.0\n pfac = areaMixGrid * fac[:-1,:-1]\n ipfacNorth.append(pfac.sum()/1.0e6)\n\n fac = data.read('FAC South [A/m^2]', time)\n fac[fac <= 0] = 0.0\n pfac = areaMixGrid * fac[:-1,:-1]\n ipfacSouth.append(pfac.sum()/1.0e6)\n\n progress.increment()\n except KeyboardInterrupt:\n # Exit when the user hits CTRL+C.\n progress.stop()\n progress.join() \n print('Exiting.')\n import sys\n sys.exit(0)\n except:\n # Cleanup progress bar if something bad happened.\n progress.stop()\n progress.join()\n raise\n progress.stop()\n progress.join()\n\n dataNorth = pyLTR.TimeSeries()\n dataSouth = pyLTR.TimeSeries()\n dataNorth.append('datetime', 'Date & Time', '', timeRange[index0:index1])\n dataSouth.append('datetime', 'Date & Time', '', timeRange[index0:index1])\n dataNorth.append('doy', 'Day of Year', '', t_doy)\n dataSouth.append('doy', 'Day of Year', '', t_doy)\n \n # \"N\" and \"S\" label subscripts are redundant here, potentially leading to\n # mis-labeling of plots\n #dataNorth.append('cpcp', r'$\\Phi_N$', 'kV', cpcpNorth)\n #dataSouth.append('cpcp', r'$\\Phi_S$', 'kV', cpcpSouth)\n #\n #dataNorth.append('hp', r'$HP_N$', 'GW', hpNorth)\n #dataSouth.append('hp', r'$HP_S$', 'GW', hpSouth)\n #\n #dataNorth.append('ipfac', r'$FAC_N$', 'MA', ipfacNorth)\n #dataSouth.append('ipfac', r'$FAC_S$', 'MA', ipfacSouth)\n \n dataNorth.append('cpcp', r'$\\Phi$', 'kV', cpcpNorth)\n dataSouth.append('cpcp', r'$\\Phi$', 'kV', cpcpSouth)\n \n dataNorth.append('hp', r'$HP$', 'GW', hpNorth)\n dataSouth.append('hp', r'$HP$', 'GW', hpSouth)\n \n dataNorth.append('ipfac', r'$FAC$', 'MA', ipfacNorth)\n dataSouth.append('ipfac', r'$FAC$', 'MA', ipfacSouth)\n\n return (dataNorth, dataSouth)", "def get_data_experimentIDAndTimePointAndUnits_dataStage02GlogNormalized(self, experiment_id_I,time_point_I,concentration_units_I):\n #Tested\n try:\n data = 
self.session.query(data_stage02_quantification_glogNormalized.experiment_id,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).filter(\n data_stage02_quantification_glogNormalized.experiment_id.like(experiment_id_I),\n data_stage02_quantification_glogNormalized.time_point.like(time_point_I),\n data_stage02_quantification_glogNormalized.calculated_concentration_units.like(concentration_units_I),\n data_stage02_quantification_glogNormalized.used_.is_(True)).group_by(\n data_stage02_quantification_glogNormalized.experiment_id,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).all();\n data_O = [];\n for d in data: \n data_1 = {};\n data_1['experiment_id'] = d.experiment_id;\n data_1['sample_name_short'] = d.sample_name_short;\n data_1['time_point'] = d.time_point;\n data_1['component_group_name'] = d.component_group_name;\n data_1['component_name'] = d.component_name;\n data_1['calculated_concentration'] = d.calculated_concentration;\n data_1['calculated_concentration_units'] = d.calculated_concentration_units;\n data_O.append(data_1);\n return data_O;\n except SQLAlchemyError as e:\n print(e);", "def readExperiAll(varid,timeperiod,level):\n print('\\n>>>>>>>>>> Using readExperiAll function!')\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n\n ###########################################################################\n ###########################################################################\n ###########################################################################\n ### Directories for Antarctic experiments (1-100 members)\n if any([timeperiod=='ANT_Fu',timeperiod=='ANT_Cu',timeperiod=='ANT_Pi']):\n if timeperiod == 'ANT_Fu':\n experi = 'PAMIP-1.8'\n directorydata = '/seley/ypeings/simu/'\n totaldirectory = directorydata + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n print('Reading in Antarctic Future Sea Ice!')\n elif timeperiod == 'ANT_Cu':\n experi = 'PAMIP-1.1-QBO'\n directorydata = '/seley/ypeings/simu/'\n totaldirectory = directorydata + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n if varid == 'SIC':\n experi = 'PAMIP_Cu' # missing SIC data in 1.1-QBO\n directorydata = '/seley/zlabe/simu/'\n totaldirectory = directorydata + experi + '/monthly/'\n filename = totaldirectory + varid + '_1701-2000.nc'\n print('Reading in Antarctic Present-Day Sea Ice!')\n elif timeperiod == 'ANT_Pi':\n experi = 'PAMIP-1.7'\n directorydata = '/seley/ypeings/simu/'\n totaldirectory = directorydata + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n print('Reading in Antarctic Pre-Industrial Sea Ice!')\n else:\n print(ValueError('Selected wrong time period!')) \n else:\n print(ValueError('Selected wrong experiment name!'))\n \n if varid == 'EGR' and level == 'surface': # integrated from 500-850 hPa\n 
filename = totaldirectory + varid + '_500_850.nc'\n\n ### Read in Data\n if level == 'surface': # 3d variables\n data = Dataset(filename,'r')\n lev = 'surface'\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n varq = data.variables['%s' % varid][:]\n data.close()\n elif level == 'profile': # 4d variables\n data = Dataset(filename,'r')\n lev = data.variables['level'][:]\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n varq = data.variables['%s' % varid][:]\n data.close()\n elif level == 'zonmean': # 3d variables (zonal mean!)\n varidz = varid + '_' + level\n filename = totaldirectory + varidz + '_1900-2000.nc'\n data = Dataset(filename,'r')\n lev = data.variables['level'][:]\n lat = data.variables['lat'][:]\n lon = data.variables['lon'][:]\n varq = data.variables['%s' % varid][:].squeeze()\n data.close()\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Read data for *%s* : %s!' % (experi[:],varid))\n\n ### Reshape to split years and months\n months = 12\n if level == 'surface': # 3d variables\n var = np.reshape(varq,(varq.shape[0]//months,months,\n int(lat.shape[0]),int(lon.shape[0])))\n elif level == 'profile': # 4d variables\n var = np.reshape(varq,(varq.shape[0]//months,months,int(lev.shape[0]),\n int(lat.shape[0]),int(lon.shape[0])))\n elif level == 'zonmean': # 3d variables (zonal mean!)\n var = np.reshape(varq,(varq.shape[0]//months,months,int(lev.shape[0]),\n int(lat.shape[0])))\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Reshaped %s array!' % (varid))\n \n ### Convert units\n if varid in ('TEMP','T2M'):\n var = var - 273.15 # Kelvin to degrees Celsius \n print('Completed: Changed units (K to C)!')\n elif varid == 'SWE':\n var = var*1000. 
# Meters to Millimeters \n print('Completed: Changed units (m to mm)!')\n \n print('Completed: Read members 1-100!')\n\n print('>>>>>>>>>> Completed: Finished readExperiAll function!')\n return lat,lon,lev,var", "def read_data(infiles, variable, time_constraint):\n\n cube = iris.load(infiles, gio.check_iris_var(variable), callback=save_history)\n equalise_attributes(cube)\n iris.util.unify_time_units(cube)\n cube = cube.concatenate_cube()\n cube = gio.check_time_units(cube)\n\n cube = cube.extract(time_constraint)\n\n if not 'J' in str(cube.units):\n cube = convert_to_joules(cube) \n\n if variable == 'surface_downward_heat_flux_in_sea_water':\n agg_method = 'sum'\n elif variable == 'ocean_heat_content':\n agg_method = 'mean'\n cube = timeseries.convert_to_annual(cube, aggregation=agg_method) \n \n coord_names = [coord.name() for coord in cube.dim_coords]\n aux_coord_names = [coord.name() for coord in cube.aux_coords]\n assert 'time' in coord_names\n assert len(coord_names) == 3\n grid_type = 'curvilinear' if aux_coord_names == ['latitude', 'longitude'] else 'latlon'\n\n return cube, coord_names, aux_coord_names, grid_type", "def run():\n #Initialise variables\n data = build_station_list()\n update_water_levels(data)\n ls = []\n ID = []\n \n #Number of days in past taken data from\n dt = 7\n #How many graphs per window\n limit = 4\n #How many stations\n number = 6\n \n #Create list of measuring_id's sorted by water level\n for station in data:\n if station.typical_range_consistent() == True and station.relative_water_level() != None:\n ls.append((station, station.relative_water_level()))\n\n ls = sorted_by_key(ls, 1)\n \n for station in ls:\n ID.append(station[0])\n \n s = count_inconsistent_sets(ID[:number], dt)\n \n ID = ID[:number+s]\n\n plot_water_levels(ID, dt, limit, s)", "def get_stim_onset_times(sessions, metadata_dict):\n if not isinstance(sessions, list):\n sessions = list(sessions)\n\n for line in sessions:\n session_id = line['Sess.ID']\n if session_id: # we loaded a line with session info\n session_name = '{}_{}'.format(line['Experiment'], line['Sess.ID'])\n\n # Check if session is already in database\n if database is not None and session_name in database.index:\n continue\n session_stimuli = {}\n session_stimuli['session_id'] = session_id\n session_stimuli['stimuli'] = {}\n session_stimuli['stimuli']['visual'] = []\n session_stimuli['stimuli']['audio'] = []\n session_stimuli['stimuli']['digital'] = []\n videopaths = []\n # load data from .tdms and .avi fils\n for recording in line['Recordings']:\n path = os.path.join(line['Base fld'], line['Exp fld'], recording)\n for f in os.listdir(path):\n if '.avi' in f:\n videopaths.append(os.path.join(path, f))\n print(videopaths)\n elif '.tdms' == f[-5:]:\n tdmspath = os.path.join(path, f)\n # Loop over each .tdms file and extract stimuli frames\n print(colored('Loading {}: {}'.format(session_name,os.path.basename(tdmspath)),'yellow'))\n tdms = TdmsFile(tdmspath)\n if metadata_dict[session_name]['software'] == 'behaviour':\n visual_rec_stims, audio_rec_stims, digital_rec_stims = [], [], []\n for group in tdms.groups():\n for obj in tdms.group_channels(group):\n if 'stimulis' in str(obj).lower():\n for idx in obj.as_dataframe().loc[0].index:\n if \"/' \" in idx:\n framen = int(idx.split(\"/' \")[1].split('-')[0])\n elif \"/' \" in idx:\n framen = int(idx.split(\"/' \")[1].split('-')[0])\n else:\n framen = int(idx.split(\"/'\")[2].split('-')[0])\n if 'visual' in str(obj).lower():\n visual_rec_stims.append(framen)\n elif 'audio' in 
str(obj).lower():\n audio_rec_stims.append(framen)\n elif 'digital' in str(obj).lower():\n digital_rec_stims.append(framen)\n else:\n print(colored('Couldnt load stim correctly','yellow'))\n # Now use the AI channels to find the *real* stimulus onset times and replace them\n if audio_rec_stims:\n stimulus_on_idx = np.where(tdms.group_channels('AI')[3].data > .55)[0] #in first data sets this is AI 1, later AI 2\n idx_since_last_stimulus_on = np.diff(stimulus_on_idx)\n if stimulus_on_idx.size:\n stimulus_start_idx = stimulus_on_idx[np.append(np.ones(1).astype(bool),idx_since_last_stimulus_on>2*10000)] #usually 10 or 30\n stimulus_start_frame = np.ceil(stimulus_start_idx / 10000 / (33 + 1 / 3) * 1000).astype(int)\n stimulus_start_frame = stimulus_start_frame[stimulus_start_frame > 300]\n else:\n stimulus_start_frame = np.array(audio_rec_stims)\n print('NO STIMULI FOUND!!')\n\n if len(stimulus_start_frame) != len(audio_rec_stims):\n print('audio AI channel does not match number of timestamps by ' + str(len(audio_rec_stims)-len(stimulus_start_frame)) )\n else:\n discrepancy = stimulus_start_frame - audio_rec_stims\n if sum(discrepancy>8):\n print('audio AI channel does not match values of timestamps')\n else:\n print(discrepancy)\n # for conditioning experiment, just use what the tdms says\n # if 'food' in line['Experiment']:\n # stimulus_start_frame = np.array(audio_rec_stims)\n audio_rec_stims = list(stimulus_start_frame)\n\n session_stimuli['stimuli']['visual'].append(visual_rec_stims)\n session_stimuli['stimuli']['audio'].append(audio_rec_stims)\n session_stimuli['stimuli']['digital'].append(digital_rec_stims)\n\n else:\n \"\"\" HERE IS WHERE THE CODE TO GET THE STIM TIMES IN MANTIS WILL HAVE TO BE ADDEDD \"\"\"\n pass\n\n # Add to dictionary (or update entry)\n stimulus_dict[session_name] = session_stimuli\n return stimulus_dict", "def flatten_dae_variables(model, time):\n assert time.model() is model.model()\n\n block_queue = [model]\n regular_vars = []\n time_indexed_vars = []\n while block_queue:\n b = block_queue.pop(0)\n b_sets = identify_member_sets(b.index_set())\n if time in b_sets:\n for _slice in generate_time_indexed_block_slices(b, time):\n time_indexed_vars.append(Reference(_slice))\n continue\n block_queue.extend(\n list(b.component_objects(Block, descend_into=False))\n )\n for v in b.component_objects(SubclassOf(Var), descend_into=False):\n v_sets = identify_member_sets(v.index_set())\n if time in v_sets:\n for _slice in generate_time_only_slices(v, time):\n time_indexed_vars.append(Reference(_slice))\n else:\n regular_vars.extend(list(v.values()))\n\n return regular_vars, time_indexed_vars", "def simulate_test_data(self):\n # simulation config\n if self.instrument == 'IMA':\n sim_config = SimConfig.makeSim(name=\"IMA Simulation\", rel_obsdate=0.0, scene=\"scene.ini\", POP='IMA',\n ConfigPath='IMA_FULL', Dither=False,StartInd=1, NDither=4,\n DitherPat=\"ima_recommended_dither.dat\", filter=\"F1130W\",\n readDetect= 'FULL', ima_mode= 'FAST', ima_exposures=1,\n ima_integrations=1, ima_frames=50, disperser= 'SHORT', detector= 'SW',\n mrs_mode= 'SLOW', mrs_exposures=5,mrs_integrations=4,mrs_frames=10)\n\n # scene config\n background = Background(level='low', gradient=5., pa=15.0, centreFOV=(0., 0.))\n\n SED1 = BBSed(Temp=300., wref=10., flux=1.e8)\n Gal1 = Galaxy(Cen=(0., 0.), n=1., re=200, q=0.99, pa=0.1)\n Gal1.set_SED(SED1)\n targets = [Gal1]\n\n scene_config = SceneConfig.makeScene(loglevel=0, background=background, targets=targets)\n\n elif self.instrument == 'MRS':\n 
sim_config = SimConfig.makeSim(name=\"MRS Simulation\", rel_obsdate=0.0, scene=\"scene.ini\",\n POP='MRS', ConfigPath='MRS_1SHORT', Dither=False, StartInd=1,\n NDither=4, DitherPat=\"mrs_recommended_dither.dat\", filter=\"F1130W\",\n readDetect='FULL', ima_mode='FAST', ima_exposures=1,\n ima_integrations=1, ima_frames=20, disperser='SHORT', detector='SW',\n mrs_mode='FAST', mrs_exposures=1, mrs_integrations=1, mrs_frames=50)\n\n # scene config\n background = Background(level='low',gradient=5.,pa=15.0,centreFOV=(0., 0.))\n\n SED1 = BBSed(Temp=300., wref=10., flux=5.e6)\n Gal1 = Galaxy(Cen=(0.,0.), n=1., re=2, q=0.99, pa=0.1)\n Gal1.set_SED(SED1)\n targets = [Gal1]\n\n scene_config = SceneConfig.makeScene(loglevel=0, background=background, targets=targets)\n\n # simulator config\n if self.noise:\n simulator_config = SimulatorConfig.makeSimulator(max_fsm=0.050, max_dither=20.0, mrs_ref_channel=1,\n mrs_ref_band=\"SHORT\", tau_telescope=0.88,tau_eol=0.8,\n telescope_area=25.032, telescope_pupil_diam=6.6052,\n take_webbPsf=False, include_refpix=True,\n include_poisson=True, include_readnoise=True,\n include_badpix=True, include_dark=True,\n include_flat=False, include_gain=True,\n include_nonlinearity=self.linearity, include_drifts=True,\n include_latency=False, cosmic_ray_mode='NONE')\n else:\n simulator_config = SimulatorConfig.makeSimulator(max_fsm=0.050, max_dither=20.0, mrs_ref_channel=1,\n mrs_ref_band=\"SHORT\", tau_telescope=0.88, tau_eol=0.8,\n telescope_area=25.032, telescope_pupil_diam=6.6052,\n take_webbPsf=False, include_refpix=True,\n include_poisson=False, include_readnoise=False,\n include_badpix=True, include_dark=True,\n include_flat=False, include_gain=True,\n include_nonlinearity=self.linearity, include_drifts=True,\n include_latency=False, cosmic_ray_mode='NONE')\n\n\n # run the simulation\n simulation = MiriSimulation(sim_config=sim_config, scene_config=scene_config,\n simulator_config=simulator_config, loglevel='DEBUG')\n simulation.run()\n\n # we only need the sim file so move it to output_dir and remove everthing else\n det_image_file = glob.glob(os.path.join(simulation.path_out, 'det_images', '*.fits'))[0]\n self.ramp_file = os.path.join(self.output_dir, os.path.basename(det_image_file))\n shutil.move(det_image_file, self.ramp_file)\n shutil.rmtree(simulation.path_out)", "def _store_dpc_data(self, all_runs, data_store=None, include_vac=False):\n\n if data_store is None:\n data_store = self.data\n\n times = self.data['dpc_times']\n\n sus = np.zeros((3, 2, len(times), len(all_runs)))\n inf = np.zeros((3, 2, len(times), len(all_runs)))\n vac = np.zeros((3, 2, len(times), len(all_runs)))\n\n initial_state = np.zeros((1, 18))\n\n for run, (run_data, *_) in enumerate(all_runs):\n run_times0 = [x[0] for x in run_data[\"RegionA\"]]\n run_times1 = [x[0] for x in run_data[\"RegionB\"]]\n run_times2 = [x[0] for x in run_data[\"RegionC\"]]\n for i, time in enumerate(times):\n idx0 = np.searchsorted(run_times0, time, side=\"right\")\n idx1 = np.searchsorted(run_times1, time, side=\"right\")\n idx2 = np.searchsorted(run_times2, time, side=\"right\")\n states = [run_data[\"RegionA\"][idx0-1][1:], run_data[\"RegionB\"][idx1-1][1:],\n run_data[\"RegionC\"][idx2-1][1:]]\n\n for reg in range(3):\n sus[reg, 0, i, run] = states[reg][State.SUS_H]\n inf[reg, 0, i, run] = states[reg][State.INF_H]\n vac[reg, 0, i, run] = states[reg][State.VAC_H]\n\n sus[reg, 1, i, run] = states[reg][State.SUS_L]\n inf[reg, 1, i, run] = states[reg][State.INF_L]\n vac[reg, 1, i, run] = 
states[reg][State.VAC_L]\n\n if time == 0 and run == 0:\n for reg in range(3):\n for j, state_val in enumerate(sorted(HIGH_LIVE_STATES | LOW_LIVE_STATES)):\n initial_state[0, 6*reg+j] = states[reg][state_val]\n\n names = ['dpc_sus', 'dpc_inf']\n data = [sus, inf]\n if include_vac:\n names += ['dpc_vac']\n data += [vac]\n\n for name, dat in zip(names, data):\n if name in data_store:\n data_store[name] = np.concatenate((data_store[name], dat), axis=3)\n else:\n data_store[name] = dat\n\n if 'init_state' in data_store:\n data_store['init_state'] = np.vstack((data_store['init_state'], initial_state))\n else:\n data_store['init_state'] = initial_state", "def _collect_scene_data(self, config):\n\n self._config = config\n self.scenes_root_path = config['scenes_root_path']\n assert(os.path.isdir(self.scenes_root_path))\n\n self._scene_dict = dict()\n # each one is a list of scenes\n self._all_image_paths = {\"train\": [], \"test\": []}\n\n for key, val in self._all_image_paths.items():\n for scene_collection_name in config[key]:\n scene_collection_dir = os.path.join(self.scenes_root_path, scene_collection_name)\n assert os.path.isdir(scene_collection_dir), scene_collection_dir\n # Scan all scenes in this scene dir\n for scene_name in os.listdir(scene_collection_dir):\n full = os.path.join(scene_collection_dir, scene_name)\n if os.path.isdir(full):\n val += self._get_all_rgb_image_paths_in_scene_dir(full)", "def dvars_detector(data):\n # init dvars\n dvars = []\n # for 0 to timepoints-1\n for i in range(data.shape[-1] - 1):\n # get difference between volumes\n vol_diff = data[...,i + 1] - data[..., i]\n # calculate rms\n dvars.append(np.sqrt(np.mean(vol_diff ** 2)))\n # get outliers using iqr_detector\n _, outliers = iqr_detector(dvars)\n # return rms dvars and outliers\n return dvars, outliers", "def explore_runs(df, option, gamma, alpha):\n\n n_states = len(np.unique(df.objnum))\n SR_matrices = {}\n M = np.zeros([n_states, n_states])\n\n # This option allows the SR matrix to persist in Part 1 and Part 2,\n # but resets it between them.\n if option == \"reset\":\n for part in np.unique(df.part):\n if part == 2:\n M = np.zeros([n_states, n_states])\n for run in np.unique(df.loc[df.part == part, 'run']):\n envstep = df.loc[(df.part == part) & (df.run == run),\n 'objnum'].values\n M = np.array(run_experiment(envstep, gamma, alpha, np.copy(M),\n n_states))\n M = M / np.sum(M)\n SR_matrices[(part, run)] = M\n\n # This option resets the SR matrix between each run.\n if option == \"independent\":\n for part in np.unique(df.part):\n for run in np.unique(df.loc[df.part == part, 'run']):\n M = np.zeros([n_states, n_states])\n envstep = df.loc[(df.part == part) & (df.run == run),\n 'objnum'].values\n M = np.array(run_experiment(envstep, gamma, alpha, np.copy(M),\n n_states))\n M = M / np.sum(M)\n SR_matrices[(part, run)] = M\n\n return SR_matrices", "def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type, visualize):\n #initialization of variables\n list_of_results = []\n \n #trial loop\n for i in range(num_trials):\n list_of_results.append(singleSimulation(num_robots, speed, width, height, min_coverage, robot_type, visualize))\n return list_of_results", "def extract_scene_info(self) -> None:\n records = [\n (self.level5data.get(\"sample\", rec[\"first_sample_token\"])[\"timestamp\"], rec)\n for rec in self.level5data.scene\n ]\n\n entries = []\n for start_time, record in sorted(records):\n start_time = (\n self.level5data.get(\"sample\", 
record[\"first_sample_token\"])[\"timestamp\"]\n / 1000000\n )\n\n token = record[\"token\"]\n name = record[\"name\"]\n date = datetime.utcfromtimestamp(start_time)\n host = \"-\".join(record[\"name\"].split(\"-\")[:2])\n first_sample_token = record[\"first_sample_token\"]\n\n entries.append((host, name, date, token, first_sample_token))\n\n self.df = pd.DataFrame(\n entries,\n columns=[\"host\", \"scene_name\", \"date\", \"scene_token\", \"first_sample_token\"],\n )\n host_count_df = self.df.groupby(\"host\")[\"scene_token\"].count()\n print(\"the number of host\", host_count_df)", "def get_all_data(ds_names, ds_types, indxs, fields, **kwargs):\n data = {f:{} for f in fields+['time']}\n\n for ds_type, keys in ds_types.items():\n for dsk in keys:\n print('Getting data for: ',dsk)\n\n dsf = ds_names[dsk]\n\n if ds_type == 'maven':\n ds = pd.read_csv(dsf)\n for field in fields:\n\n ds_dat = get_ds_data(ds, field, indxs[ds_type],\n maven=True, grid=False)\n data[field][dsk] = ds_dat\n time = get_ds_data(ds, 'time', indxs[ds_type],\n maven=True, grid=False)\n time = time-time[0]\n time = time/time[-1]\n data['time'][dsk] = time\n \n\n\n else:\n for field in fields:\n with h5py.File(dsf, 'r') as ds:\n \n if '_x' in field or '_y' in field or '_z' in field:\n get_data_func = get_rotated_data\n else: get_data_func = get_ds_data\n try:\n ds_dat = get_data_func(ds, field, indxs[ds_type],\n grid='batsrus' not in ds_type, **kwargs)\n #grid=ds_type=='heliosares', **kwargs)\n except ValueError:\n ds_dat = np.array([])\n data[field][dsk] = ds_dat\n\n data['time'][dsk] = np.linspace(0, 1, np.max(indxs[ds_type].shape))\n\n return data", "def compute(self):\n self.find_n()\n\n # call hotspot field plots\n for scenario in self.scenarios:\n fields_dict = {}\n ancestor_files = []\n for filename in io.get_all_ancestor_files(self.cfg,\n pattern='hotspot_*.nc'):\n key = os.path.basename(os.path.dirname(filename))\n splitname = os.path.basename(filename).split(\"_\")\n if key.split(\"_\")[-1] == scenario:\n fields_dict[(\n f\"{splitname[-1].split('.nc')[0]}_\"\n f\"{splitname[1]}_{key}\")] = iris.load_cube(filename)\n ancestor_files.append(filename)\n fields_dict[\"scenario\"] = scenario\n fields_dict[\"ancestors\"] = ancestor_files\n self.hotspot_fields_plot(fields_dict)\n\n # call scatter plots\n for season in self.seasons:\n timeseries_dict = {\"large_scale\": {}, \"regional\": {}}\n for region, value in timeseries_dict.items():\n for filename in io.get_all_ancestor_files(\n self.cfg,\n pattern=f'rolling_mean_{region}_{season}.nc'):\n value[os.path.basename(os.path.dirname(filename))] = (\n iris.load_cube(filename))\n value[os.path.basename(\n os.path.dirname(filename))] = (filename)\n for var_combination in self.var_combinations:\n self.timeseries_scatter_plot(deepcopy(timeseries_dict), season,\n var_combination)", "def test_get_measure_parameters_by_id(self):\n pass", "def get_subset_by_cortex(sess_no, raw_path, \n align_on, from_time, to_time,\n cortex,\n only_correct_trials = True, renorm = True ):\n tinfo_path = raw_path + 'trial_info.mat'\n rinfo_path = raw_path + 'recording_info.mat'\n \n # get all data\n data_filtered = get_preprocessed_from_raw(sess_no, raw_path, \n align_on, from_time, to_time )\n \n # don't keep missing data // keep only_correct_trials if True\n \n responses = io.get_responses(tinfo_path)\n if only_correct_trials == False:\n ind_to_keep = (responses == responses).flatten()\n else:\n ind_to_keep = (responses == 1).flatten()\n \n #data1 =data1[ind_to_keep, :, :] # in the same time\n 
#data2 =data2[ind_to_keep, :, :]\n \n data_filtered = data_filtered[ind_to_keep,:,:]\n\n \n # select electrode\n \n dico_area_to_cortex = io.get_dico_area_to_cortex()\n area_names = io.get_area_names(rinfo_path)\n \n dtype = [('name', '<U6'), ('index', int), ('cortex', '<U16')]\n values = []\n for count, area in enumerate(area_names):\n if area in dico_area_to_cortex: # if not, area isn't in Visual or Parietal or Prefontal or Motor or Somatosensory\n \n values.append( (area, count, dico_area_to_cortex[area]) )\n else:\n print('Unknow area')\n \n s = np.array(values, dtype=dtype)\n \n elec = s[s['cortex'] == cortex]['index']\n \n data_filtered = data_filtered[:, elec, :]\n\n \n \n ### variable for shape\n #n_chans1 = len(elec1)\n #n_chans2 = len(elec2)\n \n #samples_per_trial1 = data1.shape[2] # = window_size1\n #samples_per_trial2 = data2.shape[2] # = window_size2\n \n # renorm data : mean = 0 and var = 1\n if renorm == True :\n data_filtered = pp.renorm(data_filtered)\n\n ## change type \n data_filtered = data.astype(np.float32)\n\n \n return( data_filtered )", "def dataframe():\n\t#allows function to access station, gmt, and miss_station functions\n global stations\n\tglobal gmt\n\tglobal miss_station\n\t\n\t#read predictor file\n\tcontrol = cfg.read_yaml('../registry/graphs.yaml')\n\tpred_ctrl = cfg.read_yaml(cfg.get_config_path(control.pred_file))\n\tpredd_ctrl = cfg.read_yaml(cfg.get_config_path(control.predd_file))\n\n\t#get file paths and update database\n\tpredictor_file_path = control.predictor_file_path\n\tpredictand_file_path = control.predictand_file_path\n\tpred_file_id = update(predictor_file_path)\n\tpredd_file_id = update(predictand_file_path)\n\t\n\t#store lead time and date range\n\tlead_time = control.lead_time\n\tdate_range = control.date_range\n\n\t#get info for fetch many dates\n\tstart,end,stride = read_pred.parse_range(date_range)\n\tfcst_ref_time = control.date_range[0].split('-')[0][-2:]\n\t\n\t#initialize list of predictors\n\tpred_list = pred_ctrl.predictors\n\tpredictor = []\n\n\t#loops through predictors to build camps data objects\n\tfor entry_dict in pred_list:\n\t\t#formats metadata\n\t\tpred = create.preprocess_entries(entry_dict, fcst_ref_time)\n\t\t\n\t\t#adds info to metadata that's not currently being stored\n\t\tpred.search_metadata['reserved2'] = lead_time*3600\n pred.search_metadata['file_id'] = pred_file_id\n\t\tpred.search_metadata['reserved1'] = 'vector'\n\n\t\t#build camps data objects for each day\n\t\tvariable = fetch_many_dates(predictor_file_path,start,end,stride,pred.search_metadata)\n\t\t\n\t\t#appends all data to single camps object\n\t\tif variable[0] is not None:\n\t\t\tvar = variable[0]\n\t\t\tarrs = []\n\t\t\tfor i in range(len(variable)):\n\t\t\t\tarrs.append(variable[i].data)\n\t\t\tvar.data = np.stack(arrs)\n\t\t\tpredictor.append(var)\n\n\t#initializes list of predictands\n\tpredd_list = predd_ctrl.predictands\n predictand = []\n\t\n\t#loops through predictands to build camps data objects\n for entry_dict in predd_list:\n\t\t#formats metadata\n \tvertical_coordinate = entry_dict.pop('Vertical_Coordinate')\n\t\tentry_dict['file_id'] = predd_file_id\n\n\t\t#build camps objects for each day\n variable = fetch_many_dates(predictand_file_path,start, end, stride, entry_dict)\n\n\t\t#append all data to single camps object\n var = variable[0]\n arrs = []\n for i in range(len(variable)):\n arrs.append(variable[i].data)\n try:\n\t\t\tvar.data = np.stack(arrs)\n\t\t\tpredictand.append(var)\n\t\texcept:\n\t\t\tprint(\"Can't read \" + 
variable.name)\n\n\t#getting predictor station and time data\n\tpredr = Dataset(predictor_file_path[0])\n\tpredr_stat = predr.variables['station'][:]\n\tif lead_time == 3:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant'][:]\n\telif lead_time == 6:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant1'][:]\n\telif lead_time == 12:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant2'][:]\n\tpredr.close()\n\n\t#reformatting predictor station and time data\n\tpredr_stations = stations(predr_stat)\n\tpredr_gmt = gmt(predr_time)\n\t\n\t#getting predictand station and time data\n\tpredd = Dataset(predictand_file_path[0])\n\tpredd_stat = predd.variables['station'][:]\n\tpredd_time = predd.variables['OM__resultTime'][:]\n\tpredd.close()\n\t\n\t#reformatting predictand station and time data\n\tpredd_stations = stations(predd_stat)\n\tpredd_gmt = gmt(predd_time)\n\n\t#choosing predictand observations that line up with predictor time\n\thour = (predictor[0].metadata['FcstTime_hour']/3600) + lead_time\n\tdays = len(predd_gmt)/24\n\tpredd_hours = [0]*days\n k=0\n for i in range(len(predd_gmt)):\n if i%24 == hour:\n\t\t\tpredd_hours[k]=predd_gmt[i]\n\t\t\tk+=1\n\t\n\t#catches when GFS data doesn't cover the last day of the month\n\tif len(predr_gmt) < len(predd_hours):\n\t\tpredd_hours = predd_hours[:-1]\t\n\t\n\t#find missing stations\n\tmiss_stations = miss_station(predr_stations,predd_stations)\n\tstations = predd_stations\n\t\n\t#station and time array\n\tinfo = [['',''] for k in range(len(predr_gmt)*len(stations))]\n\tfor i in range(len(predr_gmt)):\n\t\tfor j in range(len(stations)):\n\t\t\tk = i*len(stations)+j\n\t\t\tinfo[k][0]=predr_gmt[i]\n\t\t\tinfo[k][1]=stations[j]\n\n\t#create column names\n\tnames = ['']*(len(predictor)+len(predictand)+2)\n\tnames[0]='Time'\n\tnames[1]='Station'\n\n\t#creating array\n\tarr = np.zeros((len(stations)*len(predr_gmt),len(predictor)+len(predictand)))\n\t\n\t#adding predictor data\n\tfor i in range(len(predictor)):\n\t\t#remove lead time and forecast reference time from variable name\n\t\t#and add variable name to column list of final dataframe\n\t\tif lead_time == 12:\n\t\t\tnames[i+2]='GFS_'+predictor[i].get_variable_name()[:-11]\n\t\telse:\n\t\t\t names[i+2]='GFS_'+predictor[i].get_variable_name()[:-10]\n\n\t\t#create pandas dataframe of data and sort alphabetically by station name\n\t\tpredictor[i].data = np.squeeze(predictor[i].data,axis=2)\n\t\tpredictor[i].data = pd.DataFrame(predictor[i].data,columns=predr_stations,index=predr_gmt)\n\t\tpredictor[i].data = predictor[i].data.reindex(sorted(predictor[i].data.columns),axis=1)\n\t\t\n\t\t#remove stations with no predictand data\n\t\tk=0\n\t\ta=miss_stations[:]\n\t\tfor j in predictor[i].data.columns:\n\t\t\tif not a:\n\t\t\t\tbreak\n\t\t\tif j==a[k]:\n\t\t\t\tpredictor[i].data=predictor[i].data.drop(j,axis=1)\n\t\t\t\tdel a[k]\n\t\t\n\t\t#add data to final dataframe\n\t\tfor b in range(len(predr_gmt)):\n\t\t\tfor c in range(len(stations)):\n\t\t\t\tk = b*len(stations)+c\n\t\t\t\tarr[k][i] = predictor[i].data.iloc[b][c]\n\n\t#add predictand data\n\tfor i in range(len(predictand)):\n\t\t#removing extra underscore, adding variable name to column names\n\t\tnames[len(predictor)+2+i]='METAR_'+predictand[i].get_variable_name()[:-1]\n\t\n\t\t#resize array and create pandas dataframe\n\t\tpredictand[i].data = np.squeeze(predictand[i].data,axis=2)\n\t\tpredictand[i].data = pd.DataFrame(predictand[i].data,columns=predd_stations,index=predd_hours)\n\t\tpredictand[i].data = 
predictand[i].data.reindex(sorted(predictand[i].data.columns),axis=1)\n\t\t\n\t\t#remove extra days of predictand data\n\t\tpredictand[i].data = predictand[i].data.iloc[0:len(predr_time),:]\n\t\t\t\n\t\t#add predictand data to array\n\t\tfor b in range(len(predr_gmt)):\n\t\t\tfor c in range(len(stations)):\n\t\t\t\tk = b*len(stations)+c\n\t\t\t\tval = predictand[i].data.iloc[b][c]\n\t\t\t\t\n\t\t\t\t#catch metar fill data\n\t\t\t\tif val == 9999: \n\t\t\t\t\tval = np.nan\n\t\t\t\tarr[k][len(predictor)+i]=val\n\t\n\t#add station and time data to array and save as csv\n\tdata = np.concatenate([info,arr],axis = 1)\n\tto_save = pd.DataFrame(data,columns=names)\n\tto_save.to_csv(str(start)+'_'+str(end)+'_'+str(lead_time)+'hrs.csv')", "def sample_simulation() -> Dict[str, Tuple[str, float]]:\n sim = Simulation('stations.json', 'sample_rides.csv')\n sim.run(datetime(2017, 6, 1, 8, 0, 0),\n datetime(2017, 6, 1, 9, 0, 0))\n\n return sim.calculate_statistics()", "def readExperi(directory,varid,experi,level):\n print('\\n>>> Using readExperi function! \\n')\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n \n ### Call files\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n if any([experi == 'FPOL',experi == 'FSUB']):\n directory = '/home/zlabe/green/simu/'\n totaldirectory = directory + experi + '/monthly/'\n filename = totaldirectory + varid + '_1900-2000.nc'\n \n ### Read in Data\n if level == 'surface': # 3d variables\n data = Dataset(filename,'r')\n varq = data.variables['%s' % varid][:,:,:,0]\n data.close()\n \n dataq = Dataset(totaldirectory + 'T2M_1900-2000.nc')\n time = dataq.variables['time'][:]\n lev = 'surface'\n lat = dataq.variables['latitude'][:]\n lon = dataq.variables['longitude'][:]\n dataq.close()\n elif level == 'profile': # 4d variables\n data = Dataset(filename,'r')\n varq = data.variables['%s' % varid][:,:,:,0]\n data.close()\n \n dataq = Dataset(totaldirectory + 'TEMP_1900-2000.nc')\n time = dataq.variables['time'][:]\n lev = dataq.variables['level'][:]\n lat = dataq.variables['latitude'][:]\n lon = dataq.variables['longitude'][:]\n dataq.close()\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Read data for *%s* : %s!' % (experi[:4],varid))\n \n ### Reshape to split years and months\n months = 12\n if level == 'surface': # 3d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,\n int(lat.shape[0])))\n elif level == 'profile': # 4d variables\n var = np.reshape(varq,(int(varq.shape[0]/12),months,int(lev.shape[0]),\n int(lat.shape[0])))\n else:\n print(ValueError('Selected wrong height - (surface or profile!)!')) \n print('Completed: Reshaped %s array!' 
% (varid))\n \n ### Convert units\n if varid in ('TEMP','T2M'):\n var = var - 273.15 # Kelvin to degrees Celsius \n print('Completed: Changed units (K to C)!')\n\n print('\\n*Completed: Finished readExperi function!')\n return lat,lon,time,lev,var", "def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the datasets \"\"\"\n logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? 
\n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? \n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']", "def get_simulation(self, _id):\n\n simulation = self.collection.find_one({'_id': ObjectId(_id)})\n\n return simulation", "def executeScriptToGetData():\n ulv = random.randrange(42, 420)\n llv = random.randrange(42, 420)\n urv = random.randrange(42, 420)\n lrv = ulv + llv + urv\n return {\n 'title': random.choice(['Sensors title', None]),\n 'description': random.choice(['Sensors description', None]),\n 'big-value': random.randrange(214, 514),\n 'upper-left-label': 'Critical:',\n 'upper-left-value': ulv,\n 'lower-left-label': 'Major:',\n 'lower-left-value': llv,\n 'upper-right-label': 'Minor:',\n 'upper-right-value': urv,\n 
'lower-right-label': 'All:',\n 'lower-right-value': lrv\n }", "def get_data_db(id_s):\n mat_contents = sio.loadmat(id_s)\n conc = mat_contents['conc']\n rel = mat_contents['rel']\n dim=len(rel.shape)\n if (dim==3):\n if (len(conc)>len(conc[0][0])):\n conc = np.transpose(conc, (2, 1, 0))\n rel = np.transpose(rel, (2, 1, 0))\n if (len(conc)==8):\n conc = np.transpose(conc, (2, 0, 1))\n rel = np.transpose(rel, (2, 0, 1))\n data_time = np.zeros((len(conc)*2, len(conc[0]), len(conc[0][0])))\n data_time[0:len(conc)] = conc\n data_time[len(conc)::] = rel\n \n if (dim ==2):\n data_time = np.zeros((2,len(conc), len(conc[0])))\n data_time[0:len(conc)] = conc\n data_time[len(conc)::] = rel\n return data_time", "def run_global(start_year, end_year, depth_from, depth_to, animate=True):\n# years, times, rootgrps = retrieve(1950,2018)\n# rootgrps_1950 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1950\\EN.4.2.1.f.analysis.g10.195001.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1951 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1951\\EN.4.2.1.f.analysis.g10.195101.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1952 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1952\\EN.4.2.1.f.analysis.g10.195201.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1953 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1953\\EN.4.2.1.f.analysis.g10.195301.nc\", \"r+\", format=\"NETCDF4\")]\n#\n#\n# rootgrps_2015 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2015\\EN.4.2.1.f.analysis.g10.201501.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2016 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2016\\EN.4.2.1.f.analysis.g10.201601.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2017 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2017\\EN.4.2.1.f.analysis.g10.201701.nc\", \"r+\", format=\"NETCDF4\")]\n rootgrps_2018 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2018\\EN.4.2.1.f.analysis.g10.201801.nc\", \"r+\", format=\"NETCDF4\")]\n\n# HC_1950 = calculate_HC_global(rootgrps_1950, 0, 2000)\n# print('1950', time.time()-start)\n# HC_1951 = calculate_HC_global(rootgrps_1951, 0, 2000)\n# print('1951', time.time()-start)\n# HC_1952 = calculate_HC_global(rootgrps_1952, 0, 2000)\n# print('1952', time.time()-start)\n# HC_1953 = calculate_HC_global(rootgrps_1953, 0, 2000)\n# print('1953', time.time()-start) \n#\n# HC_2015 = calculate_HC_global(rootgrps_2015, 0, 2000)\n# print('2015', time.time()-start)\n# HC_2016 = calculate_HC_global(rootgrps_2016, 0, 2000)\n# print('2016', time.time()-start)\n# HC_2017 = calculate_HC_global(rootgrps_2017, 0, 2000)\n# print('2017', time.time()-start)\n HC_2018 = calculate_HC_global(rootgrps_2018, 0, 2000)\n# print('2018', time.time()-start)\n# HC_1950_mean = (HC_1950+HC_1951+HC_1952+HC_1953)/4\n# HC_2018_mean = (HC_2015+HC_2016+HC_2017+HC_2018)/4\n\n# dHC = (HC_2018_mean-HC_1950_mean)/(65*365*24*3600)\n if animate == True:\n plot(rootgrps_2018, HC_2018)\n return HC_2018", "def build_model_data(model_run, debug=False):\n # We build up a dictionary of the data, then convert it to an xarray Dataset\n # before applying time dimensions\n data = xr.Dataset(coords=add_sets(model_run), attrs=add_attributes(model_run))\n\n data_dict = dict()\n data_dict.update(constraints_to_dataset(model_run))\n data_dict.update(costs_to_dataset(model_run))\n data_dict.update(location_specific_to_dataset(model_run))\n data_dict.update(tech_specific_to_dataset(model_run))\n data_dict.update(carrier_specific_to_dataset(model_run))\n data_dict.update(group_constraints_to_dataset(model_run))\n\n data = data.merge(xr.Dataset.from_dict(data_dict))\n\n data = add_lookup_arrays(data, model_run)\n\n if 
debug:\n data_pre_time = data.copy(deep=True)\n\n data = add_time_dimension(data, model_run)\n\n # apply scaling as specified in scale.yaml\n ranges_start = compute_unit_ranges(data)\n\n # determine tolerance used by solver to guide scaling\n if 'solver_options' in model_run['run'] and model_run['run']['solver_options'] is not None:\n feasibilityTol = model_run['run']['solver_options'].get(\n 'FeasibilityTol', 1e-6\n )\n optimalityTol = model_run['run']['solver_options'].get(\n 'OptimalityTol', 1e-6\n )\n tol = max(feasibilityTol, optimalityTol)\n else:\n tol = 1e-6\n\n\n \n # apply scaling as defined by scaling factors in the input data\n if model_run['run']['scale'] == 1 and 'scale' in model_run:\n print('scale')\n data.attrs['scale'] = True\n print('factors', model_run['scale'])\n data['scale'] = xr.DataArray(\n [v for v in model_run['scale'].values()],\n [('unit', [k for k in model_run['scale'].keys()])]\n )\n scale(data)\n \n # apply autoscaling, deriving optimal scaling factors\n elif model_run['run']['scale'] == 2:\n print('autoscale')\n data.attrs['scale'] = True\n\n stt_factor = model_run['run']['scaling_tolerance_threshold']\n scaling_factors = get_scale(\n ranges_start, \n model_run['run']['solver'], \n model_run['run'].get('solver_io', None), \n stt_factor*tol\n )\n print('factors', scaling_factors)\n data['scale'] = xr.DataArray(\n [v for v in scaling_factors.values()],\n [('unit', [k for k in scaling_factors.keys()])]\n )\n scale(data)\n # don't do any scaling\n else:\n print('unscaled')\n data.attrs['scale'] = False\n\n ranges_end = compute_unit_ranges(data)\n \n # print data ranges for inspection purposes\n for ranges, word in [(ranges_start, \"before\"), (ranges_end, \"after\")]:\n print('\\nUnit ranges {} scaling'.format(word))\n print('{:20} {:12} {:12}'.format(\"unit\", \"min\", \"max\"))\n for k,v in ranges.items():\n print('{:20} {:<12.8f} {:<12.8f}'.format(k, v[\"min\"], v[\"max\"]))\n\n print('cost: {}'.format(\n max(map(lambda x: x['max'], ranges.values()))/\n min(map(lambda x: x['min'], ranges.values()))\n )) \n\n for rng in ranges_end.values():\n if rng['min'] < tol:\n print('Warning: unit {}/{} has min value {}.\\nThis is below solver tolerance {} and may lead to numerical issues.\\nConsider changing scaling strategy or solver tolerances.'.format(rng['num'], rng['den'], rng['min'], tol)) \n\n print('\\n')\n\n \n \n # Carrier information uses DataArray indexing in the function, so we merge\n # these directly into the main xarray Dataset\n\n if debug:\n return data, data_dict, data_pre_time\n else:\n return data", "def _create_projection_datasets(self):\n # First grab the spectroscopic indices and values and position indices\n self._sho_spec_inds = self.h5_main.h5_spec_inds\n self._sho_spec_vals = self.h5_main.h5_spec_vals\n self._sho_pos_inds = self.h5_main.h5_pos_inds\n\n fit_dim_ind = self.h5_main.spec_dim_labels.index(self._fit_dim_name)\n\n self._fit_spec_index = fit_dim_ind\n self._fit_offset_index = 1 + fit_dim_ind\n\n # Calculate the number of loops per position\n cycle_start_inds = np.argwhere(self._sho_spec_inds[fit_dim_ind, :] == 0).flatten()\n tot_cycles = cycle_start_inds.size\n\n # Make the results group\n self._h5_group = create_results_group(self.h5_main, 'Loop_Fit')\n write_simple_attrs(self._h5_group, {'projection_method': 'pycroscopy BE loop model'})\n\n # Write datasets\n self.h5_projected_loops = create_empty_dataset(self.h5_main, np.float32, 'Projected_Loops',\n h5_group=self._h5_group)\n\n h5_loop_met_spec_inds, h5_loop_met_spec_vals = 
write_reduced_spec_dsets(self._h5_group, self._sho_spec_inds,\n self._sho_spec_vals, self._fit_dim_name,\n basename='Loop_Metrics')\n\n self.h5_loop_metrics = write_main_dataset(self._h5_group, (self.h5_main.shape[0], tot_cycles), 'Loop_Metrics',\n 'Metrics', 'compound', None, None, dtype=loop_metrics32,\n h5_pos_inds=self.h5_main.h5_pos_inds,\n h5_pos_vals=self.h5_main.h5_pos_vals,\n h5_spec_inds=h5_loop_met_spec_inds,\n h5_spec_vals=h5_loop_met_spec_vals)\n\n # Copy region reference:\n copy_region_refs(self.h5_main, self.h5_projected_loops)\n copy_region_refs(self.h5_main, self.h5_loop_metrics)\n\n self.h5_main.file.flush()\n self._met_spec_inds = self.h5_loop_metrics.h5_spec_inds\n\n return", "def create(self) -> dict:\n variable_values = {}\n for simulation in self.simulation_list:\n name_array = []\n name_matrix = []\n list_name_col = []\n name_col = []\n name_row = []\n\n for line in self.filecontents(simulation):\n do_not_store_this_line = 0\n\n # checks if line contains name of subsequent array\n if line[0:12] == \" ! Variable \" and line[-2:] == \"#\\n\":\n name_array = (line[12:].split(\" \"))[0]\n name_matrix = []\n list_name_col = []\n name_col = []\n do_not_store_this_line = 1\n\n # checks if line contains name of columns (and also matrix)\n if name_array != []:\n if line.split(\"(\")[0] == \" \" + name_array:\n # line defines name of matrix if its equal to array name followed by \"(\"\n list_name_col = line.split(\",\")\n name_matrix = line.split(\")\")[0].split(\":\")[-1].strip(\"\\\"\")\n # defines name of matrix equal to the entry before the first \")\" then after the last \":\"\n do_not_store_this_line = 1 # matrix name line doesn't contain usable values\n\n # row name is just first entry of a line\n name_row = line.split(\",\")[0].strip()\n\n # foreach cell in a line, assigns its value to a dictionary with keys for the\n # array, matrix, col, and row names\n for i, cell in enumerate(line.split(\",\")):\n if name_array != [] and name_matrix != [] and name_row != [] and list_name_col != []:\n name_col = list_name_col[i].strip()\n if (do_not_store_this_line == 0 and i != 0 and name_col != \"\"):\n key = (simulation, name_array, name_matrix, name_row, name_col)\n variable_values[key] = float(cell.strip())\n\n return variable_values", "def simulateDataOnHimster(thisExperiment: Experiment, thisScenario: Scenario) -> Scenario:\n\n for task in thisScenario.SimulationTasks:\n\n print(f\"running simulation of type {str(task.simDataType)} and path ({task.dirPath} at states:\")\n print(f\"current state: {str(task.simState)}\")\n print(f\"last state: {str(task.lastState)}\")\n\n data_keywords = []\n data_pattern = \"\"\n\n cut_keyword = generateCutKeyword(thisExperiment.recoParams)\n\n print(f\"cut keyword is {cut_keyword}\")\n\n merge_keywords = [\"merge_data\", \"binning_300\"]\n # if \"v\" in task.simType:\n if task.simDataType == SimulationDataType.VERTEX:\n data_keywords = [\"uncut\", \"bunches\", \"binning_300\"]\n data_pattern = \"lmd_vertex_data_\"\n # elif \"a\" in task.simType:\n elif task.simDataType == SimulationDataType.ANGULAR:\n data_keywords = [cut_keyword, \"bunches\", \"binning_300\"]\n data_pattern = \"lmd_data_\"\n elif task.simDataType == SimulationDataType.EFFICIENCY_RESOLUTION:\n data_keywords = [cut_keyword, \"bunches\", \"binning_300\"]\n data_pattern = \"lmd_res_data_\"\n else:\n raise NotImplementedError(f\"Simulation type {task.simDataType} is not implemented!\")\n\n # 1. 
simulate data\n if task.simState == SimulationState.START_SIM:\n os.chdir(lmd_fit_script_path)\n status_code = 1\n # if \"er\" in task.simType:\n if task.simDataType == SimulationDataType.EFFICIENCY_RESOLUTION:\n \"\"\"\n efficiency / resolution calculation.\n\n Takes an offset of the IP into account.\n\n TODO: This needs to know the misalignment of the detector.\n \"\"\"\n found_dirs = []\n # what the shit, this should never be empty in the first place\n if (task.dirPath != \"\") and (task.dirPath is not None):\n temp_dir_searcher = general.DirectorySearcher(\n [\n thisExperiment.recoParams.simGenTypeForResAcc.value,\n data_keywords[0],\n ] # look for the folder name including sim_type_for_resAcc\n )\n temp_dir_searcher.searchListOfDirectories(task.dirPath, thisScenario.track_file_pattern)\n found_dirs = temp_dir_searcher.getListOfDirectories()\n print(f\"found dirs now: {found_dirs}\")\n else:\n # path may be empty, then the directory searcher tries to find it\n pass\n\n if found_dirs:\n status_code = wasSimulationSuccessful(\n thisExperiment,\n found_dirs[0],\n thisScenario.track_file_pattern + \"*.root\",\n )\n elif task.lastState < SimulationState.START_SIM:\n # then lets simulate!\n # this command runs the full sim software with box gen data\n # to generate the acceptance and resolution information\n # for this sample\n # note: beam tilt and divergence are not necessary here,\n # because that is handled completely by the model\n\n # because we don't want to change the experiment config or\n # anything in the simParams, recoParam, alignParams,\n # we'll create temp objects here.\n\n tempSimParams = thisExperiment.simParams\n tempRecoParams = thisExperiment.recoParams\n tempAlignParams = thisExperiment.alignParams\n\n thisIPX = tempRecoParams.recoIPX\n thisIPY = tempRecoParams.recoIPY\n thisIPZ = tempRecoParams.recoIPZ\n\n max_xy_shift = math.sqrt(thisIPX**2 + thisIPY**2)\n max_xy_shift = float(\"{0:.2f}\".format(round(float(max_xy_shift), 2)))\n\n # since this is the res/acc case, these parameters must be changed\n tempSimParams.simGeneratorType = tempRecoParams.simGenTypeForResAcc\n tempSimParams.num_events_per_sample = tempRecoParams.num_events_per_resAcc_sample\n tempSimParams.num_samples = tempRecoParams.num_resAcc_samples\n tempSimParams.theta_min_in_mrad -= max_xy_shift\n tempSimParams.theta_max_in_mrad += max_xy_shift\n tempSimParams.ip_offset_x = thisIPX\n tempSimParams.ip_offset_y = thisIPY\n tempSimParams.ip_offset_z = thisIPZ\n\n # since this is the res/acc case, these parameters must be updated\n tempRecoParams.num_samples = tempRecoParams.num_resAcc_samples\n tempRecoParams.num_events_per_sample = tempRecoParams.num_events_per_resAcc_sample\n\n # TODO: alignment part\n # if alignement matrices were specified, we used them as a mis-alignment\n # and alignment for the box simulations\n\n (job, returnPath) = create_simulation_and_reconstruction_job(\n tempSimParams,\n tempAlignParams,\n tempRecoParams,\n application_command=thisScenario.Sim,\n use_devel_queue=args.use_devel_queue,\n )\n job_manager.append(job)\n\n task.dirPath = returnPath\n thisScenario.acc_and_res_dir_path = returnPath\n # last_state += 1\n # last state was < 1, so 0. That means an increase is now 1\n task.lastState = SimulationState.START_SIM\n\n # elif \"a\" in task.simType:\n elif task.simDataType == SimulationDataType.ANGULAR:\n \"\"\"\n a is the angular case. 
this is the data set onto which the luminosiy fit is performed.\n it is therefore REAL digi data (or DPM data of course) that must be reconstructed again\n with the updated reco parameter (like the IP position, cuts applied and alignment).\n note: beam tilt and divergence are not used here because\n only the last reco steps are rerun of the track reco\n \"\"\"\n found_dirs = []\n status_code = 1\n # what the shit, this should never be empty in the first place\n if (task.dirPath != \"\") and (task.dirPath is not None):\n temp_dir_searcher = general.DirectorySearcher([\"dpm_elastic\", data_keywords[0]])\n temp_dir_searcher.searchListOfDirectories(task.dirPath, thisScenario.track_file_pattern)\n found_dirs = temp_dir_searcher.getListOfDirectories()\n\n else:\n # path may be empty, then the directory searcher tries to find it\n pass\n\n if found_dirs:\n status_code = wasSimulationSuccessful(\n thisExperiment,\n found_dirs[0],\n thisScenario.track_file_pattern + \"*.root\",\n )\n\n # oh boi that's bound to be trouble with IntEnums\n elif task.lastState < task.simState:\n\n # * reco params must be adjusted if the res/acc sample had more jobs or samples that the real (or dpm) data\n rec_par = thisExperiment.recoParams\n if thisExperiment.recoParams.num_samples > 0 and rec_par.num_samples > thisExperiment.recoParams.num_samples:\n rec_par.num_samples = thisExperiment.recoParams.num_samples\n\n # TODO: have alignment parameters changed? take them from the experiment\n align_par = thisExperiment.alignParams\n\n (job, returnPath) = create_reconstruction_job(\n rec_par,\n align_par,\n str(thisExperiment.baseDataOutputDir),\n application_command=thisScenario.Reco,\n use_devel_queue=args.use_devel_queue,\n )\n job_manager.append(job)\n\n task.dirPath = returnPath\n thisScenario.filteredTrackDirectory = returnPath\n\n # Simulation is done, so update the last_state\n task.lastState = SimulationState.START_SIM\n\n # elif \"v\" in task.simType:\n elif task.simDataType == SimulationDataType.VERTEX:\n\n # TODO: check if the sim data is already there, if yes return 0, else start sim\n status_code = 0\n\n # # vertex Data must always be created without any cuts first\n # tempRecoPars = thisExperiment.recoParams\n # tempRecoPars.use_xy_cut = False\n # tempRecoPars.use_m_cut = False\n\n # # TODO: misalignment is important here. the vertex data can have misalignment (because it's real data)\n # # but it has no alignment yet. that is only for the second reconstruction\n # tempAlignPars = thisExperiment.alignParams\n # tempAlignPars.alignment_matrices_path = None\n\n # job, _ = create_simulation_and_reconstruction_job(\n # thisExperiment.simParams,\n # tempAlignPars,\n # tempRecoPars,\n # use_devel_queue=args.use_devel_queue,\n # application_command=thisScenario.Sim,\n # )\n # job_manager.append(job)\n\n else:\n raise ValueError(f\"This tasks simType is {task.simDataType}, which is invalid!\")\n\n if status_code == 0:\n print(\"found simulation files, skipping\")\n task.simState = SimulationState.MAKE_BUNCHES\n task.lastState = SimulationState.START_SIM\n elif status_code > 0:\n print(f\"still waiting for himster simulation jobs for {task.simDataType} data to complete...\")\n else:\n raise ValueError(\"status_code is negative, which means number of running jobs can't be determined. \")\n\n # 2. 
create data (that means bunch data, create data objects)\n if task.simState == SimulationState.MAKE_BUNCHES:\n # check if data objects already exists and skip!\n temp_dir_searcher = general.DirectorySearcher(data_keywords)\n temp_dir_searcher.searchListOfDirectories(task.dirPath, data_pattern)\n found_dirs = temp_dir_searcher.getListOfDirectories()\n status_code = 1\n if found_dirs:\n status_code = wasSimulationSuccessful(\n thisExperiment,\n found_dirs[0],\n data_pattern + \"*\",\n is_bunches=True,\n )\n\n elif task.lastState < task.simState:\n os.chdir(lmd_fit_script_path)\n # bunch data\n # TODO: pass experiment config, or better yet, make class instead of script\n bashcommand = (\n \"python makeMultipleFileListBunches.py \"\n + f\" --filenamePrefix {thisScenario.track_file_pattern}\"\n + \" --files_per_bunch 10 --maximum_number_of_files \"\n + str(thisExperiment.recoParams.num_samples)\n + \" \"\n + task.dirPath\n )\n print(f\"Bash command for bunch creation:\\n{bashcommand}\\n\")\n _ = subprocess.call(bashcommand.split())\n # TODO: pass experiment config, or better yet, make class instead of script\n # create data\n bashArgs = []\n # if \"a\" in task.simType:\n if task.simDataType == SimulationDataType.ANGULAR:\n el_cs = thisScenario.elastic_pbarp_integrated_cross_secion_in_mb\n bashArgs.append(\"python\")\n bashArgs.append(\"createMultipleLmdData.py\")\n bashArgs.append(\"--dir_pattern\")\n bashArgs.append(data_keywords[0])\n bashArgs.append(\"--jobCommand\")\n bashArgs.append(thisScenario.LmdData)\n bashArgs.append(f\"{thisScenario.momentum:.2f}\")\n bashArgs.append(str(task.simDataType.value)) # we have to give the value because the script expects a/er/v !\n bashArgs.append(task.dirPath)\n bashArgs.append(\"../dataconfig_xy.json\")\n\n if el_cs:\n bashArgs.append(\"--elastic_cross_section\")\n bashArgs.append(str(el_cs))\n # bashcommand += \" --elastic_cross_section \" + str(el_cs)\n else:\n bashArgs.append(\"python\")\n bashArgs.append(\"createMultipleLmdData.py\")\n bashArgs.append(\"--dir_pattern\")\n bashArgs.append(data_keywords[0])\n bashArgs.append(\"--jobCommand\")\n bashArgs.append(thisScenario.LmdData)\n bashArgs.append(f\"{thisScenario.momentum:.2f}\")\n bashArgs.append(str(task.simDataType.value)) # we have to give the value because the script expects a/er/v !\n bashArgs.append(task.dirPath)\n bashArgs.append(\"../dataconfig_xy.json\")\n\n print(bashArgs)\n _ = subprocess.call(bashArgs)\n\n # last_state = last_state + 1\n # was apparently bunches\n task.lastState = SimulationState.MERGE\n\n bashArgs.clear()\n\n # else:\n # raise RuntimeError(\"No data could be found, but no commands are to be executed. This can't be!\")\n\n if status_code == 0:\n print(\"skipping bunching and data object creation...\")\n # state = 3\n task.simState = SimulationState.MERGE\n task.lastState = SimulationState.MAKE_BUNCHES\n elif status_code > 0:\n print(f\"status_code {status_code}: still waiting for himster simulation jobs for {task.simDataType} data to complete...\")\n else:\n # ok something went wrong there, exit this scenario and\n # push on bad scenario stack\n task.simState = SimulationState.FAILED\n raise ValueError(\"Something went wrong with the cluster jobs! This scenario will no longer be processed.\")\n\n # 3. 
merge data\n if task.simState == SimulationState.MERGE:\n # check first if merged data already exists and skip it!\n temp_dir_searcher = general.DirectorySearcher(merge_keywords)\n temp_dir_searcher.searchListOfDirectories(task.dirPath, data_pattern)\n found_dirs = temp_dir_searcher.getListOfDirectories()\n if not found_dirs:\n os.chdir(lmd_fit_script_path)\n # merge data\n # if \"a\" in task.simType:\n bashArgs = []\n if task.simDataType == SimulationDataType.ANGULAR:\n bashArgs.append(\"python\")\n bashArgs.append(\"mergeMultipleLmdData.py\")\n bashArgs.append(\"--dir_pattern\")\n bashArgs.append(data_keywords[0])\n bashArgs.append(\"--num_samples\")\n bashArgs.append(str(bootstrapped_num_samples))\n bashArgs.append(str(task.simDataType.value)) # we have to give the value because the script expects a/er/v !\n bashArgs.append(task.dirPath)\n\n else:\n bashArgs.append(\"python\")\n bashArgs.append(\"mergeMultipleLmdData.py\")\n bashArgs.append(\"--dir_pattern\")\n bashArgs.append(data_keywords[0])\n bashArgs.append(str(task.simDataType.value)) # we have to give the value because the script expects a/er/v !\n bashArgs.append(task.dirPath)\n\n print(\"working directory:\")\n print(f\"{os.getcwd()}\")\n print(f\"running command:\\n{bashArgs}\")\n _ = subprocess.call(bashArgs)\n\n task.simState = SimulationState.DONE\n\n if task.lastState == SimulationState.FAILED:\n thisScenario.is_broken = True\n break\n\n # remove done tasks\n thisScenario.SimulationTasks = [simTask for simTask in thisScenario.SimulationTasks if simTask.simState != SimulationState.DONE]\n\n return thisScenario", "def all(config_file):\n with open(config_file) as f:\n config = json.load(f)\n scenes = get_realsense_scenes(config['realsense_dir'])\n all_dfs = []\n for scene in scenes:\n scene_data = get_data_from_scene(scene)\n logger.info(\"Evaluating - %s\", scene['scene_name'])\n df = run_test_on_scene(scene_data, config)\n all_dfs.append(df)\n\n df = pd.concat(all_dfs, axis=0)\n df = df.reset_index()\n print(df)\n df.to_csv(config['save_csv'])", "def part1(input):\n ps = PlanetSystem(input)\n for i in range(3):\n ps.simulate_dimension(i, 1000)\n return ps.total_energy", "def step(self, actions):\n assert (len(actions) == len(self.simulators))\n\n data_out = {outp: self.num_simulators*[None] for outp in self.outputs}\n\n # def act(idx, s):\n # try:\n # response = s.step(actions[idx])\n # if self.simulator_type == 'room_simulator':\n # response = self._convert_observation(s, response, self.outputs) ## ATTENTION DANS DOOM ON MODIFIE DIRECTEMENT LES DATA DANS DOOM SIMULATOR\n # for outp in self.outputs:\n # data_out[outp][idx] = response[outp] # ICI LES DATA ONT LA BONNE SHAPE\n # except Exception as exc:\n # print('Exception when stepping simulator with id: ' + str(idx))\n # raise exc\n\n # with ThreadPoolExecutor(max_workers=self.num_simulators) as executor:\n # futures = []\n # for i in range(self.num_simulators):\n # future = executor.submit(act, i, self.simulators[i])\n # futures.append(future)\n # concurrent.futures.wait(futures)\n # # check if any exception\n # for f in futures:\n # f.result()\n\n data_out = {outp: [] for outp in self.outputs}\n \n for (sim, act) in zip(self.simulators, actions):\n data_one_sim = sim.step(act)\n for outp in self.outputs:\n data_out[outp].append(data_one_sim[outp])\n\n # print(data_out.keys())\n return data_out", "def extract_by_rivid(rivid, folder_path, outpath):\n\n files = sorted([os.path.join(folder_path, i) for i in os.listdir(folder_path)])\n ensemble_columns = 
[\"Ensemble_{}\".format(i) for i in range(51)]\n\n # Generate start date time series\n dates_list = sorted([i[:-3] for i in os.listdir(folder_path)])\n dates_pandas = pd.to_datetime(dates_list)\n\n # Get rivids as an array\n ds = xr.open_dataset(files[0])\n rivids = ds[\"rivid\"].data\n ds.close()\n\n # Try to find the index of the rivid\n try:\n rivid_index = np.where(rivids == rivid)[0][0]\n except Exception as e:\n raise ValueError(\"The given rivid does not exist in this stream network.\")\n\n # Creating dask dataframes for the data\n list_of_dask_q_arrays = []\n list_of_dask_init_arrays = []\n list_of_dask_q_high_res_arrays = []\n\n for file in files:\n ds = xr.open_dataset(file, chunks={\"rivid\": 5000}) # arbitrary chunk value\n\n tmp_dask_q_array = ds[\"Qout\"].data\n list_of_dask_q_arrays.append(tmp_dask_q_array)\n\n tmp_dask_init_array = ds[\"initialization_values\"].data\n list_of_dask_init_arrays.append(tmp_dask_init_array)\n\n tmp_dask_q_high_res_array = ds[\"Qout_high_res\"].data\n list_of_dask_q_high_res_arrays.append(tmp_dask_q_high_res_array)\n\n ds.close()\n\n big_dask_q_array = da.stack(list_of_dask_q_arrays)\n big_dask_init_array = da.stack(list_of_dask_init_arrays)\n big_dask_q_high_res_array = da.stack(list_of_dask_q_high_res_arrays)\n\n # Extracting the initialization flows\n init_data = np.asarray(big_dask_init_array[:, rivid_index])\n init_df = pd.DataFrame(init_data, columns=[\"Initialization (m^3/s)\"], index=dates_pandas)\n file_name = os.path.join(outpath, \"Initialization_Values.csv\")\n init_df.to_csv(file_name, index_label=\"Date\")\n\n # Extracting the Flow Data\n q_data = np.asarray(big_dask_q_array[:, rivid_index, :, :])\n for i in range(15):\n\n q_data_tmp = q_data[:, i, :]\n\n temp_df = pd.DataFrame(\n q_data_tmp, index=(dates_pandas + pd.DateOffset(days=(i + 1))), columns=ensemble_columns\n )\n\n file_name = \"{}_Day_Forecasts.csv\".format(i + 1)\n temp_df.to_csv(os.path.join(outpath, file_name), index_label=\"Date\")\n\n # Extracting the high resolution flow data\n q_high_res_data = np.asarray(big_dask_q_high_res_array[:, rivid_index, :])\n for i in range(10):\n\n q_high_res_data_tmp = q_high_res_data[:, i]\n\n temp_df = pd.DataFrame(\n q_high_res_data_tmp, index=(dates_pandas + pd.DateOffset(days=(i + 1))),\n columns=[\"High Resolution Forecast (m^3/s)\"]\n )\n\n file_name = \"{}_Day_Forecasts_High_Res.csv\".format(i + 1)\n temp_df.to_csv(os.path.join(outpath, file_name), index_label=\"Date\")", "def get_RExpressionData_AnalysisIDAndExperimentIDAndTimePointAndUnitsAndSampleNameShort_dataStage01ReplicatesMI(self, analysis_id_I,experiment_id_I,time_point_I,concentration_units_I,sample_name_short_I):\n #Tested\n try:\n data = self.session.query(data_stage01_quantification_replicatesMI.experiment_id,\n data_stage02_quantification_analysis.sample_name_abbreviation,\n data_stage02_quantification_analysis.analysis_id,\n data_stage01_quantification_replicatesMI.sample_name_short,\n data_stage01_quantification_replicatesMI.time_point,\n data_stage01_quantification_replicatesMI.component_group_name,\n data_stage01_quantification_replicatesMI.component_name,\n data_stage01_quantification_replicatesMI.calculated_concentration,\n data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(\n data_stage02_quantification_analysis.analysis_id.like(analysis_id_I),\n data_stage02_quantification_analysis.experiment_id.like(experiment_id_I),\n data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),\n 
data_stage02_quantification_analysis.time_point.like(time_point_I),\n data_stage01_quantification_replicatesMI.time_point.like(time_point_I),\n data_stage01_quantification_replicatesMI.calculated_concentration_units.like(concentration_units_I),\n data_stage01_quantification_replicatesMI.sample_name_short.like(sample_name_short_I),\n data_stage02_quantification_analysis.sample_name_short.like(sample_name_short_I),\n data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(\n data_stage01_quantification_replicatesMI.experiment_id,\n data_stage02_quantification_analysis.sample_name_abbreviation,\n data_stage02_quantification_analysis.analysis_id,\n data_stage01_quantification_replicatesMI.sample_name_short,\n data_stage01_quantification_replicatesMI.time_point,\n data_stage01_quantification_replicatesMI.component_group_name,\n data_stage01_quantification_replicatesMI.component_name,\n data_stage01_quantification_replicatesMI.calculated_concentration,\n data_stage01_quantification_replicatesMI.calculated_concentration_units).all();\n data_O = [];\n for d in data: \n data_1 = {};\n data_1['analysis_id'] = d.analysis_id;\n data_1['experiment_id'] = d.experiment_id;\n data_1['sample_name_abbreviation'] = d.sample_name_abbreviation;\n data_1['sample_name_short'] = d.sample_name_short;\n data_1['time_point'] = d.time_point;\n data_1['component_group_name'] = d.component_group_name;\n data_1['component_name'] = d.component_name;\n data_1['calculated_concentration'] = d.calculated_concentration;\n data_1['calculated_concentration_units'] = d.calculated_concentration_units;\n data_O.append(data_1);\n return data_O;\n except SQLAlchemyError as e:\n print(e);", "def loadSimData(datafile):\n \n global dt, ti, Lx, Ly, nsamp, N, M, L, B, totalStep, Fmc, Kbend, kT, \\\n dtSamp, T, box_area, nt, body_length, Pe, persistence, flexure \n\n datafile = open(datafile,\"r\")\n for line in datafile:\n A = line.split()\n if A[0] == \"dt\": # Time interval between MD steps\n dt = float(A[-1])\n elif A[0] == \"ti\": # Beginning time for data acquisition\n ti = float(A[-1])\n elif A[0] == \"Lx\": # Box size in x\n Lx = float(A[-1]) \n elif A[0] == \"Ly\": # Box size in y\n Ly = float(A[-1])\n elif A[0] == \"totalStep\": # Total MD steps\n totalStep = float(A[-1])\n elif A[0] == \"nsamp\": # Data sampling frequency\n nsamp = float(A[-1])\n elif A[0] == \"nfil\": # Number of particles per polymer\n N = float(A[-1])\n elif A[0] == \"L\": # Number of particles\n L = float(A[-1])\n elif A[0] == \"B\": # Bond length between particles of a body\n B = float(A[-1])\n elif A[0] == \"kT\": # Boltzmann constant*Temperature\n kT = float(A[-1])\n elif A[0] == \"Fmc\": # Self propulsion force constant\n Fmc = float(A[-1]) \n elif A[0] == \"Kbend\": # Bending constant\n Kbend = float(A[-1])\n \n L = int(L)\n N = int(N)\n Lx /= B\n Ly /= B\n M = L/N\n dtSamp = dt*nsamp\n T = totalStep - ti\n nt = T/nsamp\n box_area = Lx*Ly\n body_length = B*(N-1)\n Pe = Fmc*body_length**2/kT\n persistence = Kbend/(kT*body_length)\n flexure = Pe/persistence\n \n return", "def get_tomo_data(self,threshold=20.):\n\t\tdset = raytomo.RayTomoDataSet(self.attrs['tomo_f'])\n\t\tfor prd in self.attrs['prd_arr']:\n\t\t\tgroup = self['%g_sec'%( prd )]\n\t\t\tdset.get_data4plot(dataid=self.attrs['dataid'].decode('utf-8'), period=prd)\n\t\t\tpdens = dset.pdens\n\t\t\tmask_pdens = dset.pdens < threshold\n\t\t\ttomo_data = np.ma.masked_array(dset.vel_iso, mask=mask_pdens)\n\t\t\tgroup.create_dataset(name='tomo_data', data=dset.vel_iso) # phase velocity 
map\n\t\t\tgroup.create_dataset(name='tomo_data_msk', data=mask_pdens) # save the mask array seperately. h5 file doesn't support masked array\n\t\t\tgroup.create_dataset(name='latArr', data=dset.latArr)\n\t\t\tgroup.create_dataset(name='lonArr', data=dset.lonArr)\n\t\treturn", "def get_measurements_by_time(self):\n data_path = os.path.abspath(\n os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"..\",\n \"data/NVB_rescale_dataset.p\",\n )\n )\n self.log_print([\"Getting experimental data from {}\".format(data_path)])\n self.measurements = pickle.load(open(data_path, \"rb\"))\n return self.measurements", "def get_RExpressionData_analysisIDAndUnits_dataStage02GlogNormalized(self, analysis_id_I,concentration_units_I):\n #Tested\n try:\n data = self.session.query(data_stage02_quantification_glogNormalized.analysis_id,\n data_stage02_quantification_glogNormalized.experiment_id,\n data_stage02_quantification_analysis.sample_name_abbreviation,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).filter(\n data_stage02_quantification_glogNormalized.analysis_id.like(analysis_id_I),\n data_stage02_quantification_glogNormalized.calculated_concentration_units.like(concentration_units_I),\n data_stage02_quantification_analysis.analysis_id.like(analysis_id_I),\n data_stage02_quantification_glogNormalized.experiment_id.like(data_stage02_quantification_analysis.experiment_id),\n data_stage02_quantification_glogNormalized.sample_name_short.like(data_stage02_quantification_analysis.sample_name_short),\n data_stage02_quantification_glogNormalized.time_point.like(data_stage02_quantification_analysis.time_point),\n data_stage02_quantification_glogNormalized.used_.is_(True)).group_by(\n data_stage02_quantification_glogNormalized.analysis_id,\n data_stage02_quantification_glogNormalized.experiment_id,\n data_stage02_quantification_analysis.sample_name_abbreviation,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).all();\n data_O = [];\n for d in data: \n data_1 = {};\n data_1['analysis_id'] = d.analysis_id;\n data_1['experiment_id'] = d.experiment_id;\n data_1['sample_name_abbreviation'] = d.sample_name_abbreviation;\n data_1['sample_name_short'] = d.sample_name_short;\n data_1['time_point'] = d.time_point;\n data_1['component_group_name'] = d.component_group_name;\n data_1['component_name'] = d.component_name;\n data_1['calculated_concentration'] = d.calculated_concentration;\n data_1['calculated_concentration_units'] = d.calculated_concentration_units;\n data_O.append(data_1);\n return data_O;\n except SQLAlchemyError as e:\n print(e);", "def __data_generation(self, folder_id, file_id, el_ids):\n self.reload(folder_id, file_id)\n X = np.array(self.X.iloc[el_ids]).reshape(self.batch_size, len(self.used_variables), self.lev)\n Y = self.Y[:,:,:,el_ids]\n return X,Y", "def 
create_loadshape_pmult_dataframe_for_simulation(settings: SimulationSettingsModel):\n df = create_loadshape_pmult_dataframe(settings)\n simulation_index = create_datetime_index_from_settings(settings)\n return df.loc[simulation_index]", "def run_dataset(data: DataSetBase) -> None:\n\n tracks_manager = data.load_tracks_manager()\n reconstructions = data.load_reconstruction()\n\n all_shot_ids = set(tracks_manager.get_shot_ids())\n for r in reconstructions:\n for shot in r.shots.values():\n if shot.id in all_shot_ids:\n vertices, faces = mesh.triangle_mesh(shot.id, r, tracks_manager)\n shot.mesh.vertices = vertices\n shot.mesh.faces = faces\n\n data.save_reconstruction(\n reconstructions, filename=\"reconstruction.meshed.json\", minify=True\n )", "def loadSimData(datafile):\n \n global dt, ti, Lx, Ly, nsamp, N, M, L, B, totalStep, Fmc, Kbend, kT, \\\n dtSamp, T, box_area, nt, body_length, Pe, persistence, flexure \n\n datafile = open(datafile,\"r\")\n for line in datafile:\n A = line.split()\n if A[0] == \"dt\": # Time interval between MD steps\n dt = float(A[-1])\n elif A[0] == \"ti\": # Beginning time for data acquisition\n ti = float(A[-1])\n elif A[0] == \"Lx\": # Box size in x\n Lx = float(A[-1]) \n elif A[0] == \"Ly\": # Box size in y\n Ly = float(A[-1])\n elif A[0] == \"totalStep\": # Total MD steps\n totalStep = float(A[-1])\n elif A[0] == \"nsamp\": # Data sampling frequency\n nsamp = float(A[-1])\n elif A[0] == \"nfil\": # Number of particles per polymer\n N = int(float(A[-1]))\n elif A[0] == \"L\": # Number of particles\n L = int(float(A[-1]))\n elif A[0] == \"B\": # Bond length between particles of a body\n B = float(A[-1])\n elif A[0] == \"kT\": # Boltzmann constant*Temperature\n kT = float(A[-1])\n elif A[0] == \"Fmc\": # Self propulsion force constant\n Fmc = float(A[-1]) \n elif A[0] == \"Kbend\": # Bending constant\n Kbend = float(A[-1])\n \n Lx /= B\n Ly /= B\n M = L/N\n dtSamp = dt*nsamp\n T = totalStep - ti\n nt = T/nsamp\n box_area = Lx*Ly\n body_length = B*(N-1)\n Pe = Fmc*body_length**2/kT\n persistence = Kbend/(kT*body_length)\n flexure = Pe/persistence", "def create_data_model(con,route_id):\n data = {}\n df1 = pd.read_sql('SELECT * FROM travel_times WHERE route_id = \"{0}\";'.format(route_id), con)\n #df1_data = df1.pivot().values\n data['distance_matrix'] = df1.pivot(index='stop1',columns='stop2',values='travel_time').values\n print('data loaded for {0}'.format(route_id))\n data['num_vehicles'] = 1\n data['depot'] = 0\n return data", "def loadSimData(datafile):\n \n global dt, ti, Lx, Ly, nsamp, N, M, L, B, totalStep, Fmc, Kbend, kT, \\\n dtSamp, T, box_area, nt, body_length, Pe, persistence, flexure \n\n datafile = open(datafile,\"r\")\n for line in datafile:\n A = line.split()\n if A[0] == \"dt\": # Time interval between MD steps\n dt = float(A[-1])\n elif A[0] == \"ti\": # Beginning time for data acquisition\n ti = float(A[-1])\n elif A[0] == \"Lx\": # Box size in x\n Lx = float(A[-1]) \n elif A[0] == \"Ly\": # Box size in y\n Ly = float(A[-1])\n elif A[0] == \"totalStep\": # Total MD steps\n totalStep = float(A[-1])\n elif A[0] == \"nsamp\": # Data sampling frequency\n nsamp = float(A[-1])\n elif A[0] == \"nfil\": # Number of particles per polymer\n N = float(A[-1])\n elif A[0] == \"L\": # Number of particles\n L = float(A[-1])\n elif A[0] == \"B\": # Bond length between particles of a body\n B = float(A[-1])\n elif A[0] == \"kT\": # Boltzmann constant*Temperature\n kT = float(A[-1])\n elif A[0] == \"Fmc\": # Self propulsion force constant\n Fmc = float(A[-1]) 
\n elif A[0] == \"Kbend\": # Bending constant\n Kbend = float(A[-1])\n \n Lx /= B\n Ly /= B\n M = L/N\n dtSamp = dt*nsamp\n T = totalStep - ti\n nt = T/nsamp\n box_area = Lx*Ly\n body_length = B*(N-1)\n Pe = Fmc*body_length**2/kT\n persistence = Kbend/(kT*body_length)\n flexure = Pe/persistence", "def read_and_select(fles, var, area):\n \n ds = xr.open_mfdataset(fles)\n \n # For 20CRv2c geopotential height \n if(var=='hgt'): \n ds = ds.sel(level=150.0)\n \n try:\n ds = ds.rename({'longitude': 'lon', 'latitude': 'lat'}) \n except: \n pass\n \n \n if(ds.lon.values.max() > 350):\n ds = ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180))\n rolls = np.sum(ds.lon.values < 0); ds = ds.roll(lon=rolls*(-1))\n\n if(ds.lat.values[0] > ds.lat.values[-1]):\n ds['lat'] = np.flipud(ds['lat'])\n ds[var].values = np.flip(ds[var], axis=1)\n\n # For 20CRv2c snow cover\n if(var=='snowc'): \n ds[var] = ds[var]/100.\n ds[var] = ds[var].where(ds[var]>=0.5, other=0.0)\n ds[var] = ds[var].where(ds[var] <0.5, other=1.0)\n \n # For HadISST1\n if((var=='sst')|(var=='sic')): \n mask = ds[var].values == -1000.\n ds[var].values[mask] = np.nan\n \n if( area=='europe'): ds = ds.squeeze().sel(lat=slice( 33,73), lon=slice(-12,40)) \n elif(area=='westeu'): ds = ds.squeeze().sel(lat=slice(42,59), lon=slice(-10,17))\n elif(area=='easeur'): ds = ds.squeeze().sel(lat=slice(38,56), lon=slice(17,43))\n elif(area=='meditr'): ds = ds.squeeze().sel(lat=slice(30,45), lon=slice(0,25))\n elif(area=='scandi'): ds = ds.squeeze().sel(lat=slice( 55,71), lon=slice( 4,34)) \n elif(area=='norhem'): ds = ds.squeeze().sel(lat=slice(-10,87)) \n elif(area=='norpol'): ds = ds.squeeze().sel(lat=slice( 50,87))\n else: ds = ds.squeeze()\n \n return ds", "def extract_data( galaxy_ID):\n\n data_cube = marvin.tools.Maps( galaxy_ID)\n\n #ssp = main_file[1].data\n #flux_elines = main_file[3].data\n #org_hdr = main_file[0].header\n\n ###########################################################################\n # NOTE: The visual band fluxes are multiplied by 10^-16 as stated in the\n # units of the MaNGA Data Model.\n #\n # <https://data.sdss.org/datamodel/files/MANGA_PIPE3D/MANGADRP_VER\n # /PIPE3D_VER/PLATE/manga.Pipe3D.cube.html#hdu1>\n ###########################################################################\n #v_band = ssp[0] # in units of erg / s / cm^2\n #v_band_err = ssp[4] # in units of erg / s / cm^2\n #sMass_density = ssp[19] * u.dex( u.M_sun) # in units of log10( Msun / spaxel**2)\n\n Ha_vel = data_cube.emlines_gvel_ha_6564 # in units of km/s\n Ha_vel_err = data_cube.emlines_g # in units of km/s\n\n manga_plate = org_hdr['PLATEID']\n manga_fiberID = org_hdr['IFUDSGN']\n gal_ra = org_hdr['OBJRA']\n gal_dec = org_hdr['OBJDEC']\n\n return Ha_vel, Ha_vel_err, v_band, v_band_err, sMass_density, manga_plate, \n manga_fiberID, gal_ra, gal_dec", "def getMeasures():", "def SystemID_spec_match(q,dt=.1):\n\n Dim = q.shape[1]\n\n k_opt,D_opt = np.zeros(Dim),np.zeros(Dim)\n\n # loop over dimensions\n for j in range(0,Dim):\n print('identifying system #'+str(j+1)+'via spectral match')\n print(50*'-')\n # first we optimize in q-space\n k_opt[j],D_opt[j]=Optimize_Sqq(q[:,j],dt)\n\n Sys_Params={'k': k_opt, 'beta':D_opt/k_opt,'D':D_opt}\n \n return Sys_Params", "def run(weather_data, cfg):\n # -------------------------------------------------------------------------\n # VDI 4655 - Step 1: Determine the \"typtag\" key for each timestep\n # -------------------------------------------------------------------------\n logger.info('Determine \"typtag\" keys for each 
time step')\n get_typical_days(weather_data, cfg)\n\n # -------------------------------------------------------------------------\n # VDI 4655 - Step 2:\n # Match 'typtag' keys and reference load profile factors for each timestep\n # (for each 'typtag' key, one load profile is defined by VDI 4655)\n # -------------------------------------------------------------------------\n logger.info('Read in reference load profile factors and match them to ' +\n '\"typtag\" keys for each timestep')\n load_profile_df = load_profile_factors(weather_data, cfg)\n\n # -------------------------------------------------------------------------\n # VDI 4655 - Step 3: Load houses and generate their load profiles\n #\n # In the end, we will multiply the energy factors contained in DataFrame\n # load_profile_df for each time step of the weather_data with the\n # corresponding daily energy demand of each house we want to simulate. This\n # will yield the DataFrame load_curve_houses, which contains the actual\n # energy demand of each house in each time step.\n # -------------------------------------------------------------------------\n\n # (6) Application of guideline:\n # -------------------------------------------------------------------------\n # The following numbered sections are the implementations of the\n # corresponding sections in the VDI 4655.\n\n # (6.1) Specification of building type:\n # -------------------------------------------------------------------------\n # Building type (EFH or MFH) is defined by user in YAML file.\n # - \"single-family houses are residential buildings with up to\n # three flasts and one common heating system\"\n # - \"multi-family houses are residential buildings with no less\n # then four flasts and one common heating system\"\n\n # (6.2) Specification of annual energy demand:\n # -------------------------------------------------------------------------\n logger.info('Read in houses and calculate their annual energy demand')\n houses_dict = get_annual_energy_demand(cfg)\n\n # (6.3) Allocation of building site:\n # -------------------------------------------------------------------------\n # The user has to give the number of the TRY climate zone\n # in the yaml file. 
It is used in (6.4).\n\n # (6.4) Determination of the houses' energy demand values for each 'typtag'\n # -------------------------------------------------------------------------\n logger.info(\"Determine the houses' energy demand values for each typtag\")\n daily_energy_demand_houses = get_daily_energy_demand_houses(houses_dict,\n cfg)\n\n # (6.5) Determination of a daily demand curve for each house:\n # -------------------------------------------------------------------------\n logger.info(\"Generate the houses' energy demand values for each timestep\")\n load_curve_houses = get_load_curve_houses(load_profile_df, houses_dict,\n weather_data, cfg,\n daily_energy_demand_houses)\n\n return load_curve_houses, houses_dict", "def pick_area(data ,total_process, interval ,list_of_vars, list_of_areas, init_time=0, pr_height=None, ):\n \n \n \n #trying if the longitude values change from 0 to 360 or -180 to 180?\n \n if data['lon'].values[0] < 0:\n \n p_d = {'europe' : [0, 48, 30, 65],\n 'northamerica' : [-142,-42,0,60],\n 'australia' : [80,180,-50,10],\n 'gulfofmexico' : [-100,-75,18,31],\n 'carribeans' : [-85,-60,12,38], \n 'indianocean' : [30, 130,-35,35],\n 'NH' : [-180, 180 ,0,90]}\n \n # -180 to 180 change the values given in the dictionary to relevant\n else:\n \n p_d = {'europe' : [0, 48, 30, 65],\n 'northamerica' : [218,318,-10,70],\n 'australia' : [80,180,-50,10],\n 'gulfofmexico' : [260,285,14,37],\n 'carribeans' : [275,300,12,38], \n 'indianocean' : [30, 130,-35,35],\n 'NH' : [0, 360 ,0,90]}\n \n \n \n places_dict = {}\n #looping in the list of areas\n say_pl = 1\n for pl in list_of_areas:\n variables_l = {}\n #looping in the list of variables\n say_var =1\n for var in list_of_vars:\n #check if data contains 'lev' coords.\n try:\n \n #wrap the data\n single = data[var].sel(lon=slice(p_d[pl][0],p_d[pl][1]), \n lat=slice(p_d[pl][2],p_d[pl][3]), \n lev=pr_height).isel(time=slice(init_time, total_process, interval))\n \n #if no 'lev' coords exist.\n except:\n single = data[var].sel(lon=slice(p_d[pl][0],p_d[pl][1]), \n lat=slice(p_d[pl][2],p_d[pl][3]),).isel(time=slice(init_time, total_process, interval))\n \n #append a single variable given by the user\n variables_l[var] = single\n \n \n #append all the variables with respect to their area of interest.\n places_dict[pl] = variables_l\n \n #return\n return places_dict", "def timesteps_experiment():\n\n print(\"TIMESTEPS EXPERIMENT\")\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'timestep_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n else:\n set_params(use_preproc_data=False)\n\n # define the changing parameter and its value\n changing_param_name = 'time_steps'\n changing_param_value = [1, 2, 4, 8, 16, 32, 64, 128, 256]\n # {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n # set constant parameters\n set_params(epochs=20)\n set_params(dropout=0.3)\n set_params(use_word_emb=1)\n\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**(3))), 
\"KB\")\n\n # update the parameter value\n set_params(use_word_emb = value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n set_params(model_id = new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name, new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()\n\n if value == changing_param_value[0]:\n set_params(preproc_data_id=new_model_id)", "def get_raw_data(trace: Trace, cmdl_args):\n # Retrieves states dataframe with the intresting columns\n df_state = trace.df_state[[STATE_COLS.APP.value, STATE_COLS.TASK.value, STATE_COLS.THREAD.value, STATE_COLS.START.value,\n STATE_COLS.END.value, STATE_COLS.VAL.value]]\n\n # Computes the elapse times of each state\n df_state['el_time'] = df_state[STATE_COLS.END.value] - df_state[STATE_COLS.START.value]\n\n # Removes start and end columns from rows cause we don't need them. Load the data into memory.\n df_state = df_state.drop(columns=[STATE_COLS.START.value, STATE_COLS.END.value]).compute()\n\n # Computes runtime (in us)\n runtime = df_state.groupby([STATE_COLS.APP.value, STATE_COLS.TASK.value, STATE_COLS.THREAD.value])['el_time'].sum().max() / 1000\n # Filters rows by useful and groups dataframe by process\n df_state_useful_grouped = df_state.loc[df_state[STATE_COLS.VAL.value] == STATE_VALUES.RUNNING.value].groupby([STATE_COLS.APP.value, STATE_COLS.TASK.value, STATE_COLS.THREAD.value])\n # Computes useful average time (in us)\n useful_av = df_state_useful_grouped['el_time'].sum().mean() / 1000\n # Computes useful max time (in us)\n useful_max = df_state_useful_grouped['el_time'].sum().max() / 1000\n # Computes useful tot time (in us)\n useful_tot = df_state_useful_grouped['el_time'].sum().sum() / 1000\n\n # Dimemas simulation for ideal times\n if cmdl_args.dimemas:\n runtime_id, useful_id = get_ideal_data(trace.metadata.path, len(trace.metadata.cpu_list))\n else:\n runtime_id = float('NaN')\n useful_id = float('NaN')\n\n\n # Loads only meaningful columns from df_states and filters useful rows\n df_state_useful = trace.df_state[[STATE_COLS.APP.value, STATE_COLS.TASK.value, STATE_COLS.THREAD.value, STATE_COLS.END.value, STATE_COLS.VAL.value]]\n df_state_useful = df_state_useful.loc[df_state_useful[STATE_COLS.VAL.value] == STATE_VALUES.RUNNING.value].drop(columns=STATE_COLS.VAL.value).compute()\n\n # Loads only meaningful columns from df_events\n df_event = trace.df_event[[EVENT_COLS.APP.value, EVENT_COLS.TASK.value, EVENT_COLS.THREAD.value, EVENT_COLS.TIME.value,\n EVENT_COLS.EVTYPE.value, EVENT_COLS.EVVAL.value]]\n\n # Filters for PAPI_TOT_INS and set as index the process identifier\n df_event_ins = df_event.loc[df_event[EVENT_COLS.EVTYPE.value] == EVENT_VALUES.PAPI_TOT_INS.value].drop(columns=EVENT_COLS.EVTYPE.value).compute().set_index([EVENT_COLS.APP.value, EVENT_COLS.TASK.value, EVENT_COLS.THREAD.value])\n\n # Gets total useful instructions by grouping and applying a custom filtering function\n useful_ins = df_event_ins.groupby([EVENT_COLS.APP.value,EVENT_COLS.TASK.value, EVENT_COLS.THREAD.value]).apply(is_useful, useful_states=df_state_useful)[EVENT_COLS.EVVAL.value].sum()\n\n # Filter for PAPI_TOT_CYC and set as indexes the process identifier\n df_event_cyc = df_event.loc[df_event[EVENT_COLS.EVTYPE.value] == 
EVENT_VALUES.PAPI_TOT_CYC.value].drop(columns=EVENT_COLS.EVTYPE.value).compute().set_index([EVENT_COLS.APP.value, EVENT_COLS.TASK.value, EVENT_COLS.THREAD.value])\n\n # Gets total useful cycles by grouping and applying a custom filtering function\n useful_cyc= df_event_cyc.groupby([EVENT_COLS.APP.value,EVENT_COLS.TASK.value, EVENT_COLS.THREAD.value]).apply(is_useful, useful_states=df_state_useful)[EVENT_COLS.EVVAL.value].sum()\n\n # Computes average IPC\n try:\n ipc = useful_ins / useful_cyc\n except ValueError:\n ipc = float('NaN')\n # Computes average frequency\n try:\n freq = useful_cyc / useful_tot / 1000\n except ValueError:\n freq = float('NaN')\n\n return ipc, freq, runtime, runtime_id, useful_av, useful_max, useful_tot, useful_id, useful_ins, useful_cyc", "def extract_data(self):\n values = {}\n for injkey in self.data_sets.keys():\n values[injkey] = {}\n alldata = self.data_sets[injkey]\n paramkeys = alldata['params'].keys()\n for datakey in alldata.keys():\n if not datakey == 'params':\n values[injkey][datakey] = {}\n values[injkey][datakey]['metric_val'] = {}\n values[injkey][datakey]['metric_val']['vals'] = []\n for paramkey in paramkeys:\n values[injkey][datakey][paramkey] = {}\n values[injkey][datakey][paramkey]['vals'] = []\n trials = alldata[datakey]\n for trial_num in trials.keys():\n trial = trials[trial_num]\n values[injkey][datakey]['metric_val']['vals'] \\\n .append(trial['metric_val'])\n values[injkey][datakey]['metric_val']['type'] \\\n = trial['metric']\n values[injkey][datakey]['metric_val']['units'] \\\n = 'dimensionless'\n param_vals = trial['params']\n for param_name in param_vals.keys():\n val, units = self.parse_pint_string(\n pint_string=param_vals[param_name]\n )\n values[injkey][datakey][param_name]['vals'] \\\n .append(float(val))\n values[injkey][datakey][param_name]['units'] \\\n = units\n self.values = values", "def get_scene(self, id):\n if not isinstance(id, int):\n id = int(id)\n payload = self.get('data_request?id=scene&action=list&scene=%d&output_format=json' % id)\n return payload", "def getExample1Data(conditionData, conditions, shockTubeEffectiveHeatingTimes, smilesInExperiments):\n #First get the time points close to\n allResults=[]\n for T, timepoint in shockTubeEffectiveHeatingTimes.iteritems():\n for smiles in smilesInExperiments:\n temperatureResults=GenericData(label=label='Temperature',\n data = []\n units = 'K')\n for conIndex, data in conditionData:\n if conditions[conIndex].T0==T:\n #index1 is the index of time from the simulation closest to timepoint\n #data is organized as (timeArray, rest of data)\n index1=findNearest(data[0], timepoint)\n for speciesData in data[1]:\n if smiles==speciesData.species:\n\n if smiles not in exptCompDict1: exptCompDict1[smiles]=[]\n #need to multiply by 20, in the experiment, they don't seem to count the Ar in the mole fraction\n exptCompDict1[smiles].append(resultsDictionary[T][1][index1][index2+1]*20)\n (index1, timepoint2)=rt.getNearestTime(timepoint1, resultsDictionary[T][2])\n for index2, smiles in enumerate(majorSpecies[0:4]):\n if smiles not in exptCompDict2: exptCompDict2[smiles]=[]\n exptCompDict2[smiles].append(resultsDictionary[T][3][index1][index2+1]*20)", "def _variables(self):\n # Allocation A\n self.model.A = Var(self.model.timeslots * self.model.tasks,\n domain=pe.Boolean, initialize=0)\n # Total utility of allocation A\n self.model.A_total = Var(domain=pe.Reals)\n\n # Multi-resolution allocation (size 1-4 chunks)\n self.model.A2 = Var(self.model.timeslots * self.model.tasks,\n 
domain=pe.Boolean, initialize=0)\n self.model.A2_total = Var(domain=pe.Reals)\n self.model.A3 = Var(self.model.timeslots * self.model.tasks,\n domain=pe.Boolean, initialize=0)\n self.model.A3_total = Var(domain=pe.Reals)\n self.model.A4 = Var(self.model.timeslots * self.model.tasks,\n domain=pe.Boolean, initialize=0)\n self.model.A4_total = Var(domain=pe.Reals)\n\n # Completion bonus\n self.model.T_total = Var(self.model.tasks, domain=pe.Integers,\n initialize=0)\n self.model.Completion_total = Var(domain=pe.Reals)\n\n self.model.Affinity_cognitive_total = Var(domain=pe.Reals)\n\n # Slots within a day\n self.model.intradayslots = RangeSet(0, self.num_timeslots/7-1) # 7 days\n # Day slots\n self.model.dayslots = RangeSet(0, 6) # 7 days\n # Tasks assigned on days\n self.model.S = Var(self.model.dayslots * self.model.tasks,\n domain=pe.Integers, initialize=0)\n # Spread utility\n self.model.S_total = Var(domain=pe.Reals)\n\n # Task start/end slots (per day)\n self.model.T_end = Var(self.model.dayslots, self.model.tasks,\n domain=pe.Integers,\n bounds=(0, self.num_timeslots / 7 - 1))\n # self.model.T_start = Var(self.model.dayslots, self.model.tasks,\n # domain=pe.Integers,\n # bounds=(0, self.num_timeslots / 7 - 1))\n\n # Categories assigned on days\n self.model.S_cat = Var(self.model.dayslots * self.model.categories,\n domain=pe.Boolean, initialize=0)\n # Total days on which categories are assigned\n self.model.S_cat_total = Var(self.model.categories, domain=pe.Integers)\n\n # Contiguity slots (half-days)\n self.cont_incr = int(CONT_STRIDE * tutil.SLOTS_PER_HOUR)\n self.cont_slots = self.num_timeslots / self.cont_incr - 1\n self.model.contslots = RangeSet(0, self.cont_slots - 1)\n self.model.CTu = Var(self.model.contslots * self.model.tasks,\n domain=pe.Integers, initialize=0)\n self.model.CTl = Var(self.model.contslots * self.model.tasks,\n domain=pe.Integers, initialize=0)\n # Contiguity utility\n self.model.CTu_total = Var(domain=pe.Reals)\n self.model.CTl_total = Var(domain=pe.Reals)\n\n # Category durations\n self.model.C_total = Var(self.model.categories, domain=pe.Reals,\n initialize=0)", "def get_RDataFrame_analysisIDAndExperimentIDAndTimePointAndUnitsAndComponentNames_dataStage02GlogNormalized(self,analysis_id_I, experiment_id_I,time_point_I,concentration_units_I,component_name_I):\n #Tested\n try:\n data = self.session.query(data_stage02_quantification_glogNormalized.experiment_id,\n data_stage02_quantification_analysis.sample_name_abbreviation,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).filter(\n data_stage02_quantification_analysis.analysis_id.like(analysis_id_I),\t\t\n data_stage02_quantification_glogNormalized.analysis_id.like(analysis_id_I),\t\t\t\t\t\n data_stage02_quantification_glogNormalized.experiment_id.like(experiment_id_I),\t\t\t\n data_stage02_quantification_analysis.experiment_id.like(experiment_id_I),\n data_stage02_quantification_glogNormalized.time_point.like(time_point_I),\n data_stage02_quantification_analysis.time_point.like(time_point_I),\n data_stage02_quantification_glogNormalized.component_name.like(component_name_I),\n 
data_stage02_quantification_glogNormalized.calculated_concentration_units.like(concentration_units_I),\n data_stage02_quantification_glogNormalized.sample_name_short.like(data_stage02_quantification_analysis.sample_name_short),\n data_stage02_quantification_glogNormalized.used_.is_(True)).group_by(\n data_stage02_quantification_glogNormalized.experiment_id,\n data_stage02_quantification_analysis.sample_name_abbreviation,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).all();\n data_O = [];\n for d in data: \n data_1 = {};\n data_1['experiment_id'] = d.experiment_id;\n data_1['sample_name_abbreviation'] = d.sample_name_abbreviation;\n data_1['sample_replicate'] = d.sample_replicate;\n data_1['sample_name_short'] = d.sample_name_short;\n data_1['time_point'] = d.time_point;\n data_1['component_group_name'] = d.component_group_name;\n data_1['component_name'] = d.component_name;\n data_1['calculated_concentration'] = d.calculated_concentration;\n data_1['calculated_concentration_units'] = d.calculated_concentration_units;\n data_O.append(data_1);\n return data_O;\n except SQLAlchemyError as e:\n print(e);", "def get_observations(self):\n joint_states = self.joints_state\n self.force = self.wrench_stamped.wrench.force\n self.torque = self.wrench_stamped.wrench.torque\n self.static_taxel = self.tactile_static.taxels\n# dynamic_taxel= tactile_dynamic\n\n# print(\"[force]\", self.force.x, self.force.y, self.force.z)\n# print(\"[torque]\", self.torque.x, self.torque.y, self.torque.z)\n shp_joint_ang = joint_states.position[0]\n shl_joint_ang = joint_states.position[1]\n elb_joint_ang = joint_states.position[2]\n wr1_joint_ang = joint_states.position[3]\n wr2_joint_ang = joint_states.position[4]\n wr3_joint_ang = joint_states.position[5]\n\n shp_joint_vel = joint_states.velocity[0]\n shl_joint_vel = joint_states.velocity[1]\n elb_joint_vel = joint_states.velocity[2]\n wr1_joint_vel = joint_states.velocity[3]\n wr2_joint_vel = joint_states.velocity[4]\n wr3_joint_vel = joint_states.velocity[5]\n\n q = [shp_joint_ang, shl_joint_ang, elb_joint_ang, wr1_joint_ang, wr2_joint_ang, wr3_joint_ang]\n# print(\"q(observation):\", q)\n eef_x, eef_y, eef_z = self.get_xyz(q)\n self.end_effector = self.get_xyz(q)\n eef_x_ini, eef_y_ini, eef_z_ini = self.get_xyz(self.init_joint_pose2) \n\n delta_image_r, delta_image_l = self.get_image()\n self.cnn_image_r = agent.update_cnn(delta_image_r)\n self.cnn_image_l = agent.update_cnn(delta_image_l)\n self.cnn_image_r_list = self.cnn_image_r.tolist()\n self.cnn_image_l_list = self.cnn_image_l.tolist()\n print(\"r_list\", self.cnn_image_r_list)\n print(\"l_list\", self.cnn_image_l_list)\n\n observation = []\n# rospy.logdebug(\"List of Observations==>\"+str(self.observations))\n for obs_name in self.observations:\n if obs_name == \"shp_joint_ang\":\n observation.append((shp_joint_ang - self.init_joint_pose2[0]) * self.joint_n)\n elif obs_name == \"shl_joint_ang\":\n observation.append((shl_joint_ang - self.init_joint_pose2[1]) * self.joint_n)\n elif obs_name == \"elb_joint_ang\":\n observation.append((elb_joint_ang - self.init_joint_pose2[2]) * self.joint_n)\n elif obs_name == \"wr1_joint_ang\":\n observation.append((wr1_joint_ang - 
self.init_joint_pose2[3]) * self.joint_n)\n elif obs_name == \"wr2_joint_ang\":\n observation.append((wr2_joint_ang - self.init_joint_pose2[4]) * self.joint_n)\n elif obs_name == \"wr3_joint_ang\":\n observation.append((wr3_joint_ang - self.init_joint_pose2[5]) * self.joint_n)\n elif obs_name == \"shp_joint_vel\":\n observation.append(shp_joint_vel)\n elif obs_name == \"shl_joint_vel\":\n observation.append(shl_joint_vel)\n elif obs_name == \"elb_joint_vel\":\n observation.append(elb_joint_vel)\n elif obs_name == \"wr1_joint_vel\":\n observation.append(wr1_joint_vel)\n elif obs_name == \"wr2_joint_vel\":\n observation.append(wr2_joint_vel)\n elif obs_name == \"wr3_joint_vel\":\n observation.append(wr3_joint_vel)\n elif obs_name == \"eef_x\":\n observation.append((eef_x - eef_x_ini) * self.eef_n)\n elif obs_name == \"eef_y\":\n observation.append((eef_y - eef_y_ini) * self.eef_n)\n elif obs_name == \"eef_z\":\n observation.append((eef_z - eef_z_ini) * self.eef_n)\n elif obs_name == \"force_x\":\n observation.append((self.force.x - self.force_ini.x) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_y\":\n observation.append((self.force.y - self.force_ini.y) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_z\":\n observation.append((self.force.z - self.force_ini.z) / self.force_limit1 * self.force_n)\n elif obs_name == \"torque_x\":\n observation.append((self.torque.x - self.torque_ini.x) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_y\":\n observation.append((self.torque.y - self.torque_ini.y) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_z\":\n observation.append((self.torque.z - self.torque_ini.z) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"image_cnn\":\n for x in range(0, 10):\n observation.append(self.cnn_image_r_list[0][x])\n# print(\"r_list\", self.cnn_image_r_list[0][x])\n for x in range(0, 10):\n observation.append(self.cnn_image_l_list[0][x])\n# print(\"l_list\", self.cnn_image_l_list[0][x])\n elif obs_name == \"static_taxel\":\n for x in range(0, 28):\n observation.append((self.static_taxel[0].values[x] - self.static_taxel_ini[0].values[x]) * self.taxel_n)\n for x in range(0, 28):\n observation.append((self.static_taxel[1].values[x] - self.static_taxel_ini[1].values[x]) * self.taxel_n)\n# elif obs_name == \"dynamic_taxel\":\n# observation.append(dynamic_taxel[0].values) * self.taxel_n\n# observation.append(dynamic_taxel[1].values) * self.taxel_n\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n print(\"observation\", list(map(round, observation, [3]*len(observation))))\n# print(\"observation\", observation)\n\n return observation", "def SC_generation(hourly_radiation, prop_observers, number_groups, weather_data, g, Sz, Az, ha, Tin_C, height,\n panel_properties, latitude):\n\n\n n0 = panel_properties['n0']\n c1 = panel_properties['c1']\n c2 = panel_properties['c2']\n mB0_r = panel_properties['mB0_r']\n mB_max_r = panel_properties['mB_max_r']\n mB_min_r = panel_properties['mB_min_r']\n C_eff = panel_properties['C_eff']\n t_max = panel_properties['t_max']\n IAM_d = panel_properties['IAM_d']\n Aratio = panel_properties['aperture_area_ratio']\n Apanel = panel_properties['module_area']\n dP1 = panel_properties['dP1']\n dP2 = panel_properties['dP2']\n dP3 = panel_properties['dP3']\n dP4 = panel_properties['dP4']\n Cp_fluid_JperkgK = panel_properties['Cp_fluid'] # J/kgK\n\n # create lists to store results\n list_results = [None] * number_groups\n list_areas_groups = [None] * 
number_groups\n Sum_mcp_kWperC = np.zeros(8760)\n Sum_qout_kWh = np.zeros(8760)\n Sum_Eaux_kWh = np.zeros(8760)\n Sum_qloss = np.zeros(8760)\n Sum_radiation_kWh = np.zeros(8760)\n\n Tin_array_C = np.zeros(8760) + Tin_C\n aperature_area_per_module = Aratio * Apanel\n total_area_module = prop_observers['total_area_module'].sum() # total area for panel installation\n\n # calculate equivalent length of pipes\n lv = panel_properties['module_length'] # module length\n number_modules = round(total_area_module/Apanel) # this is an estimation\n l_ext_mperm2 = (2 * lv * number_modules/ (total_area_module * Aratio)) # pipe length within the collectors\n l_int_mperm2 = 2 * height / (total_area_module * Aratio) # pipe length from building substation to roof top collectors\n Leq_mperm2 = l_int_mperm2 + l_ext_mperm2 # in m/m2 aperture\n\n if panel_properties['type'] == 'ET': # for evacuated tubes\n Nseg = 100 # default number of subsdivisions for the calculation\n else:\n Nseg = 10 # default number of subsdivisions for the calculation\n\n for group in range(number_groups):\n # load panel angles from group\n teta_z = prop_observers.loc[group, 'surface_azimuth'] # azimuth of panels of group\n area_per_group = prop_observers.loc[group, 'total_area_module']\n tilt_angle_deg = prop_observers.loc[group, 'tilt'] # tilt angle of panels\n\n # create dataframe with irradiation from group\n\n radiation_Wh = pd.DataFrame({'I_sol': hourly_radiation[group]})\n radiation_Wh['I_diffuse'] = weather_data.ratio_diffhout * radiation_Wh.I_sol # calculate diffuse radiation\n radiation_Wh['I_direct'] = radiation_Wh['I_sol'] - radiation_Wh['I_diffuse'] # calculate direct radiation\n radiation_Wh.fillna(0, inplace=True) # set nan to zero\n\n # calculate incidence angle modifier for beam radiation\n IAM_b = calc_IAM_beam_SC(Az, g, ha, teta_z, tilt_angle_deg, panel_properties['type'], Sz, latitude)\n\n # calculate heat production from a solar collector of each group\n list_results[group] = calc_SC_module(tilt_angle_deg, IAM_b, IAM_d, radiation_Wh.I_direct,\n radiation_Wh.I_diffuse, weather_data.drybulb_C, n0,\n c1, c2, mB0_r, mB_max_r, mB_min_r, C_eff, t_max,\n aperature_area_per_module, dP1, dP2, dP3, dP4,\n Cp_fluid_JperkgK, Tin_C, Leq_mperm2, l_ext_mperm2,\n l_int_mperm2, Nseg)\n\n\n # multiplying the results with the number of panels in each group and write to list\n number_modules_per_group = area_per_group / Apanel\n list_areas_groups[group] = area_per_group\n radiation_array = hourly_radiation[group] * list_areas_groups[group] / 1000 # kWh\n Sum_qout_kWh = Sum_qout_kWh + list_results[group][1] * number_modules_per_group\n Sum_Eaux_kWh = Sum_Eaux_kWh + list_results[group][2] * number_modules_per_group\n Sum_qloss = Sum_qloss + list_results[group][0] * number_modules_per_group\n Sum_mcp_kWperC = Sum_mcp_kWperC + list_results[group][5] * number_modules_per_group\n Sum_radiation_kWh = Sum_radiation_kWh + radiation_Wh['I_sol']*area_per_group/1000\n\n Tout_group_C = (Sum_qout_kWh / Sum_mcp_kWperC) + Tin_C # in C assuming all collectors are connected in parallel\n\n Final = pd.DataFrame(\n {'Q_SC_gen_kWh': Sum_qout_kWh, 'T_SC_sup_C': Tin_array_C, 'T_SC_re_C': Tout_group_C, 'mcp_SC_kWperC': Sum_mcp_kWperC, 'Eaux_SC_kWh': Sum_Eaux_kWh,\n 'Q_SC_l_kWh': Sum_qloss, 'Area_SC_m2': sum(list_areas_groups), 'radiation_kWh': Sum_radiation_kWh}, index=range(8760))\n\n return list_results, Final", "def retrieve_multiple_time_series(self,run='latest',run_data=None,criteria={},timestep='daily',name_fn=name_element_variable):\n if 
timestep==\"daily\":\n suffix = \"\"\n else:\n suffix = \"/aggregated/%s\"%timestep\n\n if run_data is None:\n run_data = self.retrieve_run(run)\n\n retrieved={}\n def name_column(result):\n col_name = name_fn(result)\n if col_name in retrieved:\n i = 1\n alt_col_name = '%s %d'%(col_name,i)\n while alt_col_name in retrieved:\n i += 1\n alt_col_name = '%s %d'%(col_name,i)\n col_name = alt_col_name\n return col_name\n\n units_store = {}\n for result in run_data['Results']:\n if self.result_matches_criteria(result,criteria):\n d = self.retrieve_json(result['TimeSeriesUrl']+suffix)\n result.update(d)\n col_name = name_column(result)\n# raise Exception(\"Duplicate column name: %s\"%col_name)\n if 'Events' in d:\n retrieved[col_name] = d['Events']\n units_store[col_name] = result['Units']\n else:\n all_ts = d['TimeSeries']\n for ts in all_ts:\n col_name = name_column(ts)\n units_store[col_name] = ts['Units']\n\n vals = ts['Values']\n s = self.parse_veneer_date(ts['StartDate'])\n e = self.parse_veneer_date(ts['EndDate'])\n if ts['TimeStep']=='Daily':\n f='D'\n elif ts['TimeStep']=='Monthly':\n f='M'\n elif ts['TimeStep']=='Annual':\n f='A'\n dates = pd.date_range(s,e,freq=f)\n retrieved[col_name] = [{'Date':d,'Value':v} for d,v in zip(dates,vals)]\n # Multi Time Series!\n\n result = self._create_timeseries_dataframe(retrieved)\n for k,u in units_store.items():\n result[k].units = u\n\n return result", "def generate(self):\n i = self.start_index\n df_array = []\n # load the dataframes from the multiple files(one file per frame_id, agent_id pair)\n for k in range(self.frame_length): # segment length\n frame_id = i + k\n # print(frame_id)\n # breakpoint()\n if frame_id >= len(self.agent_maps):\n print(\n f\"Trying to access frame {frame_id} but only have {len(self.agent_maps)}\"\n )\n # breakpoint()\n break\n df = pd.read_csv(self.agent_maps[frame_id], compression=\"gzip\")\n df = df.query(f\"abs(pos_x)<{self.radius} & abs(pos_y)<{self.radius}\")\n df_array.append(df)\n # merge all dataframes together\n if len(df_array) == 0:\n self.df_merged = pd.DataFrame()\n self.agent_tracks = {}\n self.agent_metadata_dfs = {}\n self.agent_track_len = {}\n self.sorted_agent_ids = []\n return\n self.df_merged = pd.concat(df_array).reset_index()\n # group all dataframes by id, so that we can get agent level metrics across time\n agent_grp = self.df_merged.groupby(\"id\")\n self.agent_tracks = {}\n self.agent_metadata_dfs = {}\n self.agent_track_len = {}\n for agent_id in agent_grp.groups:\n sub_df = self.df_merged.iloc[agent_grp.groups[agent_id]]\n # empty template for the trajectories\n tracks = [\n [self.radius + 1, self.radius + 1] for _ in range(self.frame_length)\n ]\n track_len = 0\n # populate the empty template\n for idx, row in sub_df.iterrows():\n frame_idx = row.frame_id - self.start_index\n # if row.frame_id == 0:\n # # check if the object is within the car position add to the current frame\n # print(\"0 frame id\")\n try:\n tracks[frame_idx] = [row.pos_x, row.pos_y]\n except:\n breakpoint()\n track_len += 1\n self.agent_tracks[agent_id] = np.array(tracks)\n self.agent_metadata_dfs[agent_id] = sub_df\n self.agent_track_len[agent_id] = track_len\n self.sorted_agent_ids = list(self.agent_track_len.keys())\n self.sorted_agent_ids.sort(key=lambda x: self.agent_track_len[x], reverse=True)", "def separate_sessions(sc, session, reg_data, run_names=('Run1', 'Run2'), run_number=(1, 2)):\n from prep.VisSession import VisSession\n from prep.IO import saveRegData\n # separate\n start = 0\n for run, name in 
zip(run_number, run_names):\n files = glob.glob(os.path.join(session.path, '') + name + '*.tif')\n timepoints = count_timepoints(sc, session, files)\n stop = start + sum(timepoints)\n logger.info('Run %s, Number %d, start: %d, stop %d' % (name, run, start, stop))\n session_temp = VisSession(animalID=session.animalID, date=session.date, run='Run' + str(run))\n session_temp.initBase(sc)\n session_temp.start = 0\n session_temp.stop = len(files)\n reg_temp = reg_data[start:stop, :, :, :]\n\n saveRegData(session_temp, reg_temp, overwrite=False)\n reg_dict_temp = copy.deepcopy(session.regDict)\n for key, value in iteritems(session.regDict):\n if isinstance(value, np.ndarray):\n shape = value.shape\n loc = np.where(np.array(shape) == reg_data.shape[0])[0]\n if len(loc) > 0:\n print('%s: %s - %s' % (key, shape, loc[0]))\n if loc[0] == 0:\n reg_dict_temp[key] = value[start:stop, ...]\n session_temp.regDict = reg_dict_temp\n time_dict_temp = copy.deepcopy(session.timeDict)\n for key, value in iteritems(session.timeDict):\n if isinstance(value, np.ndarray):\n shape = value.shape\n loc = np.where(np.array(shape) == reg_data.shape[0])[0]\n if len(loc) > 0:\n print('%s: %s - %s' % (key, shape, loc[0]))\n if loc[0] == 0:\n time_dict_temp[key] = value[start:stop, ...]\n if loc[0] == 1:\n time_dict_temp[key] = value[:, start:stop, ...]\n session_temp.timeDict = time_dict_temp\n session_temp.save('time2_new')\n start = stop", "def get_RExpressionData_analysisIDAndExperimentIDAndTimePointAndUnits_dataStage02GlogNormalized(self,analysis_id_I, experiment_id_I,time_point_I,concentration_units_I,exp_type_I=4):\n #Tested\n try:\n data = self.session.query(data_stage02_quantification_glogNormalized.experiment_id,\n sample_description.sample_name_abbreviation,\n sample_description.sample_replicate,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).filter(\n data_stage02_quantification_glogNormalized.analysis_id.like(analysis_id_I),\n data_stage02_quantification_glogNormalized.experiment_id.like(experiment_id_I),\n data_stage02_quantification_glogNormalized.time_point.like(time_point_I),\n data_stage02_quantification_glogNormalized.calculated_concentration_units.like(concentration_units_I),\n data_stage02_quantification_glogNormalized.sample_name_short.like(sample_description.sample_name_short),\n sample_description.sample_id.like(sample.sample_id),\n sample.sample_name.like(experiment.sample_name),\n experiment.id.like(experiment_id_I),\n experiment.exp_type_id == exp_type_I,\n data_stage02_quantification_glogNormalized.used_.is_(True)).group_by(\n data_stage02_quantification_glogNormalized.experiment_id,\n sample_description.sample_name_abbreviation,\n sample_description.sample_replicate,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).all();\n data_O = [];\n for d in data: \n data_1 = {};\n data_1['experiment_id'] = 
d.experiment_id;\n data_1['sample_name_abbreviation'] = d.sample_name_abbreviation;\n data_1['sample_replicate'] = d.sample_replicate;\n data_1['sample_name_short'] = d.sample_name_short;\n data_1['time_point'] = d.time_point;\n data_1['component_group_name'] = d.component_group_name;\n data_1['component_name'] = d.component_name;\n data_1['calculated_concentration'] = d.calculated_concentration;\n data_1['calculated_concentration_units'] = d.calculated_concentration_units;\n data_O.append(data_1);\n return data_O;\n except SQLAlchemyError as e:\n print(e);", "def _load_data(self, variables=None):\n\n # Get a list of all the variables from the netCDF dataset.\n if not variables:\n variables = list(self.ds.variables.keys())\n\n got_time = 'time' in self._dims\n got_horizontal = 'node' in self._dims or 'nele' in self._dims\n got_vertical = 'siglay' in self._dims or 'siglev' in self._dims\n\n if self._debug:\n print(self._dims.keys())\n print('time: {} vertical: {} horizontal: {}'.format(got_time, got_vertical, got_horizontal))\n\n if got_time:\n start, end = self._dims['time']\n else:\n start, end = False, False # load everything\n\n nodes, elements, layers, levels = False, False, False, False\n # Make sure we don't have single values for the dimensions otherwise everything gets squeezed and figuring out\n # what dimension is where gets difficult.\n if 'node' in self._dims:\n nodes = self._dims['node']\n if isinstance(nodes, int):\n nodes = [nodes]\n if 'nele' in self._dims:\n elements = self._dims['nele']\n if isinstance(elements, int):\n elements = [elements]\n if 'siglay' in self._dims:\n layers = self._dims['siglay']\n if isinstance(layers, int):\n layers = [layers]\n if 'siglev' in self._dims:\n levels = self._dims['siglev']\n if isinstance(levels, int):\n levels = [levels]\n self.load_data(variables, start=start, end=end, node=nodes, nele=elements, layer=layers, level=levels)\n\n # Update the dimensions to match the data.\n self._update_dimensions(variables)", "def read_simulation_model_and_data(self, model, data, index):\n var_id = read_var_table_as_id(self.dismod_file)\n sim_model = read_simulation_model(self.dismod_file, model, var_id, index)\n sim_data = read_simulation_data(self.dismod_file, data, index)\n return sim_model, sim_data", "def get_define_step_data(pool):\n\n LOG.info('Searching Define Steps')\n\n process = pool.parent_process\n\n while process.type.name not in MASTER_STEPS_UDFS['reagent_labels'][\n 'steps']['define']:\n art = process.all_inputs()[0]\n process = art.parent_process\n\n define_step_outputs = {}\n flowcell_target_reads = 0\n\n for art in process.all_outputs():\n if art.type != 'Analyte':\n continue\n\n index_target_reads = _get_target_reads(art)\n if index_target_reads:\n flowcell_target_reads += index_target_reads\n\n if len(art.samples) != 1: # ignore pools\n continue\n sample_id = art.samples[0].id\n define_step_outputs[sample_id] = index_target_reads\n\n LOG.info('Done')\n\n return define_step_outputs, flowcell_target_reads, process", "def make_result_table(self, for_time):\n time = [t.AsPython()\n for t in cmf.timerange(cmf.AsCMFtime(self.starttime),\n cmf.AsCMFtime(self.starttime + for_time),\n self.dt)]\n depth = self.depth\n layer_template = np.zeros((len(time), len(depth))) * np.NaN\n ds = xr.Dataset(\n {'N': (('time', 'depth'), layer_template.copy()),\n 'Corg': (('time', 'depth'), layer_template.copy()),\n 'DOC': (('time', 'depth'), layer_template.copy()),\n 'Temp': (('time', 'depth'), layer_template.copy()),\n 'wetness': (('time', 'depth'), 
layer_template.copy()),\n },\n {'time': time, 'depth': list(depth)}\n )\n return ds", "def get_simulation(\n component: ComponentOrFactory,\n port_extension: Optional[float] = 4.0,\n layer_stack: LayerStack = LAYER_STACK,\n thickness_pml: float = 1.0,\n xmargin: float = 0,\n ymargin: float = 0,\n xmargin_left: float = 0,\n xmargin_right: float = 0,\n ymargin_top: float = 0,\n ymargin_bot: float = 0,\n zmargin: float = 1.0,\n clad_material: str = \"sio2\",\n port_source_name: str = \"o1\",\n port_margin: float = 0.5,\n port_source_offset: float = 0.1,\n distance_source_to_monitors: float = 0.2,\n resolution: float = 50,\n wavelength_start: float = 1.50,\n wavelength_stop: float = 1.60,\n wavelength_points: int = 50,\n plot_modes: bool = False,\n num_modes: int = 2,\n run_time_ps: float = 10.0,\n dispersive: bool = False,\n material_name_to_tidy3d_index: Dict[str, float] = MATERIAL_NAME_TO_TIDY3D_INDEX,\n material_name_to_tidy3d_name: Dict[str, str] = MATERIAL_NAME_TO_TIDY3D_NAME,\n is_3d: bool = True,\n with_all_monitors: bool = False,\n) -> td.Simulation:\n component = component() if callable(component) else component\n assert isinstance(component, Component)\n\n layer_to_thickness = layer_stack.get_layer_to_thickness()\n layer_to_material = layer_stack.get_layer_to_material()\n layer_to_zmin = layer_stack.get_layer_to_zmin()\n # layer_to_sidewall_angle = layer_stack.get_layer_to_sidewall_angle()\n\n if dispersive:\n material_name_to_tidy3d = material_name_to_tidy3d_name\n else:\n material_name_to_tidy3d = material_name_to_tidy3d_index\n\n assert isinstance(\n component, Component\n ), f\"component needs to be a gf.Component, got Type {type(component)}\"\n if port_source_name not in component.ports:\n warnings.warn(\n f\"port_source_name={port_source_name} not in {component.ports.keys()}\"\n )\n port_source = component.get_ports_list(port_type=\"optical\")[0]\n port_source_name = port_source.name\n warnings.warn(f\"Selecting port_source_name={port_source_name} instead.\")\n\n component_padding = gf.add_padding_container(\n component,\n default=0,\n top=ymargin or ymargin_top,\n bottom=ymargin or ymargin_bot,\n left=xmargin or xmargin_left,\n right=xmargin or xmargin_right,\n )\n component_extended = (\n gf.components.extension.extend_ports(\n component=component_padding, length=port_extension, centered=True\n )\n if port_extension\n else component_padding\n )\n\n gf.show(component_extended)\n component_extended = component_extended.flatten()\n\n component_ref = component_padding.ref()\n component_ref.x = 0\n component_ref.y = 0\n\n clad_material_name_or_index = material_name_to_tidy3d[clad_material]\n clad = td.Structure(\n geometry=td.Box(\n size=(td.inf, td.inf, td.inf),\n center=(0, 0, 0),\n ),\n medium=get_medium(name_or_index=clad_material_name_or_index),\n )\n structures = [clad]\n\n layers_thickness = [\n layer_to_thickness[layer]\n for layer in component.get_layers()\n if layer in layer_to_thickness\n ]\n\n if len(layer_to_thickness) < 1:\n raise ValueError(f\"{component.get_layers()} not in {layer_to_thickness.keys()}\")\n\n t_core = max(layers_thickness)\n cell_thickness = (\n thickness_pml + t_core + thickness_pml + 2 * zmargin\n if is_3d\n else 1 / resolution\n )\n\n sim_size = [\n component_ref.xsize + 2 * thickness_pml,\n component_ref.ysize + 2 * thickness_pml,\n cell_thickness,\n ]\n\n for layer in component.layers:\n if layer in layer_to_thickness and layer in layer_to_material:\n thickness = layer_to_thickness[layer]\n zmin = layer_to_zmin[layer] if is_3d else -td.inf\n 
zmax = zmin + thickness if is_3d else td.inf\n\n if (\n layer in layer_to_material\n and layer_to_material[layer] in material_name_to_tidy3d\n ):\n name_or_index = material_name_to_tidy3d[layer_to_material[layer]]\n medium = get_medium(name_or_index=name_or_index)\n index = get_index(name_or_index=name_or_index)\n logger.debug(\n f\"Add {layer}, {name_or_index!r}, index = {index:.3f}, \"\n f\"thickness = {thickness}, zmin = {zmin}, zmax = {zmax}\"\n )\n\n polygons = td.PolySlab.from_gds(\n gds_cell=component_extended,\n gds_layer=layer[0],\n gds_dtype=layer[1],\n axis=2,\n slab_bounds=(zmin, zmax),\n )\n\n for polygon in polygons:\n geometry = td.Structure(\n geometry=polygon,\n medium=medium,\n )\n structures.append(geometry)\n elif layer not in layer_to_material:\n logger.debug(f\"Layer {layer} not in {layer_to_material.keys()}\")\n elif layer_to_material[layer] not in material_name_to_tidy3d:\n materials = list(material_name_to_tidy3d.keys())\n logger.debug(f\"material {layer_to_material[layer]} not in {materials}\")\n\n # Add source\n port = component_ref.ports[port_source_name]\n angle = port.orientation\n width = port.width + 2 * port_margin\n size_x = width * abs(np.sin(angle * np.pi / 180))\n size_y = width * abs(np.cos(angle * np.pi / 180))\n size_x = 0 if size_x < 0.001 else size_x\n size_y = 0 if size_y < 0.001 else size_y\n size_z = cell_thickness - 2 * zmargin if is_3d else td.inf\n\n source_size = [size_x, size_y, size_z]\n source_center = port.center.tolist() + [0] # (x, y, z=0)\n\n xy_shifted = move_polar_rad_copy(\n np.array(port.center), angle=angle * np.pi / 180, length=port_source_offset\n )\n source_center_offset = xy_shifted.tolist() + [0] # (x, y, z=0)\n\n wavelengths = np.linspace(wavelength_start, wavelength_stop, wavelength_points)\n freqs = td.constants.C_0 / wavelengths\n freq0 = td.constants.C_0 / np.mean(wavelengths)\n fwidth = freq0 / 10\n\n msource = td.ModeSource(\n size=source_size,\n center=source_center,\n source_time=td.GaussianPulse(freq0=freq0, fwidth=fwidth),\n direction=\"+\",\n )\n\n # Add port monitors\n monitors = {}\n ports = sort_ports_x(sort_ports_y(component_ref.get_ports_list()))\n for port in ports:\n port_name = port.name\n angle = port.orientation\n width = port.width + 2 * port_margin\n size_x = width * abs(np.sin(angle * np.pi / 180))\n size_y = width * abs(np.cos(angle * np.pi / 180))\n size_x = 0 if size_x < 0.001 else size_x\n size_y = 0 if size_y < 0.001 else size_y\n size = (size_x, size_y, size_z)\n\n # if monitor has a source move monitor inwards\n length = -distance_source_to_monitors if port_name == port_source_name else 0\n xy_shifted = move_polar_rad_copy(\n np.array(port.center), angle=angle * np.pi / 180, length=length\n )\n center = xy_shifted.tolist() + [0] # (x, y, z=0)\n\n monitors[port_name] = td.ModeMonitor(\n center=center,\n size=size,\n freqs=freqs,\n mode_spec=td.ModeSpec(num_modes=1),\n name=port.name,\n )\n\n zcenter = (zmax + zmin) / 2 if is_3d else 0\n domain_monitor = td.FieldMonitor(\n center=[0, 0, zcenter],\n size=[sim_size[0], sim_size[1], 0] if is_3d else [td.inf, td.inf, 0],\n freqs=[freq0],\n name=\"field\",\n )\n monitors = list(monitors.values())\n monitors += [domain_monitor] if with_all_monitors else []\n\n sim = td.Simulation(\n size=sim_size,\n grid_size=3 * [1 / resolution],\n structures=structures,\n sources=[msource],\n monitors=monitors,\n run_time=20 * run_time_ps / fwidth,\n pml_layers=3 * [td.PML()] if is_3d else [td.PML(), td.PML(), None],\n )\n\n if plot_modes:\n src_plane = 
td.Box(center=source_center_offset, size=source_size)\n ms = td.plugins.ModeSolver(simulation=sim, plane=src_plane, freq=freq0)\n mode_spec = td.ModeSpec(num_modes=num_modes)\n modes = ms.solve(mode_spec=mode_spec)\n\n print(\n \"Effective index of computed modes: \",\n \", \".join([f\"{mode.n_eff:1.4f}\" for mode in modes]),\n )\n\n if is_3d:\n fig, axs = plt.subplots(num_modes, 2, figsize=(12, 12))\n else:\n fig, axs = plt.subplots(num_modes, 3, figsize=(12, 12))\n\n for mode_ind in range(num_modes):\n if is_3d:\n abs(modes[mode_ind].field_data.Ey).plot(\n x=\"y\", y=\"z\", cmap=\"magma\", ax=axs[mode_ind, 0]\n )\n abs(modes[mode_ind].field_data.Ez).plot(\n x=\"y\", y=\"z\", cmap=\"magma\", ax=axs[mode_ind, 1]\n )\n else:\n abs(modes[mode_ind].field_data.Ex).plot(ax=axs[mode_ind, 0])\n abs(modes[mode_ind].field_data.Ey).plot(ax=axs[mode_ind, 1])\n abs(modes[mode_ind].field_data.Ez).plot(ax=axs[mode_ind, 2])\n\n axs[mode_ind, 0].set_title(f\"|Ex|: mode_index={mode_ind}\")\n axs[mode_ind, 1].set_title(f\"|Ey|: mode_index={mode_ind}\")\n axs[mode_ind, 2].set_title(f\"|Ez|: mode_index={mode_ind}\")\n\n if is_3d:\n axs[mode_ind, 0].set_aspect(\"equal\")\n axs[mode_ind, 1].set_aspect(\"equal\")\n plt.show()\n return sim", "def generate_measurement_matrices(self):\n if petab.measurement_table_has_timepoint_specific_mappings(\n self.measurement_df):\n raise RuntimeError(\"Timepoint-specific overrides are not yet \"\n \"supported.\")\n\n self.f.create_group(\"/measurements\")\n self.observable_ids = self.amici_model.getObservableIds()\n # trim observable_ TODO: should be done in amici import\n self.observable_ids = [o[len('observable_'):]\n if o.startswith('observable_') else o\n for o in self.observable_ids]\n self.ny = self.amici_model.ny\n write_string_array(self.f, \"/measurements/observableNames\",\n self.observable_ids)\n\n print(Fore.CYAN + \"Number of observables:\", self.ny)\n\n self.write_measurements()\n self.f.flush()", "def get_bodyparts(project_dir):\n print(f\"\\n\\n\\nLoading data\")\n df_paths = sorted(glob.glob(os.path.join(project_dir, '*.h5')))\n points_2d_df = utils.create_dlc_points_2d_file(df_paths)\n arr = points_2d_df[points_2d_df[\"frame\"]==0][[\"marker\"]][points_2d_df[\"camera\"]==0].values\n final_arr = arr.flatten().tolist()\n return(final_arr)", "def get_sensor_summary_info(self):\n import statistics\n info_dict = dict()\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Find the scene count.\")\n vld_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Invalid == False).count()\n invld_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Invalid == True).count()\n dwn_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Downloaded == True).count()\n ard_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.ARDProduct == True).count()\n dcload_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.DCLoaded == True).count()\n arch_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Archived == True).count()\n info_dict['n_scenes'] = dict()\n info_dict['n_scenes']['n_valid_scenes'] = vld_scn_count\n info_dict['n_scenes']['n_invalid_scenes'] = invld_scn_count\n info_dict['n_scenes']['n_downloaded_scenes'] = dwn_scn_count\n info_dict['n_scenes']['n_ard_processed_scenes'] = ard_scn_count\n info_dict['n_scenes']['n_dc_loaded_scenes'] = 
dcload_scn_count\n info_dict['n_scenes']['n_archived_scenes'] = arch_scn_count\n logger.debug(\"Calculated the scene count.\")\n\n logger.debug(\"Find the scene file sizes.\")\n file_sizes = ses.query(EDDSentinel1ASF.Total_Size).filter(EDDSentinel1ASF.Invalid == False).all()\n if file_sizes is not None:\n if len(file_sizes) > 0:\n file_sizes_nums = list()\n for file_size in file_sizes:\n if file_size[0] is not None:\n file_sizes_nums.append(file_size[0])\n if len(file_sizes_nums) > 0:\n total_file_size = sum(file_sizes_nums)\n info_dict['file_size'] = dict()\n info_dict['file_size']['file_size_total'] = total_file_size\n if total_file_size > 0:\n info_dict['file_size']['file_size_mean'] = statistics.mean(file_sizes_nums)\n info_dict['file_size']['file_size_min'] = min(file_sizes_nums)\n info_dict['file_size']['file_size_max'] = max(file_sizes_nums)\n if len(file_sizes_nums) > 1:\n info_dict['file_size']['file_size_stdev'] = statistics.stdev(file_sizes_nums)\n info_dict['file_size']['file_size_median'] = statistics.median(file_sizes_nums)\n if (len(file_sizes_nums) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['file_size']['file_size_quartiles'] = statistics.quantiles(file_sizes_nums)\n logger.debug(\"Calculated the scene file sizes.\")\n\n logger.debug(\"Find download and processing time stats.\")\n download_times = []\n ard_process_times = []\n scns = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Downloaded == True)\n for scn in scns:\n download_times.append((scn.Download_End_Date - scn.Download_Start_Date).total_seconds())\n if scn.ARDProduct:\n ard_process_times.append((scn.ARDProduct_End_Date - scn.ARDProduct_Start_Date).total_seconds())\n\n if len(download_times) > 0:\n info_dict['download_time'] = dict()\n info_dict['download_time']['download_time_mean_secs'] = statistics.mean(download_times)\n info_dict['download_time']['download_time_min_secs'] = min(download_times)\n info_dict['download_time']['download_time_max_secs'] = max(download_times)\n if len(download_times) > 1:\n info_dict['download_time']['download_time_stdev_secs'] = statistics.stdev(download_times)\n info_dict['download_time']['download_time_median_secs'] = statistics.median(download_times)\n if (len(download_times) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['download_time']['download_time_quartiles_secs'] = statistics.quantiles(download_times)\n\n if len(ard_process_times) > 0:\n info_dict['ard_process_time'] = dict()\n info_dict['ard_process_time']['ard_process_time_mean_secs'] = statistics.mean(ard_process_times)\n info_dict['ard_process_time']['ard_process_time_min_secs'] = min(ard_process_times)\n info_dict['ard_process_time']['ard_process_time_max_secs'] = max(ard_process_times)\n if len(ard_process_times) > 1:\n info_dict['ard_process_time']['ard_process_time_stdev_secs'] = statistics.stdev(ard_process_times)\n info_dict['ard_process_time']['ard_process_time_median_secs'] = statistics.median(ard_process_times)\n if (len(ard_process_times) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['ard_process_time']['ard_process_time_quartiles_secs'] = statistics.quantiles(\n ard_process_times)\n logger.debug(\"Calculated the download and processing time stats.\")\n\n if self.calc_scn_usr_analysis():\n plgin_lst = self.get_usr_analysis_keys()\n info_dict['usr_plugins'] = dict()\n for plgin_key in plgin_lst:\n info_dict['usr_plugins'][plgin_key] = dict()\n scns = ses.query(EDDSentinel1ASFPlugins).filter(EDDSentinel1ASFPlugins.PlugInName == plgin_key).all()\n n_err_scns = 0\n 
n_complete_scns = 0\n n_success_scns = 0\n plugin_times = []\n for scn in scns:\n if scn.Completed:\n plugin_times.append((scn.End_Date - scn.Start_Date).total_seconds())\n n_complete_scns += 1\n if scn.Success:\n n_success_scns += 1\n if scn.Error:\n n_err_scns += 1\n info_dict['usr_plugins'][plgin_key]['n_success'] = n_success_scns\n info_dict['usr_plugins'][plgin_key]['n_completed'] = n_complete_scns\n info_dict['usr_plugins'][plgin_key]['n_error'] = n_err_scns\n if len(plugin_times) > 0:\n info_dict['usr_plugins'][plgin_key]['processing'] = dict()\n info_dict['usr_plugins'][plgin_key]['processing']['time_mean_secs'] = statistics.mean(plugin_times)\n info_dict['usr_plugins'][plgin_key]['processing']['time_min_secs'] = min(plugin_times)\n info_dict['usr_plugins'][plgin_key]['processing']['time_max_secs'] = max(plugin_times)\n if len(plugin_times) > 1:\n info_dict['usr_plugins'][plgin_key]['processing']['time_stdev_secs'] = statistics.stdev(plugin_times)\n info_dict['usr_plugins'][plgin_key]['processing']['time_median_secs'] = statistics.median(plugin_times)\n if (len(plugin_times) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['usr_plugins'][plgin_key]['processing']['time_quartiles_secs'] = statistics.quantiles(plugin_times)\n ses.close()\n return info_dict", "def read_all_runs(hdf5_file):\n with pd.HDFStore(hdf5_file) as store:\n\n for key in store.keys():\n df = store.get(key)\n\n ## Load ancestor (up to symmetry) who generated the panel for\n ## this validaiton sim\n sym_gen_anc = store.get_storer(key).attrs.sym_gen_anc\n\n if df.shape[0] == 300000:\n yield df, sym_gen_anc\n else:\n print \"Wrong shape!\", df.shape", "def Data_init(**kwargs):\n if 'file' in kwargs:\n print \"Reading the file\"\n else:\n print \"Randomizing the initial data\"\n XV = np.random.rand(kwargs['particles'],kwargs['dimensions']*2) * 2 - 1\n M = np.random.rand(kwargs['particles'])\n\n t_f,num = kwargs['time']\n t = np.linspace(0,t_f,num)\n\n return XV,M,t", "def generate_observed_mdv(self):\n id_array = []\n ratio_array = []\n std_array = []\n use_array = []\n data = numpy.zeros(self.number_of_replicate)\n for fragment in sorted(self.observed_fragments):\n for number in sorted(self.mdv[fragment].keys()):\n ratio, std, use = self.get_data(fragment, number)\n id_array.append(self.mdv[fragment][number]['id'])#これださい\n ratio_array.append(ratio)\n std_array.append(std)\n if self.number_of_replicate >= 3:\n data = numpy.vstack((data, self.mdv[fragment][number]['data']))\n if use == 'use':\n use_array.append(1)\n else:\n use_array.append(0)\n if self.number_of_replicate >= 3:\n data = data[1:,:]\n return id_array, numpy.array(ratio_array), numpy.array(std_array), use_array, self.observed_fragments, data", "def dataIdentify(self, in_nc):\r\n data_nc = NET.Dataset(in_nc)\r\n time = data_nc.variables['time'][:]\r\n diff = NUM.unique(NUM.diff(time))\r\n data_nc.close()\r\n #time_interval_highres = NUM.array([1.0,3.0,6.0],dtype=float)\r\n #time_interval_lowres_full = NUM.array([3.0, 6.0],dtype=float)\r\n #time_interval_lowres = NUM.array([6.0],dtype=float)\r\n #time_interval_lowres_3Hr = NUM.array([3.0],dtype=float)\r\n\t\t\r\n time_interval_HRES1 = NUM.array([1.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_HRES13 = NUM.array([1.0,3.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_HRES136 = NUM.array([1.0,3.0,6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS3 = NUM.array([3.0],dtype=float) # Line Added/Modified CJB 20190108\r\n 
time_interval_ENS36 = NUM.array([3.0,6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS6 = NUM.array([6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n\r\n\r\n #print \"SDR - diff:\", diff, time_interval_highres, time_interval_lowres_full, time_interval_lowres\r\n #if NUM.array_equal(diff, time_interval_highres):\r\n # return \"HighRes\"\r\n #elif NUM.array_equal(diff, time_interval_lowres_full):\r\n # return \"LowResFull\"\r\n #elif NUM.array_equal(diff, time_interval_lowres):\r\n # return \"LowRes\"\r\n #elif NUM.array_equal(diff, time_interval_lowres_3Hr):\r\n # return \"Low3HrRes\"\r\n #else:\r\n # return None\r\n\t\t\t\r\n if NUM.array_equal(diff, time_interval_HRES1): # Line Added/Modified CJB 20190108\r\n return \"HRES1\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_HRES13): # Line Added/Modified CJB 20190108\r\n return \"HRES13\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_HRES136): # Line Added/Modified CJB 20190108\r\n return \"HRES136\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS3): # Line Added/Modified CJB 20190108\r\n return \"ENS3\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS36): # Line Added/Modified CJB 20190108\r\n return \"ENS36\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS6): # Line Added/Modified MJS, CJB 20190108\r\n return \"ENS6\" # Line Added/Modified CJB 20190108\r\n else: # Line Added/Modified CJB 20190108\r\n return None # Line Added/Modified CJB 20190108\r", "def main(datafilepath):\n #create midline\n sectionsize = 10000\n TrackData = TrackMaker(sectionsize) # 10000\n moving_window = sectionsize*2\n midline = TrackData[0] \n sections = TrackData[2]\n #midline = midline[sections[0]:sections[5],:] #only work with the midline of the trial \n #steergaze_df = pd.read_feather(datafilepath)\n steergaze_df = pd.read_csv(datafilepath, sep=',',header=0)\n #steergaze_df.reset_index()\n master_steergaze = pd.DataFrame()\n datafolder = os.path.split(datafilepath)[0] \n\n #TODO: due to grouping the future path cuts - off at end of slalom, use the continuous trajectory across roadsections for fp mapping\n\n #modes taken from gaze_through_midline_densities.py\n entry = find_closest_index(midline, [-23, 69])\n firstobject = find_closest_index(midline, [25, 52])\n gazemodes = [entry, firstobject]\n\n mid_diff = np.linalg.norm(np.diff(midline, axis=0, prepend = np.array([[0,0]])), axis = 1)\n midline_dist_array = np.cumsum(mid_diff)\n\n tree = spatial.cKDTree(midline)\n\n #for trial in picked_trials:\t\n for block, blockdata in steergaze_df.groupby(['ID','block']):\n\n print(block)\n begin = timer()\n\n\n blockdata = blockdata.copy()\n blockdata.sort_values('currtime', inplace=True)\n # blockdata.reset_index()\n\n ####pick target\n \"\"\"\n condition = blockdata.condition.values[0]\n target_centres = targets.loc[targets['condition']==int(condition),:]\n #pprint(target_centres)\n\n target_centres = target_centres.reset_index(drop=True)\n #pick starting position.\n start_x = np.sign(blockdata['posx']).values[0]\n #select targets with opposite sign for xcentre, these will be the ones encountered in that block\n target_centres = target_centres.loc[np.sign(target_centres['xcentre'])!=start_x,:] \n target_circles = dp.target_position_circles(target_centres)\n\n \"\"\"\n\n traj_x = blockdata['posx'].values\n traj_z = blockdata['posz'].values\n trajectory = 
np.transpose(np.array([traj_x, traj_z]))\n\n yaw = blockdata['yaw'].values\n \n #gaze_on_screen = blockdata['hangle'].values, blockdata['vangle'].values\n gaze_on_screen = np.transpose(np.array([blockdata['hangle'].values, blockdata['vangle'].values]))\n\n #print(yaw[0])\n #index = i\n #\tviewpoint = blockdata['posx'].values, blockdata['posz'].values\n roadsection = blockdata['roadsection'].values\n\n #find time headway along MIDLINE \n \"\"\"\n start = timer()\n #idx, *_ = find_closest_index(midline, trajectory[0,:])\n idx = [find_closest_index(midline, viewpoint) for viewpoint in trajectory] \n print(idx[:10])\n print(timer()-start)\n \"\"\"\n\n #closest_indexes = [closest_node(midline, viewpoint) for viewpoint in trajectory] \n #closest indexes\n #print(np.take(midline, 5, axis = 0, mode = 'wrap'))\n #print(np.take(midline, len(midline), axis = 0, mode = 'wrap'))\n #print(np.take(midline, 0, axis = 0, mode = 'wrap'))\n _, closest_indexes = tree.query(trajectory) \n\n end_of_view = closest_indexes + moving_window\n\n #futuremid = np.take(midline, range(closest_indexes[0], end_of_view[0]), axis = 0, mode = 'wrap')\n def takemid(c,e):\n return (np.take(midline, range(c, e), axis = 0, mode = 'wrap'))\n\n start = timer()\n ml_idx, ml_screen_refs, ml_world_refs, ml_th = zip(*[\n closest_on_screen_point(takemid(c,e), t, y, g) \n for c, e, t, y, g in zip(closest_indexes, end_of_view, trajectory, yaw, gaze_on_screen)\n ])\n print(timer() - start) \n \n print(ml_screen_refs.shape)\n print(type(ml_screen_refs))\n ml_screen_refs = ml_screen_refs.reshape(-1, 2)\n ml_world_refs = ml_world_refs.reshape(-1, 2)\n print(ml_th)\n\n blockdata['midline_ref_onscreen_x'] = ml_screen_refs[:, 0]\n blockdata['midline_ref_onscreen_z'] = ml_screen_refs[:, 1]\n blockdata['midline_ref_world_x'] = ml_world_refs[:, 0]\n blockdata['midline_ref_world_z'] = ml_world_refs[:, 1]\n blockdata['th_along_midline'] = ml_th\n\n #find closest point on FUTURE PATH, with th calc along the path \n \n traj_index = range(len(trajectory))\n fp_idx, fp_screen_refs, fp_world_refs, fp_th = zip(*[\n closest_on_screen_point(trajectory[i:(i+1000),:], t, y, g) \n for i, t, y, g in zip(traj_index, trajectory, yaw, gaze_on_screen)\n ])\n #future_traj = trajectory[index:(index+window_fp), :]\n #fp_world_ref, fp_idx, dists, fp_angles = closest_on_screen_point(future_traj, viewpoint, yaw, gaze_on_screen)\n print(fp_screen_refs.shape)\n print(type(fp_screen_refs))\n fp_screen_refs = fp_screen_refs.reshape(-1, 2)\n fp_world_refs = fp_world_refs.reshape(-1, 2)\n print(ml_th)\n\n blockdata['futurepath_ref_onscreen_x'] = fp_screen_refs[:, 0]\n blockdata['futurepath_ref_onscreen_z'] = fp_screen_refs[:, 1]\n blockdata['futurepath_ref_world_x'] = fp_world_refs[:, 0]\n blockdata['futurepath_ref_world_z'] = fp_world_refs[:, 1]\n blockdata['th_along_futurepath'] = fp_th\n \n \n\n #TODO: current method runs into problems if the viewpoint is just before the midline resets (i.e. 
very large midline_dist_array value).\n #but not a problem for current analysis because trial starts from beginning of midline.\n #th_to_entry\n mid_dist_viewpoint = midline_dist_array[idx]\n\n mid_dist_entry = midline_dist_array[gazemodes[0]]\n th_to_entry = (mid_dist_entry - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata.loc[index,'veh_th_to_entry'] = th_to_entry\n\n #th_to_object\n mid_dist_object = midline_dist_array[gazemodes[1]]\n th_to_object = (mid_dist_object - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata.loc[index,'veh_th_to_object'] = th_to_object\t\t\n \n \"\"\"\n trialcode = row['trialcode']\n #plot\t\t\t \n #print(\"th_along_midline\", ml_timeheadway)\n #print('ml_ref', ml_world_ref)\n #print(\"th_along_futurepath\", fp_timeheadway)\n #print(\"fp_ref\", fp_world_ref)\n\n world_gaze = dp.angles_to_world(gaze_on_screen, viewpoint, yaw)\n #print(\"world_gaze\", world_gaze)\n\n plt.ylim(angles_limits_bottom[1],angles_limits_top[1])\n plt.xlim(angles_limits_bottom[0],angles_limits_top[0])\n\n plt.plot(ml_angles[:,0],ml_angles[:,1], 'C3o', markersize = .5, )\n plt.plot(fp_angles[:,0],fp_angles[:,1], 'C2o', markersize = .5)\n plt.plot(ml_screen_ref[0],ml_screen_ref[1], 'C1o', markersize = 5, markeredgecolor = 'k')\n plt.plot(fp_screen_ref[0],fp_screen_ref[1], 'C0o', markersize = 5, markeredgecolor = 'k')\n\n plt.plot(gaze_on_screen[0],gaze_on_screen[1], 'mo', markersize = 5, markeredgecolor = 'k')\n plt.title(str(trialcode))\n\n\n plt.pause(.016) \n plt.cla()\n\n plt.show()\n \"\"\"\n\t\t\n #master_steergaze = pd.concat([master_steergaze, blockdata])\n\n\n compute_time = timer()-begin\n print(\"Processing block took %f seconds\" % compute_time)\n\n\n print(\"APPENDING DATA FRAME\")\n outfilepath = datafolder + '/trout_gazeandsteering_addthfrompath2.csv'\n\n with open(outfilepath, 'a', newline = '') as sgfile:\n blockdata.to_csv(sgfile, mode='a', header=sgfile.tell()==0)\n\n #master_steergaze.to_csv(datafolder + '/trout_gazeandsteering_addthfrompath.csv')\n\n #master_steergaze.to_feather(datafilepath)", "def get_RDataFrame_analysisIDAndUnitsAndComponentNames_dataStage02GlogNormalized(self,analysis_id_I,concentration_units_I,component_name_I):\n #Tested\n try:\n data = self.session.query(data_stage02_quantification_glogNormalized.experiment_id,\n data_stage02_quantification_analysis.sample_name_abbreviation,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).filter(\n data_stage02_quantification_analysis.analysis_id.like(analysis_id_I),\t\t\n data_stage02_quantification_glogNormalized.analysis_id.like(analysis_id_I),\t\t\t\t\t\n data_stage02_quantification_glogNormalized.experiment_id.like(data_stage02_quantification_analysis.experiment_id),\t\n data_stage02_quantification_glogNormalized.time_point.like(data_stage02_quantification_analysis.time_point),\n data_stage02_quantification_glogNormalized.component_name.like(component_name_I),\n data_stage02_quantification_glogNormalized.calculated_concentration_units.like(concentration_units_I),\n data_stage02_quantification_glogNormalized.sample_name_short.like(data_stage02_quantification_analysis.sample_name_short),\n 
data_stage02_quantification_glogNormalized.used_.is_(True)).group_by(\n data_stage02_quantification_glogNormalized.experiment_id,\n data_stage02_quantification_analysis.sample_name_abbreviation,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).all();\n data_O = [];\n for d in data: \n data_1 = {};\n data_1['experiment_id'] = d.experiment_id;\n data_1['sample_name_abbreviation'] = d.sample_name_abbreviation;\n data_1['sample_name_short'] = d.sample_name_short;\n data_1['time_point'] = d.time_point;\n data_1['component_group_name'] = d.component_group_name;\n data_1['component_name'] = d.component_name;\n data_1['calculated_concentration'] = d.calculated_concentration;\n data_1['calculated_concentration_units'] = d.calculated_concentration_units;\n data_O.append(data_1);\n return data_O;\n except SQLAlchemyError as e:\n print(e);", "def _copy_streamflow_values(self):\r\n log('Creating streamflow variable', 'INFO')\r\n q_var = self.cf_nc.createVariable(\r\n self.output_flow_var_name, 'f4', (self.output_id_dim_name, 'time'))\r\n q_var.long_name = 'Discharge'\r\n q_var.units = 'm^3/s'\r\n q_var.coordinates = 'time lat lon z'\r\n q_var.grid_mapping = 'crs'\r\n q_var.source = ('Generated by the Routing Application for Parallel ' +\r\n 'computatIon of Discharge (RAPID) river routing model.')\r\n q_var.references = 'http://rapid-hub.org/'\r\n q_var.comment = ('lat, lon, and z values taken at midpoint of river ' +\r\n 'reach feature')\r\n\r\n log('Copying streamflow values', 'INFO')\r\n master_begin_time_step_index = 1\r\n master_end_time_step_index = len(self.cf_nc.dimensions['time'])\r\n \r\n #to reduce RAM, copy by chunks\r\n max_2d_dimension = 1000000000 #~8GB Max\r\n for raw_nc_index, raw_nc in enumerate(self.raw_nc_list):\r\n max_time_step_size = min(raw_nc.size_time, max(1, int(float(max_2d_dimension)/float(raw_nc.size_river_id))))\r\n raw_nc_begin_time_step_index = 0\r\n raw_nc_end_time_step_index = raw_nc.size_time\r\n for raw_nc_time_index in xrange(0, raw_nc.size_time, max_time_step_size):\r\n time_interval_size = max(1, min(raw_nc.size_time-raw_nc_time_index, max_time_step_size))\r\n\r\n raw_nc_end_time_step_index = raw_nc_begin_time_step_index + time_interval_size\r\n master_end_time_step_index = master_begin_time_step_index + time_interval_size\r\n \r\n q_var[:,master_begin_time_step_index:master_end_time_step_index] = raw_nc.get_qout(time_index_start=raw_nc_begin_time_step_index,\r\n time_index_end=raw_nc_end_time_step_index)\r\n \r\n master_begin_time_step_index = master_end_time_step_index\r\n raw_nc_begin_time_step_index = raw_nc_end_time_step_index\r\n\r\n log('Adding initial streamflow values', 'INFO')\r\n #add initial flow to RAPID output file\r\n if self.qinit_file and self.rapid_connect_file:\r\n lookup_table = csv_to_list(self.rapid_connect_file)\r\n lookup_comids = np.array([int(float(row[0])) for row in lookup_table])\r\n \r\n init_flow_table = csv_to_list(self.qinit_file)\r\n \r\n for index, comid in enumerate(self.cf_nc.variables[self.output_id_dim_name][:]):\r\n try:\r\n lookup_index = np.where(lookup_comids == comid)[0][0]\r\n except Exception:\r\n log('COMID %s misssing in rapid_connect file' % comid,\r\n 'ERROR')\r\n 
q_var[index,0] = float(init_flow_table[lookup_index][0])\r\n else:\r\n for index, comid in enumerate(self.cf_nc.variables[self.output_id_dim_name][:]):\r\n q_var[index,0] = 0", "def run(config, tim=None):\n import common_lib\n import dr_lib\n import DST\n \n if tim is not None:\n tim.getTime(False)\n old_time = tim.getOldTime()\n\n if config.data is None:\n raise RuntimeError(\"Need to pass a data filename to the driver \"\\\n +\"script.\")\n\n # Read in geometry if one is provided\n if config.inst_geom is not None:\n if config.verbose:\n print \"Reading in instrument geometry file\"\n \n inst_geom_dst = DST.getInstance(\"application/x-NxsGeom\",\n config.inst_geom)\n else:\n inst_geom_dst = None\n\n config.so_axis = \"time_of_flight\"\n\n # Steps 1-3: Produce a scaled summed dark current dataset\n dc_som = dr_lib.scaled_summed_data(config.dkcur, config,\n dataset_type=\"dark_current\",\n timer=tim)\n\n # Perform Steps 3-6 on black can data\n if config.bcan is not None:\n b_som1 = dr_lib.calibrate_dgs_data(config.bcan, config, dc_som,\n dataset_type=\"black_can\",\n inst_geom_dst=inst_geom_dst,\n tib_const=config.tib_const,\n cwp=config.cwp_bcan,\n timer=tim)\n else:\n b_som1 = None\n\n # Perform Steps 3-6 on empty can data \n if config.ecan is not None:\n e_som1 = dr_lib.calibrate_dgs_data(config.ecan, config, dc_som,\n dataset_type=\"empty_can\",\n inst_geom_dst=inst_geom_dst,\n tib_const=config.tib_const,\n cwp=config.cwp_ecan,\n timer=tim)\n else:\n e_som1 = None\n\n # Perform Steps 3-6 on normalization data\n n_som1 = dr_lib.calibrate_dgs_data(config.data, config, dc_som,\n dataset_type=\"normalization\",\n inst_geom_dst=inst_geom_dst,\n tib_const=config.tib_const,\n cwp=config.cwp_data,\n timer=tim)\n\n # Perform Steps 7-16 on normalization data\n if config.norm_trans_coeff is None:\n norm_trans_coeff = None\n else:\n norm_trans_coeff = config.norm_trans_coeff.toValErrTuple()\n\n # Determine if we need to rebin the empty or black can data\n if config.ecan is not None and e_som1 is not None:\n ecan_cwp = True\n else:\n ecan_cwp = False\n\n if config.bcan is not None and b_som1 is not None:\n bcan_cwp = True\n else:\n bcan_cwp = False \n\n cwp_used = ecan_cwp or bcan_cwp\n\n n_som2 = dr_lib.process_dgs_data(n_som1, config, b_som1, e_som1,\n norm_trans_coeff,\n dataset_type=\"normalization\",\n cwp_used=cwp_used,\n timer=tim)\n \n del n_som1, b_som1, e_som1\n\n # Step 17: Integrate normalization spectra\n if config.verbose:\n print \"Integrating normalization spectra\"\n\n if tim is not None:\n tim.getTime(False)\n\n if config.norm_int_range is None:\n start_val = float(\"inf\")\n end_val = float(\"inf\")\n else:\n if not config.wb_norm:\n # Translate energy transfer to final energy\n ef_start = config.initial_energy.getValue() - \\\n config.norm_int_range[0]\n ef_end = config.initial_energy.getValue() - \\\n config.norm_int_range[1]\n # Convert final energy to final wavelength\n start_val = common_lib.energy_to_wavelength((ef_start, 0.0))[0]\n end_val = common_lib.energy_to_wavelength((ef_end, 0.0))[0]\n else:\n start_val = config.norm_int_range[0]\n end_val = config.norm_int_range[1]\n \n n_som3 = dr_lib.integrate_spectra(n_som2, start=start_val,\n end=end_val, width=True)\n\n del n_som2\n \n if tim is not None:\n tim.getTime(msg=\"After integrating normalization spectra \")\n\n file_comment = \"Normalization Integration range: %0.3fA, %0.3fA\" \\\n % (start_val, end_val)\n \n hlr_utils.write_file(config.output, \"text/num-info\", n_som3,\n output_ext=\"norm\",\n 
data_ext=config.ext_replacement,\n path_replacement=config.path_replacement,\n verbose=config.verbose,\n message=\"normalization values\",\n comments=[file_comment],\n tag=\"Integral\", units=\"counts\") \n \n if tim is not None:\n tim.getTime(False)\n\n if config.verbose:\n print \"Making mask file\"\n\n # Make mask file from threshold\n dr_lib.filter_normalization(n_som3, config.lo_threshold,\n config.hi_threshold, config)\n\n if tim is not None:\n tim.getTime(msg=\"After making mask file \")\n\n # Write out RMD file\n n_som3.attr_list[\"config\"] = config\n\n hlr_utils.write_file(config.output, \"text/rmd\", n_som3,\n output_ext=\"rmd\",\n data_ext=config.ext_replacement, \n path_replacement=config.path_replacement,\n verbose=config.verbose,\n message=\"metadata\")\n \n if tim is not None:\n tim.setOldTime(old_time)\n tim.getTime(msg=\"Total Running Time\")", "def get_run_info_novaseq( instrument_model, application_version, tree, pipeline_type ):\n run_stats = {}\n\n setup_node = tree.getroot().find(\"Setup\")\n if setup_node is None:\n setup_node = tree.getroot()\n\n flowcell_node = tree.getroot().find(\"RfidsInfo\")\n instrument_id_node = tree.getroot().find('InstrumentName')\n run_start_date_node = tree.getroot().find('RunStartDate')\n\n # Now actually populate various stats\n run_stats['flow_cell_id'] = flowcell_node.find('FlowCellSerialBarcode').text\n run_stats['date'] = run_start_date_node.text\n run_stats['instrument'] = instrument_id_node.text\n run_stats['flow_cell_mode'] = flowcell_node.find('FlowCellMode').text\n if( run_stats['flow_cell_mode'] in [ 'SP', 'S1', 'S2' ] ):\n run_stats['lanes'] = 2\n elif( run_stats['flow_cell_mode'] in [ 'S4' ] ):\n run_stats['lanes'] = 4\n else:\n raise ValueError( 'Unrecognized flow cell mode \\'%s\\'' % ( run_stats['flow_cell_mode'] ) )\n run_stats['run_id'] = tree.getroot().find('RunId').text\n\n # Read1 and Read2 may be absent\n run_stats['r1_length'] = int(setup_node.find('Read1NumberOfCycles').text)\n run_stats['p7_index_length'] = int(setup_node.find('IndexRead1NumberOfCycles').text)\n\n if( setup_node.find('Read2NumberOfCycles') != None ):\n run_stats['r2_length'] = int(setup_node.find('Read2NumberOfCycles').text)\n run_stats['p5_index_length'] = int(setup_node.find('IndexRead2NumberOfCycles').text)\n run_stats['paired_end'] = True\n else:\n run_stats['paired_end'] = False\n\n application = setup_node.find('Application').text\n application_version = setup_node.find('ApplicationVersion').text\n\n run_stats['instrument_type'] = instrument_model\n\n # Notes:\n # o NovaSeq application 1.7.0 can run reagent kit version 1.0 and 1.5\n # o the NWGC tells us:\n # The NovaSeq v1.5 reagents are run on the NovaSeq that has an updated\n # software which is version 1.7 that flips the i5 indices already on\n # the sequencer when the data comes off. Typically, when the data\n # comes off the sequencers, we need to flip both the i7 and i5 indices\n # to the reverse complement in order to run fastqs or demux the data.\n # With this being the case, only the i7 will need to be reverse\n # complemented typically when data comes off the v1.5 version.\n # o however, not reverse complementing v1.5 fastqs in demultiplexing\n # gives 'normal' looking sample-specific fastq files so I do not\n # reverse complement here but allow for the possiblity in future\n # reagent kits.\n # o The SBS consumable version differs between the two kits. 
The line is\n # <SbsConsumableVersion>1</SbsConsumableVersion>\n # Key\n # 1= v1.0 SBS Reagents\n # 3= v1.5 SBS Reagents\n if( application_version == '1.7.0' or application_version == '1.7.5' ):\n sbs_consumable_version = flowcell_node.find('SbsConsumableVersion').text\n if( sbs_consumable_version == '1' ):\n run_stats['reverse_complement_i5'] = False\n elif( sbs_consumable_version == '3' ):\n if( pipeline_type == 'RNA-seq' ):\n run_stats['reverse_complement_i5'] = True\n elif( pipeline_type == 'ATAC-seq' ):\n run_stats['reverse_complement_i5'] = False\n else:\n raise ValueError('Unrecognized pipeline_type value \\'%s\\'' % ( pipeline_type ))\n else:\n run_stats['reverse_complement_i5'] = False\n\n return run_stats", "def build_sequences(dcm):\n dimension_organization_uid = '1.2.276.0.7230010.3.1.4.8323329.20175.1573232544.237437'\n ds0 = Dataset()\n ds0.DimensionOrganizationUID = dimension_organization_uid\n dcm.DimensionOrganizationSequence = Sequence([ds0])\n del ds0\n\n ds1 = Dataset()\n ds1.DimensionOrganizationUID = dimension_organization_uid\n ds1.DimensionIndexPointer = Tag(0x0048021E)\n ds1.FunctionalGroupPointer = Tag(0x0048021A)\n\n ds2 = Dataset()\n ds2.DimensionOrganizationUID = dimension_organization_uid\n ds2.DimensionIndexPointer = Tag(0x0048021F)\n ds2.FunctionalGroupPointer = Tag(0x0048021A)\n\n dcm.DimensionIndexSequence = Sequence([ds1, ds2])\n del ds1, ds2\n\n ds3 = Dataset()\n ds3.XOffsetInSlideCoordinateSystem = 20\n ds3.YOffsetInSlideCoordinateSystem = 40\n dcm.TotalPixelMatrixOriginSequence = Sequence([ds3])\n del ds3\n\n ds4 = Dataset()\n ds5 = Dataset()\n\n # IlluminationTypeCodeSequence\n ds4.CodingSchemeDesignator = 'DCM'\n ds4.CodeMeaning = 'Brightfield illumination'\n ds4.CodeValue = '111744'\n\n # IlluminationColorCodeSequence\n ds5.CodingSchemeDesignator = 'DCM'\n ds5.CodeMeaning = 'No filter'\n ds5.CodeValue = '111609'\n\n ds7 = Dataset()\n ds7.IlluminationTypeCodeSequence = Sequence([ds4])\n ds7.IlluminationColorCodeSequence = Sequence([ds5])\n # noinspection PyPep8,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection\n ds7.ICCProfile = b'\\x00\\x00\\x1b\\nlcms\\x020\\x00\\x00mntrRGB XYZ \\x07\\xd4\\x00\\x08\\x00\\r\\x00\\x0c\\x00\\x12\\x00\\x06acspMSFT\\x00\\x00\\x00\\x00lcms\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xf6\\xd6\\x00\\x01\\x00\\x00\\x00\\x00\\xd3-lcms\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0cdmnd\\x00\\x00\\x01\\x14\\x00\\x00\\x00jdesc\\x00\\x00\\x01\\x80\\x00\\x00\\x00hdmdd\\x00\\x00\\x01\\xe8\\x00\\x00\\x00hwtpt\\x00\\x00\\x02P\\x00\\x00\\x00\\x14rXYZ\\x00\\x00\\x02d\\x00\\x00\\x00\\x14bXYZ\\x00\\x00\\x02x\\x00\\x00\\x00\\x14gXYZ\\x00\\x00\\x02\\x8c\\x00\\x00\\x00\\x14rTRC\\x00\\x00\\x02\\xa0\\x00\\x00\\x08\\x0cgTRC\\x00\\x00\\n\\xac\\x00\\x00\\x08\\x0cbTRC\\x00\\x00\\x12\\xb8\\x00\\x00\\x08\\x0cchrm\\x00\\x00\\x1a\\xc4\\x00\\x00\\x00$cprt\\x00\\x00\\x1a\\xe8\\x00\\x00\\x00!desc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10lcms generated 
\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00desc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x05sRGB\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00desc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x05sRGB\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00XYZ \\x00\\x00\\x00\\x00\\x00\\x00\\xf3=\\x00\\x01\\x00\\x00\\x00\\x01\\x16\\x98XYZ \\x00\\x00\\x00\\x00\\x00\\x00o\\x94\\x00\\x008\\xee\\x00\\x00\\x03\\x90XYZ \\x00\\x00\\x00\\x00\\x00\\x00$\\x9d\\x00\\x00\\x0f\\x83\\x00\\x00\\xb6\\xbeXYZ \\x00\\x00\\x00\\x00\\x00\\x00b\\xa5\\x00\\x00\\xb7\\x90\\x00\\x00\\x18\\xdecurv\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x05\\x00\\n\\x00\\x0f\\x00\\x14\\x00\\x19\\x00\\x1e\\x00#\\x00(\\x00-\\x002\\x007\\x00;\\x00@\\x00E\\x00J\\x00O\\x00T\\x00Y\\x00^\\x00c\\x00h\\x00m\\x00r\\x00w\\x00|\\x00\\x81\\x00\\x86\\x00\\x8b\\x00\\x90\\x00\\x95\\x00\\x9a\\x00\\x9f\\x00\\xa4\\x00\\xa9\\x00\\xae\\x00\\xb2\\x00\\xb7\\x00\\xbc\\x00\\xc1\\x00\\xc6\\x00\\xcb\\x00\\xd0\\x00\\xd5\\x00\\xdb\\x00\\xe0\\x00\\xe5\\x00\\xeb\\x00\\xf0\\x00\\xf6\\x00\\xfb\\x01\\x01\\x01\\x07\\x01\\r\\x01\\x13\\x01\\x19\\x01\\x1f\\x01%\\x01+\\x012\\x018\\x01>\\x01E\\x01L\\x01R\\x01Y\\x01`\\x01g\\x01n\\x01u\\x01|\\x01\\x83\\x01\\x8b\\x01\\x92\\x01\\x9a\\x01\\xa1\\x01\\xa9\\x01\\xb1\\x01\\xb9\\x01\\xc1\\x01\\xc9\\x01\\xd1\\x01\\xd9\\x01\\xe1\\x01\\xe9\\x01\\xf2\\x01\\xfa\\x02\\x03\\x02\\x0c\\x02\\x14\\x02\\x1d\\x02&\\x02/\\x028\\x02A\\x02K\\x02T\\x02]\\x02g\\x02q\\x02z\\x02\\x84\\x02\\x8e\\x02\\x98\\x02\\xa2\\x02\\xac\\x02\\xb6\\x02\\xc1\\x02\\xcb\\x02\\xd5\\x02\\xe0\\x02\\xeb\\x02\\xf5\\x03\\x00\\x03\\x0b\\x03\\x16\\x03!\\x03-\\x038\\x03C\\x03O\\x03Z\\x03f\\x03r\\x03~\\x03\\x8a\\x03\\x96\\x03\\xa2\\x03\\xae\\x03\\xba\\x03\\xc7\\x03\\xd3\\x03\\xe0\\x03\\xec\\x03\\xf9\\x04\\x06\\x04\\x13\\x04 
\\x04-\\x04;\\x04H\\x04U\\x04c\\x04q\\x04~\\x04\\x8c\\x04\\x9a\\x04\\xa8\\x04\\xb6\\x04\\xc4\\x04\\xd3\\x04\\xe1\\x04\\xf0\\x04\\xfe\\x05\\r\\x05\\x1c\\x05+\\x05:\\x05I\\x05X\\x05g\\x05w\\x05\\x86\\x05\\x96\\x05\\xa6\\x05\\xb5\\x05\\xc5\\x05\\xd5\\x05\\xe5\\x05\\xf6\\x06\\x06\\x06\\x16\\x06\\'\\x067\\x06H\\x06Y\\x06j\\x06{\\x06\\x8c\\x06\\x9d\\x06\\xaf\\x06\\xc0\\x06\\xd1\\x06\\xe3\\x06\\xf5\\x07\\x07\\x07\\x19\\x07+\\x07=\\x07O\\x07a\\x07t\\x07\\x86\\x07\\x99\\x07\\xac\\x07\\xbf\\x07\\xd2\\x07\\xe5\\x07\\xf8\\x08\\x0b\\x08\\x1f\\x082\\x08F\\x08Z\\x08n\\x08\\x82\\x08\\x96\\x08\\xaa\\x08\\xbe\\x08\\xd2\\x08\\xe7\\x08\\xfb\\t\\x10\\t%\\t:\\tO\\td\\ty\\t\\x8f\\t\\xa4\\t\\xba\\t\\xcf\\t\\xe5\\t\\xfb\\n\\x11\\n\\'\\n=\\nT\\nj\\n\\x81\\n\\x98\\n\\xae\\n\\xc5\\n\\xdc\\n\\xf3\\x0b\\x0b\\x0b\"\\x0b9\\x0bQ\\x0bi\\x0b\\x80\\x0b\\x98\\x0b\\xb0\\x0b\\xc8\\x0b\\xe1\\x0b\\xf9\\x0c\\x12\\x0c*\\x0cC\\x0c\\\\\\x0cu\\x0c\\x8e\\x0c\\xa7\\x0c\\xc0\\x0c\\xd9\\x0c\\xf3\\r\\r\\r&\\r@\\rZ\\rt\\r\\x8e\\r\\xa9\\r\\xc3\\r\\xde\\r\\xf8\\x0e\\x13\\x0e.\\x0eI\\x0ed\\x0e\\x7f\\x0e\\x9b\\x0e\\xb6\\x0e\\xd2\\x0e\\xee\\x0f\\t\\x0f%\\x0fA\\x0f^\\x0fz\\x0f\\x96\\x0f\\xb3\\x0f\\xcf\\x0f\\xec\\x10\\t\\x10&\\x10C\\x10a\\x10~\\x10\\x9b\\x10\\xb9\\x10\\xd7\\x10\\xf5\\x11\\x13\\x111\\x11O\\x11m\\x11\\x8c\\x11\\xaa\\x11\\xc9\\x11\\xe8\\x12\\x07\\x12&\\x12E\\x12d\\x12\\x84\\x12\\xa3\\x12\\xc3\\x12\\xe3\\x13\\x03\\x13#\\x13C\\x13c\\x13\\x83\\x13\\xa4\\x13\\xc5\\x13\\xe5\\x14\\x06\\x14\\'\\x14I\\x14j\\x14\\x8b\\x14\\xad\\x14\\xce\\x14\\xf0\\x15\\x12\\x154\\x15V\\x15x\\x15\\x9b\\x15\\xbd\\x15\\xe0\\x16\\x03\\x16&\\x16I\\x16l\\x16\\x8f\\x16\\xb2\\x16\\xd6\\x16\\xfa\\x17\\x1d\\x17A\\x17e\\x17\\x89\\x17\\xae\\x17\\xd2\\x17\\xf7\\x18\\x1b\\x18@\\x18e\\x18\\x8a\\x18\\xaf\\x18\\xd5\\x18\\xfa\\x19 \\x19E\\x19k\\x19\\x91\\x19\\xb7\\x19\\xdd\\x1a\\x04\\x1a*\\x1aQ\\x1aw\\x1a\\x9e\\x1a\\xc5\\x1a\\xec\\x1b\\x14\\x1b;\\x1bc\\x1b\\x8a\\x1b\\xb2\\x1b\\xda\\x1c\\x02\\x1c*\\x1cR\\x1c{\\x1c\\xa3\\x1c\\xcc\\x1c\\xf5\\x1d\\x1e\\x1dG\\x1dp\\x1d\\x99\\x1d\\xc3\\x1d\\xec\\x1e\\x16\\x1e@\\x1ej\\x1e\\x94\\x1e\\xbe\\x1e\\xe9\\x1f\\x13\\x1f>\\x1fi\\x1f\\x94\\x1f\\xbf\\x1f\\xea \\x15 A l \\x98 \\xc4 \\xf0!\\x1c!H!u!\\xa1!\\xce!\\xfb\"\\'\"U\"\\x82\"\\xaf\"\\xdd#\\n#8#f#\\x94#\\xc2#\\xf0$\\x1f$M$|$\\xab$\\xda%\\t%8%h%\\x97%\\xc7%\\xf7&\\'&W&\\x87&\\xb7&\\xe8\\'\\x18\\'I\\'z\\'\\xab\\'\\xdc(\\r(?(q(\\xa2(\\xd4)\\x06)8)k)\\x9d)\\xd0*\\x02*5*h*\\x9b*\\xcf+\\x02+6+i+\\x9d+\\xd1,\\x05,9,n,\\xa2,\\xd7-\\x0c-A-v-\\xab-\\xe1.\\x16.L.\\x82.\\xb7.\\xee/$/Z/\\x91/\\xc7/\\xfe050l0\\xa40\\xdb1\\x121J1\\x821\\xba1\\xf22*2c2\\x9b2\\xd43\\r3F3\\x7f3\\xb83\\xf14+4e4\\x9e4\\xd85\\x135M5\\x875\\xc25\\xfd676r6\\xae6\\xe97$7`7\\x9c7\\xd78\\x148P8\\x8c8\\xc89\\x059B9\\x7f9\\xbc9\\xf9:6:t:\\xb2:\\xef;-;k;\\xaa;\\xe8<\\'<e<\\xa4<\\xe3=\"=a=\\xa1=\\xe0> 
>`>\\xa0>\\xe0?!?a?\\xa2?\\xe2@#@d@\\xa6@\\xe7A)AjA\\xacA\\xeeB0BrB\\xb5B\\xf7C:C}C\\xc0D\\x03DGD\\x8aD\\xceE\\x12EUE\\x9aE\\xdeF\"FgF\\xabF\\xf0G5G{G\\xc0H\\x05HKH\\x91H\\xd7I\\x1dIcI\\xa9I\\xf0J7J}J\\xc4K\\x0cKSK\\x9aK\\xe2L*LrL\\xbaM\\x02MJM\\x93M\\xdcN%NnN\\xb7O\\x00OIO\\x93O\\xddP\\'PqP\\xbbQ\\x06QPQ\\x9bQ\\xe6R1R|R\\xc7S\\x13S_S\\xaaS\\xf6TBT\\x8fT\\xdbU(UuU\\xc2V\\x0fV\\\\V\\xa9V\\xf7WDW\\x92W\\xe0X/X}X\\xcbY\\x1aYiY\\xb8Z\\x07ZVZ\\xa6Z\\xf5[E[\\x95[\\xe5\\\\5\\\\\\x86\\\\\\xd6]\\']x]\\xc9^\\x1a^l^\\xbd_\\x0f_a_\\xb3`\\x05`W`\\xaa`\\xfcaOa\\xa2a\\xf5bIb\\x9cb\\xf0cCc\\x97c\\xebd@d\\x94d\\xe9e=e\\x92e\\xe7f=f\\x92f\\xe8g=g\\x93g\\xe9h?h\\x96h\\xeciCi\\x9ai\\xf1jHj\\x9fj\\xf7kOk\\xa7k\\xfflWl\\xafm\\x08m`m\\xb9n\\x12nkn\\xc4o\\x1eoxo\\xd1p+p\\x86p\\xe0q:q\\x95q\\xf0rKr\\xa6s\\x01s]s\\xb8t\\x14tpt\\xccu(u\\x85u\\xe1v>v\\x9bv\\xf8wVw\\xb3x\\x11xnx\\xccy*y\\x89y\\xe7zFz\\xa5{\\x04{c{\\xc2|!|\\x81|\\xe1}A}\\xa1~\\x01~b~\\xc2\\x7f#\\x7f\\x84\\x7f\\xe5\\x80G\\x80\\xa8\\x81\\n\\x81k\\x81\\xcd\\x820\\x82\\x92\\x82\\xf4\\x83W\\x83\\xba\\x84\\x1d\\x84\\x80\\x84\\xe3\\x85G\\x85\\xab\\x86\\x0e\\x86r\\x86\\xd7\\x87;\\x87\\x9f\\x88\\x04\\x88i\\x88\\xce\\x893\\x89\\x99\\x89\\xfe\\x8ad\\x8a\\xca\\x8b0\\x8b\\x96\\x8b\\xfc\\x8cc\\x8c\\xca\\x8d1\\x8d\\x98\\x8d\\xff\\x8ef\\x8e\\xce\\x8f6\\x8f\\x9e\\x90\\x06\\x90n\\x90\\xd6\\x91?\\x91\\xa8\\x92\\x11\\x92z\\x92\\xe3\\x93M\\x93\\xb6\\x94 \\x94\\x8a\\x94\\xf4\\x95_\\x95\\xc9\\x964\\x96\\x9f\\x97\\n\\x97u\\x97\\xe0\\x98L\\x98\\xb8\\x99$\\x99\\x90\\x99\\xfc\\x9ah\\x9a\\xd5\\x9bB\\x9b\\xaf\\x9c\\x1c\\x9c\\x89\\x9c\\xf7\\x9dd\\x9d\\xd2\\x9e@\\x9e\\xae\\x9f\\x1d\\x9f\\x8b\\x9f\\xfa\\xa0i\\xa0\\xd8\\xa1G\\xa1\\xb6\\xa2&\\xa2\\x96\\xa3\\x06\\xa3v\\xa3\\xe6\\xa4V\\xa4\\xc7\\xa58\\xa5\\xa9\\xa6\\x1a\\xa6\\x8b\\xa6\\xfd\\xa7n\\xa7\\xe0\\xa8R\\xa8\\xc4\\xa97\\xa9\\xa9\\xaa\\x1c\\xaa\\x8f\\xab\\x02\\xabu\\xab\\xe9\\xac\\\\\\xac\\xd0\\xadD\\xad\\xb8\\xae-\\xae\\xa1\\xaf\\x16\\xaf\\x8b\\xb0\\x00\\xb0u\\xb0\\xea\\xb1`\\xb1\\xd6\\xb2K\\xb2\\xc2\\xb38\\xb3\\xae\\xb4%\\xb4\\x9c\\xb5\\x13\\xb5\\x8a\\xb6\\x01\\xb6y\\xb6\\xf0\\xb7h\\xb7\\xe0\\xb8Y\\xb8\\xd1\\xb9J\\xb9\\xc2\\xba;\\xba\\xb5\\xbb.\\xbb\\xa7\\xbc!\\xbc\\x9b\\xbd\\x15\\xbd\\x8f\\xbe\\n\\xbe\\x84\\xbe\\xff\\xbfz\\xbf\\xf5\\xc0p\\xc0\\xec\\xc1g\\xc1\\xe3\\xc2_\\xc2\\xdb\\xc3X\\xc3\\xd4\\xc4Q\\xc4\\xce\\xc5K\\xc5\\xc8\\xc6F\\xc6\\xc3\\xc7A\\xc7\\xbf\\xc8=\\xc8\\xbc\\xc9:\\xc9\\xb9\\xca8\\xca\\xb7\\xcb6\\xcb\\xb6\\xcc5\\xcc\\xb5\\xcd5\\xcd\\xb5\\xce6\\xce\\xb6\\xcf7\\xcf\\xb8\\xd09\\xd0\\xba\\xd1<\\xd1\\xbe\\xd2?\\xd2\\xc1\\xd3D\\xd3\\xc6\\xd4I\\xd4\\xcb\\xd5N\\xd5\\xd1\\xd6U\\xd6\\xd8\\xd7\\\\\\xd7\\xe0\\xd8d\\xd8\\xe8\\xd9l\\xd9\\xf1\\xdav\\xda\\xfb\\xdb\\x80\\xdc\\x05\\xdc\\x8a\\xdd\\x10\\xdd\\x96\\xde\\x1c\\xde\\xa2\\xdf)\\xdf\\xaf\\xe06\\xe0\\xbd\\xe1D\\xe1\\xcc\\xe2S\\xe2\\xdb\\xe3c\\xe3\\xeb\\xe4s\\xe4\\xfc\\xe5\\x84\\xe6\\r\\xe6\\x96\\xe7\\x1f\\xe7\\xa9\\xe82\\xe8\\xbc\\xe9F\\xe9\\xd0\\xea[\\xea\\xe5\\xebp\\xeb\\xfb\\xec\\x86\\xed\\x11\\xed\\x9c\\xee(\\xee\\xb4\\xef@\\xef\\xcc\\xf0X\\xf0\\xe5\\xf1r\\xf1\\xff\\xf2\\x8c\\xf3\\x19\\xf3\\xa7\\xf44\\xf4\\xc2\\xf5P\\xf5\\xde\\xf6m\\xf6\\xfb\\xf7\\x8a\\xf8\\x19\\xf8\\xa8\\xf98\\xf9\\xc7\\xfaW\\xfa\\xe7\\xfbw\\xfc\\x07\\xfc\\x98\\xfd)\\xfd\\xba\\xfeK\\xfe\\xdc\\xffm\\xff\\xffcurv\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x05\\x00\\n\\x00\\x0f\\x00\\x14\\x00\\x19\\x00\\x1e\\x00#\\x00(\\x00-\\x002\\x007\\x00;\\x00@\\x00E\\x00J\\x00O\\x00T\\x00Y\\x00^\\x00c\\x00h\\x00m\\x00r\\x00w\\x00|\\x00\\x81\\x00\\x86\\x00\\x8b\\x00\\x90\\x00\\x95\\x00\\x9a\\x00\\x9f\\x00\
\xa4\\x00\\xa9\\x00\\xae\\x00\\xb2\\x00\\xb7\\x00\\xbc\\x00\\xc1\\x00\\xc6\\x00\\xcb\\x00\\xd0\\x00\\xd5\\x00\\xdb\\x00\\xe0\\x00\\xe5\\x00\\xeb\\x00\\xf0\\x00\\xf6\\x00\\xfb\\x01\\x01\\x01\\x07\\x01\\r\\x01\\x13\\x01\\x19\\x01\\x1f\\x01%\\x01+\\x012\\x018\\x01>\\x01E\\x01L\\x01R\\x01Y\\x01`\\x01g\\x01n\\x01u\\x01|\\x01\\x83\\x01\\x8b\\x01\\x92\\x01\\x9a\\x01\\xa1\\x01\\xa9\\x01\\xb1\\x01\\xb9\\x01\\xc1\\x01\\xc9\\x01\\xd1\\x01\\xd9\\x01\\xe1\\x01\\xe9\\x01\\xf2\\x01\\xfa\\x02\\x03\\x02\\x0c\\x02\\x14\\x02\\x1d\\x02&\\x02/\\x028\\x02A\\x02K\\x02T\\x02]\\x02g\\x02q\\x02z\\x02\\x84\\x02\\x8e\\x02\\x98\\x02\\xa2\\x02\\xac\\x02\\xb6\\x02\\xc1\\x02\\xcb\\x02\\xd5\\x02\\xe0\\x02\\xeb\\x02\\xf5\\x03\\x00\\x03\\x0b\\x03\\x16\\x03!\\x03-\\x038\\x03C\\x03O\\x03Z\\x03f\\x03r\\x03~\\x03\\x8a\\x03\\x96\\x03\\xa2\\x03\\xae\\x03\\xba\\x03\\xc7\\x03\\xd3\\x03\\xe0\\x03\\xec\\x03\\xf9\\x04\\x06\\x04\\x13\\x04 \\x04-\\x04;\\x04H\\x04U\\x04c\\x04q\\x04~\\x04\\x8c\\x04\\x9a\\x04\\xa8\\x04\\xb6\\x04\\xc4\\x04\\xd3\\x04\\xe1\\x04\\xf0\\x04\\xfe\\x05\\r\\x05\\x1c\\x05+\\x05:\\x05I\\x05X\\x05g\\x05w\\x05\\x86\\x05\\x96\\x05\\xa6\\x05\\xb5\\x05\\xc5\\x05\\xd5\\x05\\xe5\\x05\\xf6\\x06\\x06\\x06\\x16\\x06\\'\\x067\\x06H\\x06Y\\x06j\\x06{\\x06\\x8c\\x06\\x9d\\x06\\xaf\\x06\\xc0\\x06\\xd1\\x06\\xe3\\x06\\xf5\\x07\\x07\\x07\\x19\\x07+\\x07=\\x07O\\x07a\\x07t\\x07\\x86\\x07\\x99\\x07\\xac\\x07\\xbf\\x07\\xd2\\x07\\xe5\\x07\\xf8\\x08\\x0b\\x08\\x1f\\x082\\x08F\\x08Z\\x08n\\x08\\x82\\x08\\x96\\x08\\xaa\\x08\\xbe\\x08\\xd2\\x08\\xe7\\x08\\xfb\\t\\x10\\t%\\t:\\tO\\td\\ty\\t\\x8f\\t\\xa4\\t\\xba\\t\\xcf\\t\\xe5\\t\\xfb\\n\\x11\\n\\'\\n=\\nT\\nj\\n\\x81\\n\\x98\\n\\xae\\n\\xc5\\n\\xdc\\n\\xf3\\x0b\\x0b\\x0b\"\\x0b9\\x0bQ\\x0bi\\x0b\\x80\\x0b\\x98\\x0b\\xb0\\x0b\\xc8\\x0b\\xe1\\x0b\\xf9\\x0c\\x12\\x0c*\\x0cC\\x0c\\\\\\x0cu\\x0c\\x8e\\x0c\\xa7\\x0c\\xc0\\x0c\\xd9\\x0c\\xf3\\r\\r\\r&\\r@\\rZ\\rt\\r\\x8e\\r\\xa9\\r\\xc3\\r\\xde\\r\\xf8\\x0e\\x13\\x0e.\\x0eI\\x0ed\\x0e\\x7f\\x0e\\x9b\\x0e\\xb6\\x0e\\xd2\\x0e\\xee\\x0f\\t\\x0f%\\x0fA\\x0f^\\x0fz\\x0f\\x96\\x0f\\xb3\\x0f\\xcf\\x0f\\xec\\x10\\t\\x10&\\x10C\\x10a\\x10~\\x10\\x9b\\x10\\xb9\\x10\\xd7\\x10\\xf5\\x11\\x13\\x111\\x11O\\x11m\\x11\\x8c\\x11\\xaa\\x11\\xc9\\x11\\xe8\\x12\\x07\\x12&\\x12E\\x12d\\x12\\x84\\x12\\xa3\\x12\\xc3\\x12\\xe3\\x13\\x03\\x13#\\x13C\\x13c\\x13\\x83\\x13\\xa4\\x13\\xc5\\x13\\xe5\\x14\\x06\\x14\\'\\x14I\\x14j\\x14\\x8b\\x14\\xad\\x14\\xce\\x14\\xf0\\x15\\x12\\x154\\x15V\\x15x\\x15\\x9b\\x15\\xbd\\x15\\xe0\\x16\\x03\\x16&\\x16I\\x16l\\x16\\x8f\\x16\\xb2\\x16\\xd6\\x16\\xfa\\x17\\x1d\\x17A\\x17e\\x17\\x89\\x17\\xae\\x17\\xd2\\x17\\xf7\\x18\\x1b\\x18@\\x18e\\x18\\x8a\\x18\\xaf\\x18\\xd5\\x18\\xfa\\x19 \\x19E\\x19k\\x19\\x91\\x19\\xb7\\x19\\xdd\\x1a\\x04\\x1a*\\x1aQ\\x1aw\\x1a\\x9e\\x1a\\xc5\\x1a\\xec\\x1b\\x14\\x1b;\\x1bc\\x1b\\x8a\\x1b\\xb2\\x1b\\xda\\x1c\\x02\\x1c*\\x1cR\\x1c{\\x1c\\xa3\\x1c\\xcc\\x1c\\xf5\\x1d\\x1e\\x1dG\\x1dp\\x1d\\x99\\x1d\\xc3\\x1d\\xec\\x1e\\x16\\x1e@\\x1ej\\x1e\\x94\\x1e\\xbe\\x1e\\xe9\\x1f\\x13\\x1f>\\x1fi\\x1f\\x94\\x1f\\xbf\\x1f\\xea \\x15 A l \\x98 \\xc4 
\\xf0!\\x1c!H!u!\\xa1!\\xce!\\xfb\"\\'\"U\"\\x82\"\\xaf\"\\xdd#\\n#8#f#\\x94#\\xc2#\\xf0$\\x1f$M$|$\\xab$\\xda%\\t%8%h%\\x97%\\xc7%\\xf7&\\'&W&\\x87&\\xb7&\\xe8\\'\\x18\\'I\\'z\\'\\xab\\'\\xdc(\\r(?(q(\\xa2(\\xd4)\\x06)8)k)\\x9d)\\xd0*\\x02*5*h*\\x9b*\\xcf+\\x02+6+i+\\x9d+\\xd1,\\x05,9,n,\\xa2,\\xd7-\\x0c-A-v-\\xab-\\xe1.\\x16.L.\\x82.\\xb7.\\xee/$/Z/\\x91/\\xc7/\\xfe050l0\\xa40\\xdb1\\x121J1\\x821\\xba1\\xf22*2c2\\x9b2\\xd43\\r3F3\\x7f3\\xb83\\xf14+4e4\\x9e4\\xd85\\x135M5\\x875\\xc25\\xfd676r6\\xae6\\xe97$7`7\\x9c7\\xd78\\x148P8\\x8c8\\xc89\\x059B9\\x7f9\\xbc9\\xf9:6:t:\\xb2:\\xef;-;k;\\xaa;\\xe8<\\'<e<\\xa4<\\xe3=\"=a=\\xa1=\\xe0> >`>\\xa0>\\xe0?!?a?\\xa2?\\xe2@#@d@\\xa6@\\xe7A)AjA\\xacA\\xeeB0BrB\\xb5B\\xf7C:C}C\\xc0D\\x03DGD\\x8aD\\xceE\\x12EUE\\x9aE\\xdeF\"FgF\\xabF\\xf0G5G{G\\xc0H\\x05HKH\\x91H\\xd7I\\x1dIcI\\xa9I\\xf0J7J}J\\xc4K\\x0cKSK\\x9aK\\xe2L*LrL\\xbaM\\x02MJM\\x93M\\xdcN%NnN\\xb7O\\x00OIO\\x93O\\xddP\\'PqP\\xbbQ\\x06QPQ\\x9bQ\\xe6R1R|R\\xc7S\\x13S_S\\xaaS\\xf6TBT\\x8fT\\xdbU(UuU\\xc2V\\x0fV\\\\V\\xa9V\\xf7WDW\\x92W\\xe0X/X}X\\xcbY\\x1aYiY\\xb8Z\\x07ZVZ\\xa6Z\\xf5[E[\\x95[\\xe5\\\\5\\\\\\x86\\\\\\xd6]\\']x]\\xc9^\\x1a^l^\\xbd_\\x0f_a_\\xb3`\\x05`W`\\xaa`\\xfcaOa\\xa2a\\xf5bIb\\x9cb\\xf0cCc\\x97c\\xebd@d\\x94d\\xe9e=e\\x92e\\xe7f=f\\x92f\\xe8g=g\\x93g\\xe9h?h\\x96h\\xeciCi\\x9ai\\xf1jHj\\x9fj\\xf7kOk\\xa7k\\xfflWl\\xafm\\x08m`m\\xb9n\\x12nkn\\xc4o\\x1eoxo\\xd1p+p\\x86p\\xe0q:q\\x95q\\xf0rKr\\xa6s\\x01s]s\\xb8t\\x14tpt\\xccu(u\\x85u\\xe1v>v\\x9bv\\xf8wVw\\xb3x\\x11xnx\\xccy*y\\x89y\\xe7zFz\\xa5{\\x04{c{\\xc2|!|\\x81|\\xe1}A}\\xa1~\\x01~b~\\xc2\\x7f#\\x7f\\x84\\x7f\\xe5\\x80G\\x80\\xa8\\x81\\n\\x81k\\x81\\xcd\\x820\\x82\\x92\\x82\\xf4\\x83W\\x83\\xba\\x84\\x1d\\x84\\x80\\x84\\xe3\\x85G\\x85\\xab\\x86\\x0e\\x86r\\x86\\xd7\\x87;\\x87\\x9f\\x88\\x04\\x88i\\x88\\xce\\x893\\x89\\x99\\x89\\xfe\\x8ad\\x8a\\xca\\x8b0\\x8b\\x96\\x8b\\xfc\\x8cc\\x8c\\xca\\x8d1\\x8d\\x98\\x8d\\xff\\x8ef\\x8e\\xce\\x8f6\\x8f\\x9e\\x90\\x06\\x90n\\x90\\xd6\\x91?\\x91\\xa8\\x92\\x11\\x92z\\x92\\xe3\\x93M\\x93\\xb6\\x94 
\\x94\\x8a\\x94\\xf4\\x95_\\x95\\xc9\\x964\\x96\\x9f\\x97\\n\\x97u\\x97\\xe0\\x98L\\x98\\xb8\\x99$\\x99\\x90\\x99\\xfc\\x9ah\\x9a\\xd5\\x9bB\\x9b\\xaf\\x9c\\x1c\\x9c\\x89\\x9c\\xf7\\x9dd\\x9d\\xd2\\x9e@\\x9e\\xae\\x9f\\x1d\\x9f\\x8b\\x9f\\xfa\\xa0i\\xa0\\xd8\\xa1G\\xa1\\xb6\\xa2&\\xa2\\x96\\xa3\\x06\\xa3v\\xa3\\xe6\\xa4V\\xa4\\xc7\\xa58\\xa5\\xa9\\xa6\\x1a\\xa6\\x8b\\xa6\\xfd\\xa7n\\xa7\\xe0\\xa8R\\xa8\\xc4\\xa97\\xa9\\xa9\\xaa\\x1c\\xaa\\x8f\\xab\\x02\\xabu\\xab\\xe9\\xac\\\\\\xac\\xd0\\xadD\\xad\\xb8\\xae-\\xae\\xa1\\xaf\\x16\\xaf\\x8b\\xb0\\x00\\xb0u\\xb0\\xea\\xb1`\\xb1\\xd6\\xb2K\\xb2\\xc2\\xb38\\xb3\\xae\\xb4%\\xb4\\x9c\\xb5\\x13\\xb5\\x8a\\xb6\\x01\\xb6y\\xb6\\xf0\\xb7h\\xb7\\xe0\\xb8Y\\xb8\\xd1\\xb9J\\xb9\\xc2\\xba;\\xba\\xb5\\xbb.\\xbb\\xa7\\xbc!\\xbc\\x9b\\xbd\\x15\\xbd\\x8f\\xbe\\n\\xbe\\x84\\xbe\\xff\\xbfz\\xbf\\xf5\\xc0p\\xc0\\xec\\xc1g\\xc1\\xe3\\xc2_\\xc2\\xdb\\xc3X\\xc3\\xd4\\xc4Q\\xc4\\xce\\xc5K\\xc5\\xc8\\xc6F\\xc6\\xc3\\xc7A\\xc7\\xbf\\xc8=\\xc8\\xbc\\xc9:\\xc9\\xb9\\xca8\\xca\\xb7\\xcb6\\xcb\\xb6\\xcc5\\xcc\\xb5\\xcd5\\xcd\\xb5\\xce6\\xce\\xb6\\xcf7\\xcf\\xb8\\xd09\\xd0\\xba\\xd1<\\xd1\\xbe\\xd2?\\xd2\\xc1\\xd3D\\xd3\\xc6\\xd4I\\xd4\\xcb\\xd5N\\xd5\\xd1\\xd6U\\xd6\\xd8\\xd7\\\\\\xd7\\xe0\\xd8d\\xd8\\xe8\\xd9l\\xd9\\xf1\\xdav\\xda\\xfb\\xdb\\x80\\xdc\\x05\\xdc\\x8a\\xdd\\x10\\xdd\\x96\\xde\\x1c\\xde\\xa2\\xdf)\\xdf\\xaf\\xe06\\xe0\\xbd\\xe1D\\xe1\\xcc\\xe2S\\xe2\\xdb\\xe3c\\xe3\\xeb\\xe4s\\xe4\\xfc\\xe5\\x84\\xe6\\r\\xe6\\x96\\xe7\\x1f\\xe7\\xa9\\xe82\\xe8\\xbc\\xe9F\\xe9\\xd0\\xea[\\xea\\xe5\\xebp\\xeb\\xfb\\xec\\x86\\xed\\x11\\xed\\x9c\\xee(\\xee\\xb4\\xef@\\xef\\xcc\\xf0X\\xf0\\xe5\\xf1r\\xf1\\xff\\xf2\\x8c\\xf3\\x19\\xf3\\xa7\\xf44\\xf4\\xc2\\xf5P\\xf5\\xde\\xf6m\\xf6\\xfb\\xf7\\x8a\\xf8\\x19\\xf8\\xa8\\xf98\\xf9\\xc7\\xfaW\\xfa\\xe7\\xfbw\\xfc\\x07\\xfc\\x98\\xfd)\\xfd\\xba\\xfeK\\xfe\\xdc\\xffm\\xff\\xffcurv\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x05\\x00\\n\\x00\\x0f\\x00\\x14\\x00\\x19\\x00\\x1e\\x00#\\x00(\\x00-\\x002\\x007\\x00;\\x00@\\x00E\\x00J\\x00O\\x00T\\x00Y\\x00^\\x00c\\x00h\\x00m\\x00r\\x00w\\x00|\\x00\\x81\\x00\\x86\\x00\\x8b\\x00\\x90\\x00\\x95\\x00\\x9a\\x00\\x9f\\x00\\xa4\\x00\\xa9\\x00\\xae\\x00\\xb2\\x00\\xb7\\x00\\xbc\\x00\\xc1\\x00\\xc6\\x00\\xcb\\x00\\xd0\\x00\\xd5\\x00\\xdb\\x00\\xe0\\x00\\xe5\\x00\\xeb\\x00\\xf0\\x00\\xf6\\x00\\xfb\\x01\\x01\\x01\\x07\\x01\\r\\x01\\x13\\x01\\x19\\x01\\x1f\\x01%\\x01+\\x012\\x018\\x01>\\x01E\\x01L\\x01R\\x01Y\\x01`\\x01g\\x01n\\x01u\\x01|\\x01\\x83\\x01\\x8b\\x01\\x92\\x01\\x9a\\x01\\xa1\\x01\\xa9\\x01\\xb1\\x01\\xb9\\x01\\xc1\\x01\\xc9\\x01\\xd1\\x01\\xd9\\x01\\xe1\\x01\\xe9\\x01\\xf2\\x01\\xfa\\x02\\x03\\x02\\x0c\\x02\\x14\\x02\\x1d\\x02&\\x02/\\x028\\x02A\\x02K\\x02T\\x02]\\x02g\\x02q\\x02z\\x02\\x84\\x02\\x8e\\x02\\x98\\x02\\xa2\\x02\\xac\\x02\\xb6\\x02\\xc1\\x02\\xcb\\x02\\xd5\\x02\\xe0\\x02\\xeb\\x02\\xf5\\x03\\x00\\x03\\x0b\\x03\\x16\\x03!\\x03-\\x038\\x03C\\x03O\\x03Z\\x03f\\x03r\\x03~\\x03\\x8a\\x03\\x96\\x03\\xa2\\x03\\xae\\x03\\xba\\x03\\xc7\\x03\\xd3\\x03\\xe0\\x03\\xec\\x03\\xf9\\x04\\x06\\x04\\x13\\x04 
\\x04-\\x04;\\x04H\\x04U\\x04c\\x04q\\x04~\\x04\\x8c\\x04\\x9a\\x04\\xa8\\x04\\xb6\\x04\\xc4\\x04\\xd3\\x04\\xe1\\x04\\xf0\\x04\\xfe\\x05\\r\\x05\\x1c\\x05+\\x05:\\x05I\\x05X\\x05g\\x05w\\x05\\x86\\x05\\x96\\x05\\xa6\\x05\\xb5\\x05\\xc5\\x05\\xd5\\x05\\xe5\\x05\\xf6\\x06\\x06\\x06\\x16\\x06\\'\\x067\\x06H\\x06Y\\x06j\\x06{\\x06\\x8c\\x06\\x9d\\x06\\xaf\\x06\\xc0\\x06\\xd1\\x06\\xe3\\x06\\xf5\\x07\\x07\\x07\\x19\\x07+\\x07=\\x07O\\x07a\\x07t\\x07\\x86\\x07\\x99\\x07\\xac\\x07\\xbf\\x07\\xd2\\x07\\xe5\\x07\\xf8\\x08\\x0b\\x08\\x1f\\x082\\x08F\\x08Z\\x08n\\x08\\x82\\x08\\x96\\x08\\xaa\\x08\\xbe\\x08\\xd2\\x08\\xe7\\x08\\xfb\\t\\x10\\t%\\t:\\tO\\td\\ty\\t\\x8f\\t\\xa4\\t\\xba\\t\\xcf\\t\\xe5\\t\\xfb\\n\\x11\\n\\'\\n=\\nT\\nj\\n\\x81\\n\\x98\\n\\xae\\n\\xc5\\n\\xdc\\n\\xf3\\x0b\\x0b\\x0b\"\\x0b9\\x0bQ\\x0bi\\x0b\\x80\\x0b\\x98\\x0b\\xb0\\x0b\\xc8\\x0b\\xe1\\x0b\\xf9\\x0c\\x12\\x0c*\\x0cC\\x0c\\\\\\x0cu\\x0c\\x8e\\x0c\\xa7\\x0c\\xc0\\x0c\\xd9\\x0c\\xf3\\r\\r\\r&\\r@\\rZ\\rt\\r\\x8e\\r\\xa9\\r\\xc3\\r\\xde\\r\\xf8\\x0e\\x13\\x0e.\\x0eI\\x0ed\\x0e\\x7f\\x0e\\x9b\\x0e\\xb6\\x0e\\xd2\\x0e\\xee\\x0f\\t\\x0f%\\x0fA\\x0f^\\x0fz\\x0f\\x96\\x0f\\xb3\\x0f\\xcf\\x0f\\xec\\x10\\t\\x10&\\x10C\\x10a\\x10~\\x10\\x9b\\x10\\xb9\\x10\\xd7\\x10\\xf5\\x11\\x13\\x111\\x11O\\x11m\\x11\\x8c\\x11\\xaa\\x11\\xc9\\x11\\xe8\\x12\\x07\\x12&\\x12E\\x12d\\x12\\x84\\x12\\xa3\\x12\\xc3\\x12\\xe3\\x13\\x03\\x13#\\x13C\\x13c\\x13\\x83\\x13\\xa4\\x13\\xc5\\x13\\xe5\\x14\\x06\\x14\\'\\x14I\\x14j\\x14\\x8b\\x14\\xad\\x14\\xce\\x14\\xf0\\x15\\x12\\x154\\x15V\\x15x\\x15\\x9b\\x15\\xbd\\x15\\xe0\\x16\\x03\\x16&\\x16I\\x16l\\x16\\x8f\\x16\\xb2\\x16\\xd6\\x16\\xfa\\x17\\x1d\\x17A\\x17e\\x17\\x89\\x17\\xae\\x17\\xd2\\x17\\xf7\\x18\\x1b\\x18@\\x18e\\x18\\x8a\\x18\\xaf\\x18\\xd5\\x18\\xfa\\x19 \\x19E\\x19k\\x19\\x91\\x19\\xb7\\x19\\xdd\\x1a\\x04\\x1a*\\x1aQ\\x1aw\\x1a\\x9e\\x1a\\xc5\\x1a\\xec\\x1b\\x14\\x1b;\\x1bc\\x1b\\x8a\\x1b\\xb2\\x1b\\xda\\x1c\\x02\\x1c*\\x1cR\\x1c{\\x1c\\xa3\\x1c\\xcc\\x1c\\xf5\\x1d\\x1e\\x1dG\\x1dp\\x1d\\x99\\x1d\\xc3\\x1d\\xec\\x1e\\x16\\x1e@\\x1ej\\x1e\\x94\\x1e\\xbe\\x1e\\xe9\\x1f\\x13\\x1f>\\x1fi\\x1f\\x94\\x1f\\xbf\\x1f\\xea \\x15 A l \\x98 \\xc4 \\xf0!\\x1c!H!u!\\xa1!\\xce!\\xfb\"\\'\"U\"\\x82\"\\xaf\"\\xdd#\\n#8#f#\\x94#\\xc2#\\xf0$\\x1f$M$|$\\xab$\\xda%\\t%8%h%\\x97%\\xc7%\\xf7&\\'&W&\\x87&\\xb7&\\xe8\\'\\x18\\'I\\'z\\'\\xab\\'\\xdc(\\r(?(q(\\xa2(\\xd4)\\x06)8)k)\\x9d)\\xd0*\\x02*5*h*\\x9b*\\xcf+\\x02+6+i+\\x9d+\\xd1,\\x05,9,n,\\xa2,\\xd7-\\x0c-A-v-\\xab-\\xe1.\\x16.L.\\x82.\\xb7.\\xee/$/Z/\\x91/\\xc7/\\xfe050l0\\xa40\\xdb1\\x121J1\\x821\\xba1\\xf22*2c2\\x9b2\\xd43\\r3F3\\x7f3\\xb83\\xf14+4e4\\x9e4\\xd85\\x135M5\\x875\\xc25\\xfd676r6\\xae6\\xe97$7`7\\x9c7\\xd78\\x148P8\\x8c8\\xc89\\x059B9\\x7f9\\xbc9\\xf9:6:t:\\xb2:\\xef;-;k;\\xaa;\\xe8<\\'<e<\\xa4<\\xe3=\"=a=\\xa1=\\xe0> 
>`>\\xa0>\\xe0?!?a?\\xa2?\\xe2@#@d@\\xa6@\\xe7A)AjA\\xacA\\xeeB0BrB\\xb5B\\xf7C:C}C\\xc0D\\x03DGD\\x8aD\\xceE\\x12EUE\\x9aE\\xdeF\"FgF\\xabF\\xf0G5G{G\\xc0H\\x05HKH\\x91H\\xd7I\\x1dIcI\\xa9I\\xf0J7J}J\\xc4K\\x0cKSK\\x9aK\\xe2L*LrL\\xbaM\\x02MJM\\x93M\\xdcN%NnN\\xb7O\\x00OIO\\x93O\\xddP\\'PqP\\xbbQ\\x06QPQ\\x9bQ\\xe6R1R|R\\xc7S\\x13S_S\\xaaS\\xf6TBT\\x8fT\\xdbU(UuU\\xc2V\\x0fV\\\\V\\xa9V\\xf7WDW\\x92W\\xe0X/X}X\\xcbY\\x1aYiY\\xb8Z\\x07ZVZ\\xa6Z\\xf5[E[\\x95[\\xe5\\\\5\\\\\\x86\\\\\\xd6]\\']x]\\xc9^\\x1a^l^\\xbd_\\x0f_a_\\xb3`\\x05`W`\\xaa`\\xfcaOa\\xa2a\\xf5bIb\\x9cb\\xf0cCc\\x97c\\xebd@d\\x94d\\xe9e=e\\x92e\\xe7f=f\\x92f\\xe8g=g\\x93g\\xe9h?h\\x96h\\xeciCi\\x9ai\\xf1jHj\\x9fj\\xf7kOk\\xa7k\\xfflWl\\xafm\\x08m`m\\xb9n\\x12nkn\\xc4o\\x1eoxo\\xd1p+p\\x86p\\xe0q:q\\x95q\\xf0rKr\\xa6s\\x01s]s\\xb8t\\x14tpt\\xccu(u\\x85u\\xe1v>v\\x9bv\\xf8wVw\\xb3x\\x11xnx\\xccy*y\\x89y\\xe7zFz\\xa5{\\x04{c{\\xc2|!|\\x81|\\xe1}A}\\xa1~\\x01~b~\\xc2\\x7f#\\x7f\\x84\\x7f\\xe5\\x80G\\x80\\xa8\\x81\\n\\x81k\\x81\\xcd\\x820\\x82\\x92\\x82\\xf4\\x83W\\x83\\xba\\x84\\x1d\\x84\\x80\\x84\\xe3\\x85G\\x85\\xab\\x86\\x0e\\x86r\\x86\\xd7\\x87;\\x87\\x9f\\x88\\x04\\x88i\\x88\\xce\\x893\\x89\\x99\\x89\\xfe\\x8ad\\x8a\\xca\\x8b0\\x8b\\x96\\x8b\\xfc\\x8cc\\x8c\\xca\\x8d1\\x8d\\x98\\x8d\\xff\\x8ef\\x8e\\xce\\x8f6\\x8f\\x9e\\x90\\x06\\x90n\\x90\\xd6\\x91?\\x91\\xa8\\x92\\x11\\x92z\\x92\\xe3\\x93M\\x93\\xb6\\x94 \\x94\\x8a\\x94\\xf4\\x95_\\x95\\xc9\\x964\\x96\\x9f\\x97\\n\\x97u\\x97\\xe0\\x98L\\x98\\xb8\\x99$\\x99\\x90\\x99\\xfc\\x9ah\\x9a\\xd5\\x9bB\\x9b\\xaf\\x9c\\x1c\\x9c\\x89\\x9c\\xf7\\x9dd\\x9d\\xd2\\x9e@\\x9e\\xae\\x9f\\x1d\\x9f\\x8b\\x9f\\xfa\\xa0i\\xa0\\xd8\\xa1G\\xa1\\xb6\\xa2&\\xa2\\x96\\xa3\\x06\\xa3v\\xa3\\xe6\\xa4V\\xa4\\xc7\\xa58\\xa5\\xa9\\xa6\\x1a\\xa6\\x8b\\xa6\\xfd\\xa7n\\xa7\\xe0\\xa8R\\xa8\\xc4\\xa97\\xa9\\xa9\\xaa\\x1c\\xaa\\x8f\\xab\\x02\\xabu\\xab\\xe9\\xac\\\\\\xac\\xd0\\xadD\\xad\\xb8\\xae-\\xae\\xa1\\xaf\\x16\\xaf\\x8b\\xb0\\x00\\xb0u\\xb0\\xea\\xb1`\\xb1\\xd6\\xb2K\\xb2\\xc2\\xb38\\xb3\\xae\\xb4%\\xb4\\x9c\\xb5\\x13\\xb5\\x8a\\xb6\\x01\\xb6y\\xb6\\xf0\\xb7h\\xb7\\xe0\\xb8Y\\xb8\\xd1\\xb9J\\xb9\\xc2\\xba;\\xba\\xb5\\xbb.\\xbb\\xa7\\xbc!\\xbc\\x9b\\xbd\\x15\\xbd\\x8f\\xbe\\n\\xbe\\x84\\xbe\\xff\\xbfz\\xbf\\xf5\\xc0p\\xc0\\xec\\xc1g\\xc1\\xe3\\xc2_\\xc2\\xdb\\xc3X\\xc3\\xd4\\xc4Q\\xc4\\xce\\xc5K\\xc5\\xc8\\xc6F\\xc6\\xc3\\xc7A\\xc7\\xbf\\xc8=\\xc8\\xbc\\xc9:\\xc9\\xb9\\xca8\\xca\\xb7\\xcb6\\xcb\\xb6\\xcc5\\xcc\\xb5\\xcd5\\xcd\\xb5\\xce6\\xce\\xb6\\xcf7\\xcf\\xb8\\xd09\\xd0\\xba\\xd1<\\xd1\\xbe\\xd2?\\xd2\\xc1\\xd3D\\xd3\\xc6\\xd4I\\xd4\\xcb\\xd5N\\xd5\\xd1\\xd6U\\xd6\\xd8\\xd7\\\\\\xd7\\xe0\\xd8d\\xd8\\xe8\\xd9l\\xd9\\xf1\\xdav\\xda\\xfb\\xdb\\x80\\xdc\\x05\\xdc\\x8a\\xdd\\x10\\xdd\\x96\\xde\\x1c\\xde\\xa2\\xdf)\\xdf\\xaf\\xe06\\xe0\\xbd\\xe1D\\xe1\\xcc\\xe2S\\xe2\\xdb\\xe3c\\xe3\\xeb\\xe4s\\xe4\\xfc\\xe5\\x84\\xe6\\r\\xe6\\x96\\xe7\\x1f\\xe7\\xa9\\xe82\\xe8\\xbc\\xe9F\\xe9\\xd0\\xea[\\xea\\xe5\\xebp\\xeb\\xfb\\xec\\x86\\xed\\x11\\xed\\x9c\\xee(\\xee\\xb4\\xef@\\xef\\xcc\\xf0X\\xf0\\xe5\\xf1r\\xf1\\xff\\xf2\\x8c\\xf3\\x19\\xf3\\xa7\\xf44\\xf4\\xc2\\xf5P\\xf5\\xde\\xf6m\\xf6\\xfb\\xf7\\x8a\\xf8\\x19\\xf8\\xa8\\xf98\\xf9\\xc7\\xfaW\\xfa\\xe7\\xfbw\\xfc\\x07\\xfc\\x98\\xfd)\\xfd\\xba\\xfeK\\xfe\\xdc\\xffm\\xff\\xffchrm\\x00\\x00\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x00\\xa3\\xd7\\x00\\x00T{\\x00\\x00L\\xcd\\x00\\x00\\x99\\x9a\\x00\\x00&f\\x00\\x00\\x0f\\\\text\\x00\\x00\\x00\\x00no copyright, use freely\\x00\\n'\n ds7.OpticalPathIdentifier = '1'\n # noinspection SpellCheckingInspection\n 
ds7.OpticalPathDescription = 'Brightfield'\n\n dcm.OpticalPathSequence = Sequence([ds7])\n del ds7, ds5, ds4\n\n dcm.AcquisitionContextSequence = Sequence([])\n\n ds0 = Dataset()\n ds0.LocalNamespaceEntityID = 'UNKNOWN'\n dcm.IssuerOfTheContainerIdentifierSequence = Sequence([ds0])\n del ds0\n\n ds0 = Dataset()\n\n ds0.SpecimenIdentifier = 'UNKNOWN'\n ds0.SpecimenPreparationSequence = Sequence([])\n ds0.SpecimenUID = generate_uid(prefix=None)\n ds0.IssuerOfTheSpecimenIdentifierSequence = Sequence([])\n dcm.SpecimenDescriptionSequence = Sequence([ds0])\n dcm.ContainerTypeCodeSequence = Sequence([])\n dcm.ContainerIdentifier = 'UNKNOWN'\n return dcm", "def run(self):\n\n # driver=\"H5FD_CORE\" another driver for Solid State devs?\n theFile = tables.open_file(self.hdfFileName, \"w\")\n theFile.create_group(\"/\", \"transitionLogs\")\n theLog = theFile.create_earray(where=theFile.root,\n name=\"log\",\n atom=tables.StringAtom(itemsize=120),\n shape=(0,),\n title=\"log messages\",\n filters=tables.Filters(complevel=9,\n complib='zlib'))\n speciesTables = {}\n\n try:\n # do a loop!\n while True:\n try:\n msg = self.transitionsPipe.recv()\n # msg=messagequeue.get()\n except EOFError:\n break\n cmd = msg[0]\n if cmd == \"parameters\":\n # expect two dictionaries\n parameters, runParameters = msg[1], msg[2]\n\n if type(parameters) is dict:\n if \"/parameters\" in theFile:\n parameterTable = theFile.root.parameters\n else:\n parameterTable = theFile.create_table(\n \"/\",\n \"parameters\",\n HDFLoggingProcess.parameterTableFormat)\n parameterRow = parameterTable.row\n varTypeEnum = parameterTable.coldescrs[\"varType\"].enum\n varTypeDict = {int: varTypeEnum[\"INT\"],\n str: varTypeEnum[\"STR\"],\n float: varTypeEnum[\"FLOAT\"],\n bool: varTypeEnum[\"BOOL\"]}\n runType = varTypeEnum[\"RUN\"]\n\n for k, v in parameters.items():\n varType = varTypeDict[type(v)]\n parameterRow[\"varName\"] = str(k)\n parameterRow[\"varType\"] = varType\n parameterRow[\"varValue\"] = str(v)\n parameterRow.append()\n\n for k, v in runParameters.items():\n parameterRow[\"varName\"] = str(k)\n parameterRow[\"varType\"] = runType\n parameterRow[\"varValue\"] = str(v)\n parameterRow.append()\n\n parameterTable.close()\n del parameterRow, parameterTable\n elif type(parameters) is scenario:\n print(\"writing scenarios\")\n parameters.writeToHDF(theFile.root, 'scenario')\n else:\n print(\"unsupported type: {}\".format(type(parameters)))\n\n # need a table def and a transition log\n elif cmd == \"registerTransitionType\":\n # change lists to enumerations!\n # expect list of extra columns as msg[2]\n theColumns = {}\n for name, col in msg[2].items():\n if type(col) is dict:\n # this is an enumeration type used\n # for the from/to state\n col = tables.EnumCol(tables.Enum(col),\n \"start\",\n \"uint16\")\n elif type(col) is str:\n # column of type defined by string\n col = eval(col) # ToDo: remove eval\n theColumns[name] = col\n\n # gets species name and table format as dict\n transitions = type(\"transitions\",\n (tables.IsDescription,),\n theColumns)\n speciesTables[msg[1]] = theFile.create_table(\n \"/transitionLogs\",\n msg[1],\n transitions,\n filters=tables.Filters(\n complevel=9,\n complib=\"lzo\",\n least_significant_digit=3))\n\n elif cmd == \"changeFile\":\n # close tables and file\n for t in speciesTables.values():\n t.close()\n del t\n del speciesTables\n theLog.close()\n del theLog\n theFile.close()\n del theFile\n\n # set new file name\n self.hdfFileName = msg[1]\n # open new one\n # potentially a driver=\"H5FD_CORE\" 
?\n theFile = tables.open_file(self.hdfFileName, \"w\")\n theFile.create_group(\"/\", \"transitionLogs\")\n theLog = theFile.create_earray(\n where=theFile.root,\n name=\"log\",\n atom=tables.StringAtom(itemsize=120),\n shape=(0,),\n title=\"log messages\",\n filters=tables.Filters(complevel=9,\n complib='zlib'))\n speciesTables = {}\n # expecting replay of species tables\n\n elif cmd == \"logTransition\":\n # gets species name and values in order as defined by the\n # table format\n # todo: check the format!\n table = speciesTables[msg[1]]\n row = table.row\n agentId, t1, t2, fromState, toState, effort = msg[2]\n row[\"agentId\"] = agentId\n row[\"timeStamp\"] = t2\n row[\"fromState\"] = fromState\n row[\"toState\"] = toState\n row[\"dwellTime\"] = t2-t1\n row[\"effort\"] = effort\n\n if len(msg) > 2:\n # are there any extra parameters?\n for name, value in msg[3].items():\n if type(value) is str:\n row[name] = numpy.array(value.encode(),\n dtype=\"S\")\n else:\n row[name] = value\n row.append()\n del table, row\n\n # also a progress table\n elif cmd == \"progress\":\n # if not there, create new table\n if \"/progress\" not in theFile:\n theFile.create_table(\n '/',\n 'progress',\n HDFLoggingProcess.hdfProgressTable)\n # add values as they are...\n theFile.root.progress.append([msg[1]])\n\n elif cmd == \"message\":\n theLog.append(numpy.array([str(msg[1])], dtype=\"S120\"))\n\n elif cmd == \"end\":\n break\n\n else:\n print(\"unknown type {}\".format(msg[0]))\n except:\n raise\n finally:\n # messagequeue.close()\n self.transitionsPipe.close()\n del self.transitionsPipe\n # print(\"finished \", messagepipe)\n # done, be pedantic about closing all resources\n for t in speciesTables.values():\n t.close()\n del t\n del speciesTables\n theLog.close()\n del theLog\n theFile.close()\n del theFile", "def test_scenes_scene_id_get(self):\n pass", "def get_data(self, run_id, metric_ids=None):\n now = datetime.datetime.now()\n end = datetime.datetime(now.year, now.month, now.day)\n start = end - datetime.timedelta(days=4*365)\n test_measures = self.repo.get_measurements(run_id=run_id,\n start_date=start,\n end_date=end,\n metric_ids=metric_ids)\n return test_measures", "def get_RDataFrame_analysisIDAndExperimentIDAndTimePointAndUnitsAndComponentNames_dataStage02GlogNormalized_v1(self,analysis_id_I, experiment_id_I,time_point_I,concentration_units_I,component_name_I,exp_type_I=4):\n #Tested\n try:\n data = self.session.query(data_stage02_quantification_glogNormalized.experiment_id,\n sample_description.sample_name_abbreviation,\n sample_description.sample_replicate,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).filter(\n data_stage02_quantification_glogNormalized.analysis_id.like(analysis_id_I),\t\t\t\t\t\n data_stage02_quantification_glogNormalized.experiment_id.like(experiment_id_I),\n data_stage02_quantification_glogNormalized.time_point.like(time_point_I),\n data_stage02_quantification_glogNormalized.component_name.like(component_name_I),\n data_stage02_quantification_glogNormalized.calculated_concentration_units.like(concentration_units_I),\n data_stage02_quantification_glogNormalized.sample_name_short.like(sample_description.sample_name_short),\n 
sample_description.sample_id.like(sample.sample_id),\n sample.sample_name.like(experiment.sample_name),\n experiment.id.like(experiment_id_I),\n experiment.exp_type_id == exp_type_I,\n data_stage02_quantification_glogNormalized.used_.is_(True)).group_by(\n data_stage02_quantification_glogNormalized.experiment_id,\n sample_description.sample_name_abbreviation,\n sample_description.sample_replicate,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).all();\n data_O = [];\n for d in data: \n data_1 = {};\n data_1['experiment_id'] = d.experiment_id;\n data_1['sample_name_abbreviation'] = d.sample_name_abbreviation;\n data_1['sample_replicate'] = d.sample_replicate;\n data_1['sample_name_short'] = d.sample_name_short;\n data_1['time_point'] = d.time_point;\n data_1['component_group_name'] = d.component_group_name;\n data_1['component_name'] = d.component_name;\n data_1['calculated_concentration'] = d.calculated_concentration;\n data_1['calculated_concentration_units'] = d.calculated_concentration_units;\n data_O.append(data_1);\n return data_O;\n except SQLAlchemyError as e:\n print(e);", "def getCube(unique_name):", "def get_data(subject, session, only_success, silent) :\n\n game_data = get_game_data3(subject, session, silent=silent)\n physio_data = get_physio_data(subject, session)\n\n # test if there is any data!\n if not all(game_data.shape):\n raise DataAccessError('empty game data')\n if not all(physio_data.shape):\n raise DataAccessError('empty physio data')\n \n\n # using my block times extraction\n trials = extract_trial_times(game_data, only_success)\n if len(trials[0]) == 0 :\n raise DataAccessError('no trials extracted')\n\n # convert trial times to UTC\n if int(subject) >= 400:\n one_hour_in_secs = 60*60\n trials[0] -= one_hour_in_secs\n trials[1] -= one_hour_in_secs \n \n # get first and last timestamp\n min_tr0 = min(trials[0])\n min_tr1 = min(trials[1])\n min_phy = min(physio_data['time'])\n min_time = min(min_tr0, min_tr1, min_phy)\n max_tr0 = max(trials[0])\n max_tr1 = max(trials[1])\n max_phy = max(physio_data['time'])\n max_time = max(max_tr0, max_tr1, max_phy)\n\n # transform to relative time scales\n physio_data['time'] -= min_time\n trials[0] -= min_time\n trials[1] -= min_time\n\n return physio_data, trials, (min_time, max_time)", "def get_RExpressionData_analysisIDAndUnits_dataStage02GlogNormalized_v1(self, analysis_id_I,concentration_units_I,exp_type_I=4):\n #Tested\n try:\n data = self.session.query(data_stage02_quantification_glogNormalized.analysis_id,\n data_stage02_quantification_glogNormalized.experiment_id,\n sample_description.sample_name_abbreviation,\n sample_description.sample_replicate,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).filter(\n data_stage02_quantification_glogNormalized.analysis_id.like(analysis_id_I),\n 
data_stage02_quantification_glogNormalized.calculated_concentration_units.like(concentration_units_I),\n sample_description.sample_name_short.like(data_stage02_quantification_glogNormalized.sample_name_short),\n sample_description.time_point.like(data_stage02_quantification_glogNormalized.time_point),\n sample_description.sample_id.like(sample.sample_id),\n sample.sample_name.like(experiment.sample_name),\n experiment.id.like(data_stage02_quantification_glogNormalized.experiment_id),\n experiment.exp_type_id == exp_type_I,\n data_stage02_quantification_glogNormalized.used_.is_(True)).group_by(\n data_stage02_quantification_glogNormalized.analysis_id,\n data_stage02_quantification_glogNormalized.experiment_id,\n sample_description.sample_name_abbreviation,\n sample_description.sample_replicate,\n data_stage02_quantification_glogNormalized.sample_name_short,\n data_stage02_quantification_glogNormalized.time_point,\n data_stage02_quantification_glogNormalized.component_group_name,\n data_stage02_quantification_glogNormalized.component_name,\n data_stage02_quantification_glogNormalized.calculated_concentration,\n data_stage02_quantification_glogNormalized.calculated_concentration_units).all();\n data_O = [];\n for d in data: \n data_1 = {};\n data_1['analysis_id'] = d.analysis_id;\n data_1['experiment_id'] = d.experiment_id;\n data_1['sample_name_abbreviation'] = d.sample_name_abbreviation;\n data_1['sample_replicate'] = d.sample_replicate;\n data_1['sample_name_short'] = d.sample_name_short;\n data_1['time_point'] = d.time_point;\n data_1['component_group_name'] = d.component_group_name;\n data_1['component_name'] = d.component_name;\n data_1['calculated_concentration'] = d.calculated_concentration;\n data_1['calculated_concentration_units'] = d.calculated_concentration_units;\n data_O.append(data_1);\n return data_O;\n except SQLAlchemyError as e:\n print(e);", "def data_saving(self, sol):\r\n # Only export data once per time-step. We do this on the conduction\r\n # step.\r\n if self.save_rule is not None:\r\n save_rule_true = self.save_rule(self.step, self.d_T)\r\n else:\r\n save_rule_true = True\r\n \r\n if self.cond == True:\r\n series = \"Temperature\"\r\n elif self.rad is not None:\r\n series = (\"Radiation\", self.fq_list[self.rad])\r\n else:\r\n # before sim starts... EARLY EXIT\r\n return\r\n\r\n if self.cond == True and save_rule_true:\r\n # Save data to file with step no.\r\n # First, generate dictionaries with {nid:value}\r\n\r\n # CASE 1: Export mesh = FEM mesh (ie, no enrichment, easier!)\r\n if self.export_mesh is None:\r\n data_temp = {}\r\n for nid in self.mesh.nodes.keys():\r\n idx = self.node_map.tag_to_idx((nid, 0))\r\n data_temp[nid] = self.lst_tmp[idx]\r\n data_rad = {}\r\n for i in self.fq_list:\r\n data_rad[i] = {}\r\n for nid in self.mesh.nodes.keys():\r\n idx = self.node_map.tag_to_idx((nid, 0))\r\n for i in range(0, len(self.fq_list)):\r\n data_rad[self.fq_list[i]][nid] = self.lst_rad[i][idx]\r\n # End CASE 1 - see after case two for finishing export.\r\n\r\n\r\n # CASE 2: Exporting to a different mesh to the the XFEM / FEM\r\n # mesh. \r\n else:\r\n # We need to a mapping from global to local element \r\n # coordinates. 
We'll do this once and then store it.\r\n # We store it in self.export_to_elem dictionary.\r\n if self.export_to_elem is None:\r\n self.export_to_elem = \\\r\n self.mesh.project_points(self.export_mesh.nodes,\r\n failure_rule='closest')\r\n # Dictionaries to export:\r\n data_temp = {}\r\n data_rad = {}\r\n # Setup frequency data:\r\n for i in self.fq_list:\r\n data_rad[i] = {}\r\n\r\n for node_id, expt_data in self.export_to_elem.items():\r\n # Unpack the value of the dictionary value for clarity:\r\n elem = expt_data[0]\r\n eta = expt_data[1] # local coord\r\n\r\n # Get element / solution indexes:\r\n val = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n data_temp[node_id] = val\r\n # And for all the frequencies:\r\n for i in range(0, len(self.fq_list)):\r\n data_rad[self.fq_list[i]][node_id] \\\r\n = elem.eval_elem(self.node_map,\r\n self.lst_rad[i],\r\n [eta])[0]\r\n # END CASE 2\r\n\r\n expt_data = {\"Temperature\": data_temp}\r\n for freq, nvals in data_rad.items():\r\n expt_data[str(freq * 10) + \"THz\"] = data_rad[freq]\r\n\r\n # Send to be exported as a VTK.\r\n if self.export_mesh is None:\r\n self.mesh.export_to_vtk(self.save_path + str(self.step),\r\n expt_data)\r\n else:\r\n self.export_mesh.export_to_vtk(self.save_path + str(self.step),\r\n expt_data)\r\n\r\n try:\r\n container = self.saved_data[series]\r\n except:\r\n self.saved_data[series] = {}\r\n container = self.saved_data[series]\r\n if self.step % 10 == 0 or self.step < 10:\r\n container[self.step] = saved_data(sol, self.step, self.current_T)", "def mat_section(ball_id='MATX-BM005'):\r\n\r\n try:\r\n\r\n # get connection\r\n conn = get_sql_conn()\r\n\r\n # intialize variables\r\n hot_id = ''\r\n ball_out_id = ''\r\n hot_out_id = ''\r\n\r\n # query material procurement table for given id value\r\n query_mat = \\\r\n 'Select * from material_procurement where ball_milling_uid = \\'{}\\''.format(str(ball_id))\r\n df_mat = pd.read_sql_query(query_mat, con=conn)\r\n\r\n # query ball mill table for given id value\r\n query_ball = \\\r\n 'Select * from ball_milling where uid = \\'{}\\''.format(ball_id)\r\n df_ball = pd.read_sql_query(query_ball, con=conn)\r\n\r\n # if valid entry for a material exists, get output material id and hot press id\r\n if not df_ball.empty:\r\n ball_out_id = df_ball.iloc[0]['output_material_uid']\r\n hot_id = df_ball.iloc[0]['hot_press_uid']\r\n\r\n # if valid hot press id exsists query hot press table\r\n if hot_id:\r\n query_hot = \\\r\n 'Select * from hot_press where uid = \\'{}\\''.format(hot_id)\r\n df_hot = pd.read_sql_query(query_hot, con=conn)\r\n\r\n # get output material id from hot table\r\n if not df_hot.empty:\r\n hot_out_id = df_hot.iloc[0]['output_material_uid']\r\n\r\n # get lab reports for ball mill and hot press material from hall measurement and icp measurement tables\r\n if ball_out_id or hot_out_id:\r\n\r\n query_hall = \\\r\n 'Select * from hall_measurement where material_uid in (\\'{}\\', \\'{}\\')'.format(hot_out_id,\r\n ball_out_id)\r\n df_hall = pd.read_sql_query(query_hall, con=conn)\r\n\r\n query_icp = \\\r\n 'Select * from icp_measurement where material_uid in (\\'{}\\', \\'{}\\')'.format(hot_out_id,\r\n ball_out_id)\r\n df_icp = pd.read_sql_query(query_icp, con=conn)\r\n\r\n # format materials table\r\n df_mat = df_mat.drop(['uid', 'ball_milling_uid'], axis=1)\r\n\r\n # format ball mill table\r\n df_ball['milling_speed'] = df_ball['milling_speed'].astype(str) \\\r\n + ' ' + df_ball['milling_speed_units']\r\n df_ball['milling_time'] = 
df_ball['milling_time'].astype(str) \\\r\n + ' ' + df_ball['milling_time_units']\r\n df_ball = df_ball[['milling_speed', 'milling_time']]\r\n df_ball = df_ball.T.reset_index()\r\n df_ball = df_ball.rename(columns={'index': 'BALL MILLING', 0: ''\r\n })\r\n\r\n # format hot press table\r\n df_hot['hot_press_temperature'] = df_hot['hot_press_temperature'\r\n ].astype(str) + ' ' \\\r\n + df_hot['hot_press_temperature_units']\r\n df_hot['hot_press_pressure'] = df_hot['hot_press_pressure'\r\n ].astype(str) + ' ' + df_hot['hot_press_pressure_units']\r\n df_hot['hot_press_time'] = df_hot['hot_press_time'].astype(str) \\\r\n + ' ' + df_hot['hot_press_time_units']\r\n df_hot = df_hot[['hot_press_temperature', 'hot_press_pressure',\r\n 'hot_press_time', 'output_material_name']]\r\n df_hot = df_hot.T.reset_index()\r\n df_hot = df_hot.rename(columns={'index': 'HOT PROCESS', 0: ''})\r\n\r\n # format hall measurement table\r\n df_hall['probe_resistance'] = df_hall['probe_resistance'\r\n ].astype(str) + ' ' + df_hall['probe_resistance_units']\r\n df_hall['current'] = df_hall['current'].astype(str) + ' ' \\\r\n + df_hall['current_units']\r\n df_hall['field_strength'] = df_hall['field_strength'\r\n ].astype(str) + ' ' + df_hall['field_strength_units']\r\n df_hall = df_hall[['process_type', 'probe_resistance',\r\n 'probe_material', 'current', 'field_strength'\r\n ]]\r\n df_hall = df_hall.T.reset_index()\r\n df_hall = df_hall.rename(columns={'index': 'HALL REPORT', 0: ''\r\n , 1: ''})\r\n\r\n # format icp measurement table\r\n df_icp['gas_flow_rate'] = df_icp['gas_flow_rate'].astype(str) \\\r\n + ' ' + df_icp['gas_flow_rate_units']\r\n df_icp['radio_frequency'] = df_icp['radio_frequency'\r\n ].astype(str) + ' ' + df_icp['radio_frequency_units']\r\n df_icp = df_icp[[\r\n 'process_type',\r\n 'pb_concentration',\r\n 'sn_concentration',\r\n 'o_concentration',\r\n 'gas_flow_rate',\r\n 'radio_frequency',\r\n ]]\r\n df_icp = df_icp.T.reset_index()\r\n df_icp = df_icp.rename(columns={'index': 'ICP REPORT', 0: '',\r\n 1: ''})\r\n\r\n # create a side by side display of tables with a heading\r\n display_html('<h1> Material Properties {} </h1>'.format(str(ball_id)),\r\n raw=True)\r\n\r\n # function to print tables side by side\r\n display_side_by_side(df_mat, df_ball, df_hot, df_hall, df_icp)\r\n\r\n # close opened database connection\r\n conn.close()\r\n except Exception:\r\n print ('error occured: Please check Ball Mill ID')", "def sim_dataset(rs, num_encs,M,pos_class_rate = 0.5):\n np.random.seed(seed=rs)\n data = []\n num_timepoints = np.random.randint(30,50, size=num_encs)\n #signal used to modify timeseries of cases:\n channel_vec = np.random.randint(-1,2,M) #vector of values from -1 to 1 of length M \n #Define patient ids and cases & controls \n pat_ids = np.arange(num_encs)\n case_index = int(num_encs*pos_class_rate) \n case_ids = pat_ids[:case_index]\n control_ids = pat_ids[case_index:] \n \n print(f'Simming {num_encs} patients ..') \n #Generate Data for cases and controls\n for i in pat_ids:\n length = num_timepoints[i]\n if i < case_index:\n #generate case\n labels, onset = create_label(length, case=True)\n X = generate_time_series(length, M)\n X = add_signal(X, onset, channel_vec) \n X['SepsisLabel']= labels \n else:\n #generate control\n labels, _ = create_label(length, case=False)\n X = generate_time_series(length, M)\n X['SepsisLabel']= labels\n data.append(X) \n #Shuffle list of patients\n np.random.shuffle(data)\n return data", "def get_output_elements(self):\n\n self.buildings = 
self.dataset.groups['buildings']\n self.building_elements = self.buildings.groups['elements']\n\n eta_output_added = getattr(self.building_elements,'eta_output_added')\n uv_output_added = getattr(self.building_elements,'uv_output_added')\n eta = []\n uv = []\n elementIds = []\n time = []\n \n if(eta_output_added or uv_output_added ):\n time = self.building_elements.variables['time'][:].tolist()\n elementIds = self.building_elements.variables['id'][:].tolist()\n if eta_output_added: eta = self.building_elements.variables['eta'][:].tolist()\n if uv_output_added: uv = self.building_elements.variables['uv'][:].tolist()\n \n return elementIds,eta, uv, time" ]
[ "0.61006594", "0.5689221", "0.56312686", "0.5612549", "0.55848944", "0.55802494", "0.55152977", "0.5429191", "0.54044414", "0.5346934", "0.53425074", "0.53219706", "0.5282279", "0.52707255", "0.5253489", "0.5242618", "0.52306026", "0.5217722", "0.5216223", "0.5202785", "0.5199477", "0.5199074", "0.51916367", "0.51867384", "0.5159805", "0.51594216", "0.5149636", "0.5143907", "0.5136742", "0.5133924", "0.5115879", "0.5105993", "0.510475", "0.510053", "0.50930995", "0.508397", "0.50760293", "0.50753206", "0.5073903", "0.5072395", "0.5064903", "0.5063095", "0.50605553", "0.5057226", "0.50551087", "0.50463575", "0.5043899", "0.50331527", "0.5031855", "0.5028895", "0.5016786", "0.5008701", "0.500832", "0.50055397", "0.5005466", "0.5000073", "0.4999406", "0.49989176", "0.49982756", "0.49968684", "0.49934736", "0.49928576", "0.49883822", "0.4982387", "0.49821144", "0.4977377", "0.4967148", "0.49668074", "0.49625784", "0.49613765", "0.49580303", "0.4957397", "0.49554345", "0.49487925", "0.4944156", "0.49355417", "0.49269634", "0.49256817", "0.4925504", "0.4917038", "0.49160933", "0.49158663", "0.49107304", "0.49095196", "0.49028987", "0.48980224", "0.48964027", "0.48961222", "0.48925966", "0.48890123", "0.48884538", "0.48800015", "0.48763916", "0.48758653", "0.48747995", "0.48718858", "0.4871176", "0.48681128", "0.48643526", "0.4863113", "0.48626417" ]
0.0
-1
There are 4 dimensions: simulation id (600), timesteps (500), runs (3) and coordinates (216). For every scene, we need to pull up 216 floats for the 72 (x, y, z) coordinates. The user may want to see the shell for another run of the same simulation parameters. Hence, runs are the second-fastest-varying dimension.
import h5py as hp
import numpy as np


def onevtkfile():
    # Consolidate the per-run VTK HDF5 files into a single chunked dataset.
    basedir = '/home/amit/WorkSpace/UCLA/simulations/PhaseDiagram/RawData'
    with hp.File('VTKFile.h5', 'w') as onefile:
        # axes: simulation id (600), timestep (500), run (3), coordinate (216)
        allvtk = np.empty((600, 500, 3, 216), dtype=np.float32)
        for j in range(600):
            for i in range(3):
                vtkfilepath = '{}/Run{}/VTKFile-{}.h5'.format(basedir, i, j + 1)
                with hp.File(vtkfilepath, 'r') as vtkfile:
                    for t in range(500):
                        # source files index their time groups in steps of two (T0, T2, ...)
                        allvtk[j, t, i, :] = vtkfile['T{}/Points'.format(2 * t)][:].ravel()
        # one chunk holds a block of 50 timesteps for all runs of one simulation id
        onefile.create_dataset('Points', data=allvtk, chunks=(1, 50, 3, 216),
                               compression='gzip', compression_opts=9)
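A minimal read-back sketch of the access pattern described in the query, assuming the consolidated VTKFile.h5 written by onevtkfile() above; the scene indices 42 and 100 below are purely illustrative. One scene is a 216-float slice reshaped into 72 (x, y, z) triples, and since each (1, 50, 3, 216) chunk spans all three runs, fetching the same scene for another run is typically served from the chunk HDF5 has already decompressed and cached.

import h5py as hp

# one scene = 216 floats, viewed as 72 (x, y, z) triples
with hp.File('VTKFile.h5', 'r') as f:
    points = f['Points']
    shell_run0 = points[42, 100, 0, :].reshape(72, 3)
    # the other runs of the same scene live in the same (1, 50, 3, 216) chunk,
    # so these reads normally hit the chunk cache rather than a fresh decompress
    shell_run1 = points[42, 100, 1, :].reshape(72, 3)
    shell_run2 = points[42, 100, 2, :].reshape(72, 3)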
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def general_simulation_data(self):\n iterations = {}\n nstates = {}\n natoms = {}\n for phase in self.phases:\n positions = self.ncfiles[phase].variables['positions']\n iterations[phase], nstates[phase], natoms[phase], spatial = positions.shape\n\n leniter = max(len('Iterations'), *[len(str(i)) for i in iterations.values()]) + 2\n lenstates = max(len('States'), *[len(str(i)) for i in nstates.values()]) + 2\n lennatoms = max(len('Num Atoms'), *[len(str(i)) for i in natoms.values()]) + 2\n lenleftcol = max(len('Phase'), *[len(phase) for phase in self.phases]) + 2\n\n lines = []\n headstring = ''\n headstring += ('{:^' + '{}'.format(lenleftcol) + '}').format('Phase') + '|'\n headstring += ('{:^' + '{}'.format(leniter) + '}').format('Iterations') + '|'\n headstring += ('{:^' + '{}'.format(lenstates) + '}').format('States') + '|'\n headstring += ('{:^' + '{}'.format(lennatoms) + '}').format('Num Atoms')\n lines.append(headstring)\n lenline = len(headstring)\n topdiv = '=' * lenline\n lines.append(topdiv)\n for phase in self.phases:\n phasestring = ''\n phasestring += ('{:^' + '{}'.format(lenleftcol) + '}').format(phase) + '|'\n phasestring += ('{:^' + '{}'.format(leniter) + '}').format(iterations[phase]) + '|'\n phasestring += ('{:^' + '{}'.format(lenstates) + '}').format(nstates[phase]) + '|'\n phasestring += ('{:^' + '{}'.format(lennatoms) + '}').format(natoms[phase])\n lines.append(phasestring)\n lines.append('-' * lenline)\n\n for line in lines:\n print(line)\n self.iterations = iterations\n self._general_run = True", "def AllindividualRuns():\n #800 nm\n RunData(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'), out='I800nm')\n RunData(getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), folder='data/31Jul/'), out='I800nm5k')\n RunData(getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/'), out='I800nm10k')\n RunData(getFiles(mintime=(15, 43, 24), maxtime=(15, 51, 47), folder='data/31Jul/'), out='I800nm20k')\n RunData(getFiles(mintime=(15, 56, 11), maxtime=(16, 02, 58), folder='data/31Jul/'), out='I800nm30k')\n RunData(getFiles(mintime=(16, 12, 39), maxtime=(16, 18, 25), folder='data/31Jul/'), out='I800nm38k')\n RunData(getFiles(mintime=(16, 21, 52), maxtime=(16, 26, 16), folder='data/31Jul/'), out='I800nm50k')\n RunData(getFiles(mintime=(16, 32, 02), maxtime=(16, 35, 23), folder='data/31Jul/'), out='I800nm54k')\n #700 nm\n RunData(getFiles(mintime=(17, 20, 17), maxtime=(17, 33, 17), folder='data/30Jul/'), out='I700nm5k')\n RunData(getFiles(mintime=(17, 37, 35), maxtime=(17, 46, 51), folder='data/30Jul/'), out='I700nm9k')\n RunData(getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/'), out='I700nm52k')\n RunData(getFiles(mintime=(17, 58, 18), maxtime=(17, 59, 31), folder='data/30Jul/'), out='I700nm32k')\n #600 nm\n RunData(getFiles(mintime=(15, 22, 00), maxtime=(15, 36, 32), folder='data/30Jul/'), out='I600nm5k')\n RunData(getFiles(mintime=(15, 39, 58), maxtime=(15, 47, 58), folder='data/30Jul/'), out='I600nm54k')\n RunData(getFiles(mintime=(15, 52, 07), maxtime=(16, 06, 32), folder='data/30Jul/'), out='I600nm10k')\n #890 nm\n RunData(getFiles(mintime=(13, 37, 37), maxtime=(13, 50, 58), folder='data/01Aug/'), out='I890nm5k')\n RunData(getFiles(mintime=(14, 00, 58), maxtime=(14, 11, 54), folder='data/01Aug/'), out='I890nm10k')\n RunData(getFiles(mintime=(14, 17, 57), maxtime=(14, 25, 49), folder='data/01Aug/'), out='I890nm30k')\n RunData(getFiles(mintime=(14, 30, 03), maxtime=(14, 34, 37), folder='data/01Aug/'), 
out='I890nm50k')", "def dimensions():", "def positions(self, tileID, numSamples):", "def calc_a_run(run):\n if run.get('N_dependance',False): ## light version's size is independent of N.\n # if all N are equal, we should use N, otherwise use largest.\n N = np.max(run['args']['number_of_points'])\n r = Factory_thouless_psi(run['npz_fname'].format(**run['args']), N= N)\n else:\n r = Factory_Transmission_g(run['npz_fname'].format(**run['args']))\n \n return r.create_if_missing(run['args'])", "def system_fleet_dimensioning(self):", "def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n trialsRecord = []\n for trail in range(num_trials):\n #VISUALIZING ROBOTS - refer course pdf note 'Optional_Visualizing Robots Problem Set 2.pdf'\n #anim = ps2_visualize.RobotVisualization(num_robots, width, height)\n #create room\n room = RectangularRoom(width, height)\n #create robots & store in array\n robots = []\n count = 0\n for i in range(num_robots):\n robots.append(robot_type(room, speed))\n #NB: how does robot_type(room, speed) create a robot object???? what magic is this???\n #while calcualted coverage is < min_coverage, update positions & repeat\n while float(room.getNumCleanedTiles()) / room.getNumTiles() < min_coverage:\n #anim.update(room, robots)\n #do more cleaning - update robot positions\n for robot in robots:\n robot.updatePositionAndClean()\n count += 1\n trialsRecord.append(count)#record number of steps to achieve min_coverage in this trial.\n #after loop, close animation\n #anim.done()\n #calculate average number of steps over trials.\n return sum(trialsRecord)/float(len(trialsRecord))\n #raise NotImplementedError", "def defineCoords(dimensions, steps):\n ### NOT CURRENTLY USED\n \n print(\"1D\")\n xCoords = np.arange(-X/2, X/2+dx, dx) # 1D \n fxCoords = np.arange(0, X/dx+dx)\n fxCoords = fxCoords - fxCoords[-1]/2 # Shift everything over so the center of array is at f = 0\n realCoords = xCoords\n fourierCoords = fxCoords\n realSpace = np.zeros(Nx+1)\n xRealSpace = np.zeros(Nx)\n #realSpace = np.zeros(Nx) #1D\n \n fourierSpace = np.zeros_like(realSpace, complex)\n\n return realCoords, fourierCoords, realSpace, fourierSpace", "def get_data(self,path):\n\t\twith open(path,'r') as file:\n\t\t\tdata = file.readlines()\n\t\t\tself.objects = int(data[0].split()[1])#getting num of object\n\t\t\tdata = data[2:]#Cutting header lines from data file\n\t\t\tself.positions = [[] for i in range(self.objects)]#creating array, each row represents all positions for agiven planet\n\t\t\tself.steps = int((len(data))/self.objects)#Cal steps\n\t\t\tself.center = [[] for i in range(self.objects)]\n\t\t\tprint \"steps = \", self.steps\n\t\t\tprint \"objects = \",self.objects \n\t\t\tprint(self.steps)\n\t\t\tfor el in range(self.objects):#iterator = all planets\n\t\t\t\tself.times =[]\n\t\t\t\tfor step in range(self.steps):#iterator = all simulation frames\n\t\t\t\t\ttemp_data = data[step*self.objects+el].split()#Separating t x y z from data lines\n\t\t\t\t\ttemp_data2 = [float(i) for i in temp_data[1:4]]#converting to float without t parameter\n\t\t\t\t\ttemp_center = [float(i) for i in temp_data[4:]]\n\t\t\t\t\tself.times.append(float(temp_data[0])) \n\t\t\t\t\tself.positions[el].append(temp_data2)#adding to self.positions list\n\t\t\t\t\tself.center[el].append(temp_center)\n\t\t\t\t\n\t\t\t\tself.add_planet(self.positions[el][0][0], self.positions[el][0][1], self.positions[el][0][2])#Inicialization planet with intial positions\n\t\t\tself.p = 
vis.points(pos=tuple(self.center[0][0]), size=3,color=color.black)#mass center \n\t\t\tself.p2 = vis.points(pos=(0,0,0),size=3,color=color.red)#sun center\n\t\t\tself.sun = vis.sphere(radius=0.2, pos = (0,0,0), opacity = 0.8, material=materials.emissive)\n\t\t\tself.dt= int(self.times[self.objects+1])", "def explore_runs(df, option, gamma, alpha):\n\n n_states = len(np.unique(df.objnum))\n SR_matrices = {}\n M = np.zeros([n_states, n_states])\n\n # This option allows the SR matrix to persist in Part 1 and Part 2,\n # but resets it between them.\n if option == \"reset\":\n for part in np.unique(df.part):\n if part == 2:\n M = np.zeros([n_states, n_states])\n for run in np.unique(df.loc[df.part == part, 'run']):\n envstep = df.loc[(df.part == part) & (df.run == run),\n 'objnum'].values\n M = np.array(run_experiment(envstep, gamma, alpha, np.copy(M),\n n_states))\n M = M / np.sum(M)\n SR_matrices[(part, run)] = M\n\n # This option resets the SR matrix between each run.\n if option == \"independent\":\n for part in np.unique(df.part):\n for run in np.unique(df.loc[df.part == part, 'run']):\n M = np.zeros([n_states, n_states])\n envstep = df.loc[(df.part == part) & (df.run == run),\n 'objnum'].values\n M = np.array(run_experiment(envstep, gamma, alpha, np.copy(M),\n n_states))\n M = M / np.sum(M)\n SR_matrices[(part, run)] = M\n\n return SR_matrices", "def getDimensions():", "def run(pos_data, ball_data, ht, stadium):\n pos_data_reshaped = reshape_pos_data(home, ball, ht)\n pos_data_rescaled = rescale_global_matrix(pos_data_reshaped, stadium)\n cutting_frames = determine_cutting_frames(pos_data_rescaled)\n segments = segment_position_data(pos_data_rescaled, cutting_frames)\n slices, possession_slices = segment_into_time_slices(segments)\n return slices, possession_slices", "def particle_images (sim,frame_id) :\n # get positions of all particles: define first the atom selection, then jump to\n # the user-requested trajectory frame, get the box dimensions (currently works\n # only for orthorhombic boxes, then calculate the image indices\n atoms = sim.u.select_atoms ('all')\n ts = sim.u.trajectory[frame_id]\n L = ts.dimensions[:3]\n pos = atoms.positions + L/2.\n return pos//L", "def testSimOuptputDimensions(self):\n self.tree.set_database(self.coal)\n sim_params = self.tree.get_simulation_parameters()\n self.assertEqual(sim_params[\"fine_map_x\"], 24)\n self.assertEqual(sim_params[\"fine_map_y\"], 24)\n self.assertEqual(sim_params[\"fine_map_x_offset\"], 0)\n self.assertEqual(sim_params[\"fine_map_y_offset\"], 0)\n self.assertEqual(sim_params[\"sim_complete\"], 1)", "def iridiumCatalyst():\n\n coords = [\n [-1.3672999, -1.4398999, 0.1359000],\n [-2.4911998, -0.6808999, 0.1396000],\n [-3.6534996, -1.1211999, -0.5090000],\n [-3.6468996, -2.3434998, -1.1725999],\n [-2.4848998, -3.1187997, -1.1555999],\n [-1.3670999, -2.6316997, -0.4883000],\n [-0.4373000, -3.1872997, -0.4306000],\n [-2.4432998, -4.0866996, -1.6463998],\n [-4.5575996, -0.5223999, -0.4887000],\n [-2.4206998, 0.5908999, 0.8954999],\n [-1.2879999, 0.7903999, 1.6181998],\n [-1.1378999, 1.9348998, 2.3084998],\n [-2.1077998, 2.9319997, 2.3219998],\n [-3.2770997, 2.7402997, 1.5819998],\n [-3.4330997, 1.5600998, 0.8608999],\n [-4.3267996, 1.4057999, 0.2659000],\n [-1.9411998, 3.8419996, 2.8913997],\n [-0.1872000, 2.0459998, 2.8181997],\n [0.4009000, -0.6061999, 1.1172999],\n [-1.2690999, -3.8143996, 3.7856996],\n [-0.1664000, -4.5494996, 4.2269996],\n [1.1218999, -4.0950996, 3.9273996],\n [1.2993999, -2.9384997, 3.1675997],\n [0.2001000, 
-2.2075998, 2.6786997],\n [-1.0849999, -2.6466997, 3.0382997],\n [-1.9573998, -2.0870998, 2.7090997],\n [0.8509999, -0.7173999, 2.6636997],\n [2.3007998, -2.5989997, 2.9226997],\n [-0.3087000, -5.4547995, 4.8119995],\n [0.6392999, 0.6220999, -0.5923999],\n [-0.0586000, 0.3754000, -1.7751998],\n [0.0637000, 1.5387999, -2.6275997],\n [0.0955000, 1.0794999, -4.0821996],\n [0.8716999, 0.3276000, -4.2397996],\n [0.2802000, 1.9248998, -4.7547995],\n [-0.8681999, 0.6330999, -4.3491996],\n [-1.1760999, 2.4077998, -2.3666998],\n [-1.2042999, 3.2867997, -3.0193997],\n [-2.0717998, 1.8058998, -2.5499998],\n [-1.2019999, 2.7410997, -1.3247999],\n [1.3891999, 2.1923998, -2.1029998],\n [1.3915999, 1.7859998, -0.7128999],\n [2.6481997, 1.5975998, -2.7492997],\n [2.6573997, 0.5124000, -2.6283997],\n [2.7186997, 1.8556998, -3.8108996],\n [3.5309997, 1.9918998, -2.2375998],\n [1.4299999, 3.7172996, -2.1670998],\n [0.6241999, 4.1645996, -1.5814998],\n [2.3812998, 4.0763996, -1.7610998],\n [1.3476999, 4.0651996, -3.2032997],\n [2.0756998, 0.4378000, 1.7666998],\n [3.3654997, -0.0810000, 1.8671998],\n [4.2709996, 1.0315999, 2.0579998],\n [5.4819995, 0.5533999, 2.8527997],\n [5.1838995, 0.0640000, 3.7825996],\n [6.0490994, -0.1689000, 2.2567998],\n [6.1442994, 1.3928999, 3.0939997],\n [4.6909995, 1.5017999, 0.6583999],\n [5.4398995, 2.2997998, 0.7063999],\n [5.1090995, 0.6483999, 0.1171000],\n [3.8181996, 1.8505998, 0.1015000],\n [3.3502997, 2.0656998, 2.7942997],\n [2.0458998, 1.7442998, 2.2507998],\n [3.6590996, 3.5300997, 2.4959998],\n [3.5358997, 3.7464996, 1.4332999],\n [2.9753997, 4.1760996, 3.0565997],\n [4.6847995, 3.7776996, 2.7930997],\n [3.2796997, 1.8273998, 4.3083996],\n [3.0708997, 0.7747999, 4.5225996],\n [4.2123996, 2.1093998, 4.8080995],\n [2.4671998, 2.4302998, 4.7258995],\n [1.7917998, -1.7489998, 0.1412000],\n [1.8500998, -3.1467997, 0.2110000],\n [3.0569997, -3.5802997, -0.4612000],\n [4.1632996, -3.6178996, 0.6029999],\n [4.3272996, -2.6173997, 1.0149999],\n [3.8420996, -4.2739996, 1.4174999],\n [5.1055995, -3.9992996, 0.1957000],\n [2.8261997, -4.9693995, -1.0466999],\n [2.6792997, -5.6906994, -0.2364000],\n [3.6907996, -5.2904995, -1.6389998],\n [1.9392998, -4.9911995, -1.6839998],\n [3.2640997, -2.4330998, -1.5048999],\n [2.7238997, -1.2952999, -0.7998999],\n [4.7166995, -2.1413998, -1.8718998],\n [5.1881995, -3.0188997, -2.3293998],\n [4.7565995, -1.3170999, -2.5912997],\n [5.2941995, -1.8488998, -0.9925999],\n [2.4197998, -2.6279997, -2.7728997],\n [1.3752999, -2.8206997, -2.5121998],\n [2.4501998, -1.7101998, -3.3667997],\n [2.7924997, -3.4536997, -3.3878997],\n [-2.2764998, -4.1481996, 4.0262996],\n [1.9905998, -4.6454995, 4.2840996],\n [-4.5414996, -2.6926997, -1.6821998],\n [-4.0522996, 3.5020997, 1.5576998],\n ]\n coords = [[float(j) / Bohr for j in i] for i in coords]\n\n symbols = [\n \"N\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"N\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"Ir\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"B\",\n \"O\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"O\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"B\",\n \"O\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"O\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"B\",\n \"O\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n 
\"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"O\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)", "def run_sequence(seq: Sequence, tracker: Tracker, debug=False, visdom_info=None):\n\n visdom_info = {} if visdom_info is None else visdom_info\n\n base_results_path = '{}/{}'.format(tracker.results_dir, seq.name)\n results_path = '{}.txt'.format(base_results_path)\n times_path = '{}_time.txt'.format(base_results_path)\n base_visual_path = '{}/{}'.format(tracker.visual_dir, seq.name)\n if not os.path.exists(base_visual_path):\n os.makedirs(base_visual_path)\n\n if os.path.isfile(results_path) and not debug:\n return\n\n print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name))\n\n if debug:\n output = tracker.run(seq, base_visual_path, debug=debug, visdom_info=visdom_info)\n # output = tracker.run(seq, debug=debug, visdom_info=visdom_info)\n else:\n try:\n # output = tracker.run(seq, debug=debug, visdom_info=visdom_info)\n output = tracker.run(seq, base_visual_path, debug=debug, visdom_info=visdom_info)\n except Exception as e:\n print(e)\n return\n\n tracked_bb = np.array(output['target_bbox']).astype(int)\n exec_times = np.array(output['time']).astype(float)\n\n print('FPS: {}'.format(len(exec_times) / exec_times.sum()))\n # if not debug:\n # np.savetxt(results_path, tracked_bb, delimiter='\\t', fmt='%d')\n # np.savetxt(times_path, exec_times, delimiter='\\t', fmt='%f')\n np.savetxt(results_path, tracked_bb, delimiter='\\t', fmt='%d')\n np.savetxt(times_path, exec_times, delimiter='\\t', fmt='%f')", "def getDimension(unique_name):", "def getDimension(unique_name):", "def move_simulation(self):\n import simulation\n\n dt = 1e-3 # Pas de temps en seconde.\n x, y = [], []\n state = simulation.State() # On positione la voiture a l'origine\n for i, t in enumerate(np.arange(0, self.portion_duration*self.nbr_portions, dt)):\n state.update(*self(t), dt=dt)\n if not i % 1000:\n x.append(state.x)\n y.append(state.y)\n\n # self.score = x[-1]**2 + y[-1]**2 # Bidon et mal fait, c'est juste pour le test.\n # self.score = y[-1]-abs(x[-1])\n # self.score = 1 / ( (self.arriveeX*self.nbr_portions/10.0-x[-1])**2 + (self.arriveeY*self.nbr_portions/10.0-y[-1])**2 ) # Tout droit jusqu'au point choisi\n self.score = 1 / ( (self.arriveeX*self.nbr_portions*4.0/20-x[-1])**2 +\n (self.arriveeY*self.nbr_portions*4.0/20-y[-1])**2 ) # Le point choisi dépend du point standard (0.1) et de nbr_portions\n\n return x, y", "def part1(input):\n ps = PlanetSystem(input)\n for i in range(3):\n ps.simulate_dimension(i, 1000)\n return ps.total_energy", "def main(filename, iterations, save_diagnostics, output_dir, burnin):\n #data = []\n #with open(filename,'rb') as json_data:\n #skip header\n #jsondata = json.load(json_data)\n #j=0\n #while j<271:\n #eruption_time = jsondata[j]['FIELD1']\n #waiting_time = jsondata[j]['FIELD2']\n #data.append([float(eruption_time), float(waiting_time)])\n #j=j+1\n\n #generate ida images\n data = 
np.array([[131,3,1],[49,1,1],[17,7,1],[55,7,19],[80,5,1],[40,2,2],[91,21,6],[19,16,1],[27,7,1],[15,50,2],[37,1,7],[17,3,1],[22,32,2],[68,2,1],[26,2,3],[15,2,3],[246,2,1],[25,2,1],[19,1,1],[98,1,2],[54,13,1],[168,2,4],[20,102,5],[40,2,1],[41,1,1],[44,19,16],[17,6,1],[92,12,1],[17,2,1],[16,5,3],[45,11,1],[20,10,1],[26,1,2],[21,9,9],[26,10,1],[187,4,2],[65,28,4],[17,9,33],[23,39,1],[58,4,4],[41,107,3],[28,3,1],[16,1,1],[17,16,4],[17,16,1],[17,5,1],[83,2,2],[17,1,2],[26,4,2],[22,7,2],[16,1,1],[15,2,1],[15,2,1],[111,8,1],[25,6,1],[112,4,1],[19,10,2],[38,25,4],[29,1,5],[17,2,1],[111,9,8],[53,5,4],[29,7,1],[25,8,2],[23,2,134],[32,6,1],[27,1,1],[61,4,2],[41,163,4],[57,11,2],[24,2,1],[16,18,1],[81,7,14],[169,5,1],[19,4,1],[412,5,1],[32,2,7],[19,28,3],[17,11,1],[44,4,5],[27,2,2],[18,1,7],[15,3,3],[18,10,1],[19,6,10],[46,2,5],[20,12,3],[25,6,4],[18,4,1],[15,40,8],[16,11,16],[237,1,1],[26,13,2],[26,4,1],[101,5,5],[50,2,1],[22,45,5],[16,7,2],[17,4,2],[19,2,3],[22,1,1],[260,6,1],[20,15,1],[24,5,1],[33,2,1],[16,1,5],[21,18,1],[22,1,1],[18,13,2],[124,3,1],[16,6,1],[19,6,2],[71,2,1],[232,2,2],[21,2,1],[231,11,1],[201,49,2],[28,12,1],[68,5,1],[56,26,7],[17,1,8],[19,10,2],[120,13,2],[218,3,1],[46,5,6],[57,4,1],[30,5,2],[17,8,4],[17,22,1],[15,5,1],[16,7,1],[26,13,1],[28,22,2],[100,1,2],[58,12,2],[52,9,11],[21,4,2],[18,4,1],[699,1,1],[401,6,3],[20,7,1],[20,3,13],[27,1,1],[35,2,2],[27,6,1],[15,13,1],[17,6,1],[26,28,4],[89,2,3],[36,11,2],[17,11,2],[15,1,1],[59,3,1],[15,3,1],[20,11,1],[49,1,1],[24,3,1],[25,7,1],[29,1,1],[61,2,2],[28,3,13],[82,2,8],[22,2,1],[21,25,3],[73,3,2],[22,8,1],[51,3,12],[16,6,1],[64,2,4],[22,2,2],[19,7,1],[69,2,1],[17,8,9],[19,1,13],[28,35,3],[134,2,1],[19,12,1],[27,13,1],[17,10,1],[16,17,4],[46,2,3],[15,1,2],[35,15,2],[20,6,1],[16,10,3],[33,11,1],[20,8,4],[15,5,1],[33,5,2],[460,6,1],[132,2,1],[73,14,3],[34,5,1],[123,1,2],[15,8,1],[30,1,1],[16,1,1],[73,3,1],[54,4,1],[17,1,9],[17,17,3],[22,1,3],[46,16,8],[18,1,1],[22,3,2],[21,4,1],[40,5,1],[19,2,1],[16,11,1],[19,4,1],[26,4,1],[87,1,3],[75,1,8],[25,1,1],[16,1,1],[17,10,3],[15,44,2],[79,3,1],[21,19,1],[292,5,13],[27,4,1],[25,2,1],[23,34,1],[36,2,1],[15,2,7],[18,3,3],[62,1,7],[16,61,5],[15,5,1],[36,5,1],[67,8,3],[18,4,1],[23,2,1],[16,21,3],[32,7,1],[22,6,1],[88,5,1],[19,2,4],[38,2,1],[47,6,28],[18,35,3],[159,15,1],[25,3,5],[295,9,4],[26,2,1],[27,8,3],[86,6,1],[24,25,4],[18,1,2],[16,6,1],[64,16,1],[39,1,2],[30,1,4],[44,1,3],[82,11,4],[28,13,2],[46,19,1],[15,26,1],[30,6,11],[51,3,6],[19,20,1],[940,6,4],[21,6,1],[29,2,1],[20,2,1],[31,2,1],[21,2,3],[25,27,1],[26,2,1],[17,4,1],[64,7,1],[126,7,15],[18,8,1],[20,13,2],[16,7,2],[18,2,1],[19,4,5],[29,1,1],[80,12,2],[42,14,6],[107,2,1],[15,4,1],[48,16,1],[62,3,2],[15,13,1],[29,48,7],[25,4,1],[17,5,20],[19,7,3],[22,10,3],[58,15,3],[17,14,1],[121,2,2],[33,64,11],[16,15,2],[39,6,2],[25,69,7],[69,2,1],[41,6,2],[20,5,1],[42,22,4],[18,17,4],[16,14,3],[27,14,1],[20,1,1],[44,1,101],[33,9,1],[26,2,8],[30,24,3],[27,24,2],[34,7,1],[39,6,3],[20,2,3],[55,5,1],[22,22,2],[17,2,1],[55,3,1],[29,10,5],[60,12,2],[18,13,3],[93,3,2],[15,3,1],[26,5,5],[18,1,1],[17,16,2],[15,13,3],[22,12,1],[256,19,27],[18,7,8],[22,3,1],[35,3,4],[16,2,1],[19,6,2],[24,1,1],[29,3,2],[36,21,8],[24,1,1],[18,6,2],[26,24,11],[19,15,2],[16,1,1],[28,4,1],[60,11,1],[62,4,2],[70,2,1],[75,1,2],[125,3,1],[21,6,1],[165,23,2],[108,1,1],[35,5,1],[251,19,12],[137,4,1],[81,11,4],[104,19,4],[18,18,3],[19,13,1],[18,112,5],[19,6,2],[28,7,2],[23,9,1],[20,15,7],[34,1,1],[24,12,3],[15,5,1],[40,9,4],[24,41,6],[35,1,1],[17,3,1],[17,3,4],[46,7,2],[21,8,10],[17,7,4],[36,
6,1],[32,6,2],[31,1,1],[17,32,5],[26,3,4],[16,4,1],[21,2,1],[19,4,1],[33,4,1],[46,7,1],[28,9,1],[169,9,24],[24,18,2],[103,6,1],[93,1,1],[156,2,1],[58,7,1],[55,30,3],[15,5,1],[20,9,1],[19,20,1],[44,1,3],[16,2,1],[23,4,1],[22,10,1],[16,138,5],[17,2,1],[17,1,2],[70,8,5],[15,3,6],[22,6,1],[20,1,1],[35,2,4],[15,3,1],[26,119,46],[390,18,2],[22,4,1],[175,5,2],[23,4,1],[26,2,21],[17,1,2],[112,4,1],[18,22,5],[22,2,1],[122,13,1],[18,1,1],[27,7,1],[26,18,5],[18,1,3],[28,1,15],[35,11,1],[15,2,1],[55,6,5],[67,3,1],[30,5,7],[31,12,1],[16,9,12],[43,7,1],[23,21,1],[43,2,7],[53,40,1],[58,6,1],[29,27,11],[65,6,2],[27,4,2],[15,7,2],[17,26,13],[48,4,79],[30,2,6],[25,1,1],[20,20,6],[59,2,5],[15,14,4],[18,7,1],[18,2,1],[28,7,1],[35,1,1],[15,12,4],[52,2,2],[16,25,1],[91,1,1],[27,7,3],[62,4,1],[29,11,1],[25,4,3],[15,1,1],[40,6,2],[19,2,2],[24,14,2],[33,5,1],[58,3,3],[23,1,4],[15,2,2],[92,5,1],[17,2,1],[16,10,1],[50,8,1],[24,2,1],[73,1,1],[30,33,55],[18,15,1],[15,9,4],[23,1,3],[17,5,1],[43,3,1],[15,9,2],[19,4,2],[20,20,4],[31,1,2],[21,3,1],[79,9,13],[20,3,24],[56,2,1],[26,1,2],[15,3,1],[30,12,1],[64,6,1],[327,8,47],[39,2,1],[22,17,5],[18,6,3],[74,14,2],[17,4,1],[39,1,3],[520,9,3],[65,9,1],[36,1,4],[264,3,3],[16,1,1],[18,5,3],[22,16,3],[21,2,1],[15,3,3],[49,5,1],[37,19,2],[19,13,2],[30,1,1],[44,4,1],[19,9,31],[22,4,2],[21,4,5],[16,4,1],[40,17,1],[15,12,4],[43,4,3],[21,30,1],[60,16,3],[28,2,1],[38,16,2],[19,3,1],[68,18,4],[1,4,3],[1,9,1],[1,2,2],[1,1,4],[1,148,4],[1,6,1],[1,16,1],[1,4,1],[1,19,3],[1,7,3],[1,2,2],[1,4,2],[1,47,5],[1,2,2],[1,1,4],[1,1,2],[1,1,2],[1,1,1],[1,4,2],[1,7,1],[1,4,6],[1,2,1],[1,5,4],[1,9,3],[1,9,2],[1,7,1],[1,4,1],[1,10,2],[1,1,1],[1,5,1],[1,5,1],[1,2,16],[1,2,1],[1,1,1],[1,3,2],[1,8,3],[1,1,18],[1,5,1],[1,14,3],[1,6,6],[1,7,1],[1,1,1],[1,16,1],[1,2,1],[1,2,1],[1,1,2],[1,4,4],[1,4,1],[1,9,1],[1,25,7],[1,1,1],[1,8,2],[1,1,4],[1,77,8],[1,1,3],[1,6,3],[1,4,2],[1,2,2],[1,2,1],[1,40,1],[1,26,3],[1,1,4],[1,1,1],[1,2,2],[1,1,2],[1,15,1],[1,35,86],[1,3,2],[1,4,1],[1,2,1],[1,4,3],[1,30,1],[1,2,1],[1,4,2],[1,2,1],[1,1,1],[1,2,1],[1,3,1],[1,2,3],[1,3,1],[1,14,1],[1,3,2],[1,7,4],[1,6,2],[1,2,1],[1,23,2],[1,4,1],[1,4,3],[1,26,3],[1,47,15],[1,3,5],[1,5,1],[1,3,1],[1,2,1],[1,2,1],[1,3,1],[1,36,1],[1,2,1],[1,1,9],[1,6,1],[1,2,1],[1,8,3],[1,7,1],[1,33,2],[1,14,4],[1,13,3],[1,2,1],[1,5,1],[1,7,2],[1,9,3],[1,6,1],[1,3,1],[1,9,1],[1,2,2],[1,2,1],[1,6,3],[1,4,2],[1,2,1],[1,1,1],[1,13,4],[1,9,2],[1,4,2],[1,7,14],[1,8,1],[1,3,1],[1,25,2],[1,2,1],[1,11,1],[1,2,1],[1,1,1],[1,3,3],[1,3,2],[1,2,1],[1,2,1],[1,2,8],[1,9,1],[1,13,9],[1,3,1],[1,8,1],[1,102,71],[1,22,1],[1,2,3],[1,22,2],[1,1,1],[1,3,1],[1,12,1],[1,3,2],[1,1,1],[1,5,2],[1,30,6],[1,14,1],[1,2,1],[1,1,1],[1,5,1],[1,8,1],[1,4,2],[1,3,1],[1,2,1],[1,1,1],[1,1,1],[1,12,1],[1,14,1],[1,10,2],[1,22,3],[1,15,2],[1,4,2],[1,5,1],[1,10,2],[1,10,26],[1,1,2],[1,1,2],[1,17,1],[1,1,1],[1,7,1],[1,1,1],[1,8,2],[1,5,2],[1,15,1],[1,16,2],[1,7,1],[1,26,1],[1,16,2],[1,13,6],[1,3,3],[1,2,1],[1,2,1],[1,5,3],[1,1,1],[1,4,1],[1,1,1],[1,2,2],[1,13,4],[1,50,2],[1,12,3],[1,2,1],[1,16,5],[1,2,8],[1,3,5],[1,1,1],[1,25,1],[1,5,1],[1,13,2],[1,1,2],[1,8,1],[1,13,1],[1,4,4],[1,2,3],[1,7,2],[1,2,4],[1,2,1],[1,1,2],[1,4,1],[1,3,2],[1,8,4],[1,4,1],[1,2,2],[1,2,1],[1,3,1],[1,7,1],[1,8,5],[1,34,4],[1,2,3],[1,1,1],[1,8,3],[1,3,1],[1,26,2],[1,3,1],[1,1,6],[1,2,4],[1,7,1],[1,9,2],[1,3,93],[1,2,1],[1,3,2],[1,3,3],[1,15,3],[1,12,1],[1,1,1],[1,1,5],[1,4,1],[1,1,4],[1,2,1],[1,6,4],[1,9,1],[1,1,9],[1,11,1],[1,68,2],[1,7,1],[1,11,1],[1,6,1],[1,5,2],[1,2,1],[1,19,1],[1,3,1],[1,1,2],[1,37,1],[1,19,1],[1,4,5],
[1,8,1],[1,1,1],[1,7,1],[1,3,1],[1,4,1],[1,6,7],[1,2,1],[1,14,3],[1,4,1],[1,6,5],[1,1,1],[1,1,1],[1,2,1],[1,1,2],[1,7,2],[1,8,1],[1,17,136],[1,6,1],[1,3,2],[1,9,12],[1,7,2],[1,2,9],[1,1,4],[1,3,1],[1,10,1],[1,6,16],[1,8,1],[1,2,2],[1,2,2],[1,4,3],[1,3,3],[1,24,3],[1,68,28],[1,16,1],[1,9,2],[1,1,2],[1,18,7],[1,3,1],[1,5,2],[1,1,3],[1,3,1],[1,3,8],[1,73,5],[1,6,3],[1,5,1],[1,2,1],[1,15,7],[1,80,2],[1,3,1],[1,12,3],[1,8,1],[1,2,1],[1,9,5],[1,3,2],[1,319,20],[1,2,1],[1,4,6],[1,5,4],[1,25,1],[1,8,1],[1,6,5],[1,18,1],[1,2,2],[1,5,2],[1,10,1],[1,10,1],[1,2,1],[1,6,2],[1,7,2],[1,39,1],[1,7,79],[1,28,4],[1,2,1],[1,4,1],[1,25,5],[1,23,3],[1,10,3],[1,2,1],[1,13,1],[1,2,2],[1,6,1],[1,6,4],[1,12,1],[1,4,1],[1,3,1],[1,10,1],[1,4,2],[1,7,1],[1,11,1],[1,6,1],[1,4,2],[1,3,3],[1,1,1],[1,1,1],[1,3,3],[1,3,2],[1,15,1],[1,1,1],[1,1,4],[1,26,2],[1,1,1],[1,7,1],[1,4,63],[1,1,19],[1,96,7],[1,7,2],[1,6,1],[1,4,1],[1,18,2],[1,1,2],[1,4,1],[1,3,3],[1,18,1],[1,3,1],[1,14,1],[1,6,2],[1,13,1],[1,1,5],[1,13,2],[1,1,1],[1,4,4],[1,10,1],[1,2,1],[1,12,3],[1,7,1],[1,8,1],[1,3,1],[1,2,2],[1,4,5],[1,9,1],[1,2,1],[1,2,1],[1,6,8],[1,32,3],[1,3,2],[1,6,1],[1,5,1],[1,7,1],[1,4,2],[1,2,1],[1,5,4],[1,1,2],[1,9,1],[1,2,1],[1,11,1],[1,5,2],[1,2,1],[1,1,1],[1,3,1],[1,7,13],[1,4,4],[1,1,1],[1,6,1],[1,1,3],[1,6,6],[1,6,1],[1,4,4],[1,10,1],[1,15,1],[1,3,7],[1,6,1],[1,9,1],[1,14,23],[1,14,2],[1,6,3],[1,2,1],[1,9,1],[1,1,3],[1,6,4],[1,15,2],[1,8,1],[1,6,6],[1,16,10],[1,5,4],[1,30,3],[1,7,1],[1,4,1],[1,3,1],[1,6,6],[1,1,2],[1,3,2],[1,1,1],[1,1,1],[1,1,1],[1,2,5],[1,2,1],[1,2,5],[1,24,1],[1,3,1],[1,6,1],[1,2,1],[1,4,1],[1,2,2],[1,4,1],[1,1,1],[1,3,1],[1,8,2],[1,4,2],[1,2,2],[1,2,1],[1,12,6],[1,2,1],[1,32,42],[1,7,1],[1,7,1],[1,12,1],[1,2,1],[1,6,1],[1,42,1],[1,2,1],[1,1,2],[1,2,1],[1,6,1],[1,2,2],[1,8,1],[1,22,4],[1,1,1],[1,11,20],[1,6,2],[1,2,1],[1,4,2],[1,9,1],[1,10,1],[1,16,5],[1,3,2],[1,8,1],[1,6,3],[1,1,2],[1,6,1],[1,2,1],[1,28,1],[1,18,1],[1,17,8],[1,4,1],[1,2,2],[1,13,1],[1,25,3],[1,7,4],[1,3,1],[1,1,1],[1,3,3],[1,4,1],[1,7,5],[1,2,2],[1,5,1],[1,2,2],[1,2,2],[1,14,1],[1,3,3],[1,4,1],[1,1,2],[1,11,1],[1,2,1],[1,6,1],[1,7,6],[1,7,1],[1,2,2],[1,2,1],[1,31,4],[1,4,3],[1,14,6],[1,4,4],[1,1,1],[1,2,1],[1,12,5],[1,4,1],[1,7,1],[1,3,1],[1,4,1],[1,11,1],[1,12,1],[1,3,2],[1,9,1],[1,17,2],[1,9,5],[1,6,1],[1,13,2],[1,5,1],[1,4,3],[1,3,1],[1,1,4],[1,7,1],[1,4,1],[1,3,1],[1,56,3],[1,1,1],[1,9,1],[1,4,1],[1,15,1],[1,2,1],[1,12,1],[1,4,2],[1,1,1],[1,1,1],[1,149,2],[1,56,1],[1,4,5],[1,2,2],[1,11,3],[1,2,3],[1,1,2],[1,2,1],[1,15,4],[1,2,2],[1,4,1],[1,17,2],[1,10,5],[1,14,2],[1,8,2],[1,4,2],[1,4,1],[1,6,1],[1,5,1],[1,7,2],[1,20,5],[1,3,1],[1,4,1],[1,11,1],[1,2,1],[1,1,3],[1,5,2],[1,6,1],[1,4,3],[1,4,3],[1,4,2],[1,7,3],[1,5,1],[1,1,1],[1,2,1],[1,8,1],[1,7,1],[1,2,1],[1,1,1],[1,1,1],[1,4,3],[1,11,1],[1,43,1],[1,7,8],[1,8,1],[1,1,1],[1,8,6],[1,9,3],[1,19,1],[1,2,1],[1,43,3],[1,4,5],[1,2,3],[1,4,1],[1,17,1],[1,9,1],[1,8,72],[1,2,1],[1,4,2],[1,16,1],[1,15,1],[1,8,1],[1,3,1],[1,7,8],[1,4,1],[1,23,2],[1,1,2],[1,1,1],[1,15,7],[1,7,4],[1,3,4],[1,5,1],[1,1,1],[1,6,83],[1,1,1],[1,4,3],[1,2,1],[1,3,2],[1,9,2],[1,5,1],[1,22,1],[1,3,6],[1,6,4],[1,4,1],[1,1,4],[1,1,1],[1,5,3],[1,1,2],[1,15,2],[1,8,1],[1,5,2],[1,1,1],[1,4,10],[1,63,1],[1,2,2],[1,2,1],[1,9,1],[1,4,3],[1,2,1],[1,24,1],[1,2,2],[1,2,2],[1,6,2],[1,13,5],[1,34,5],[1,10,1],[1,3,1],[1,22,9],[1,41,1],[1,1,4],[1,13,2],[1,18,1],[1,4,4],[1,7,1],[1,4,3],[1,14,4],[1,3,2],[1,2,1],[1,7,10],[1,15,3],[1,6,1],[1,1,1],[1,2,5],[1,4,10],[1,5,2],[1,12,6],[1,6,1],[1,19,134],[1,11,1],[1,233,9],[1,4,2],[1,40,1],[1,2,1],[1,10,1],
[1,3,1],[1,3,1],[1,3,1],[1,35,1],[1,2,7],[1,1,3],[1,3,1],[1,14,2],[1,1,1],[1,7,1],[1,6,5],[1,10,1],[1,5,3],[1,8,1],[1,11,1],[1,13,1],[1,8,9],[1,5,1],[1,3,1],[1,11,1],[1,2,1],[1,5,1],[1,7,1],[1,9,3],[1,2,3],[1,2,2],[1,29,2],[1,2,1],[1,4,3],[1,1,2],[1,2,2],[1,3,6],[1,11,1],[1,1,1],[1,11,1],[1,4,1],[1,6,1],[1,3,5],[1,4,1],[1,4,3],[1,34,1],[1,4,2],[1,1,9],[1,18,1],[1,9,3],[1,15,1],[1,4,4],[1,4,2],[1,9,1],[1,4,1],[1,10,1],[1,2,1],[1,2,4],[1,4,1],[1,1,2],[1,3,3],[1,2,1],[1,47,14],[1,3,1],[1,2,1],[1,3,1],[1,1,1],[1,20,1],[1,14,6],[1,2,2],[1,16,2],[1,2,1],[1,1,31],[1,5,9],[1,10,2],[1,10,3],[1,19,1],[1,1,1],[1,13,2],[1,5,1],[1,1,2],[1,1,2],[1,24,1],[1,9,2],[1,4,1],[1,10,3],[1,35,6],[1,1,1],[1,2,1],[1,1,1],[1,3,1],[1,4,5],[1,4,1],[1,1,1],[1,4,1],[1,10,2],[1,55,6],[1,3,22],[1,28,4],[1,6,3],[1,10,1],[1,6,187],[1,3,2],[1,12,5],[1,7,1],[1,4,1],[1,2,2],[1,2,1],[1,31,9],[1,2,8],[1,20,2],[1,36,2],[1,2,2],[1,15,5],[1,5,2],[1,3,2],[1,8,1],[1,1,1],[1,2,1],[1,37,1],[1,17,4],[1,8,1],[1,19,2],[1,7,1],[1,1,1],[1,1,1],[1,2,1],[1,9,1],[1,2,1],[1,2,1],[1,2,1],[1,19,1],[1,33,3],[1,4,1],[1,7,1],[1,3,1],[1,46,4],[1,2,1],[1,3,2],[1,1,2],[1,2,2],[1,14,1],[1,3,1],[1,11,2],[1,2,2],[1,21,2],[1,34,2],[1,4,1],[1,1,1],[1,2,1],[1,22,1],[1,64,9],[1,21,10],[1,3,3],[1,6,1],[1,16,2],[1,3,1],[1,31,4],[1,1,1],[1,1,2],[1,1,1],[1,3,1],[1,5,4],[1,27,1],[1,1,1],[1,2,2],[1,17,10],[1,4,1],[1,25,1],[1,41,1],[1,18,4],[1,17,40],[1,9,1],[1,2,1],[1,7,1],[1,21,2],[1,2,3],[1,3,1],[1,14,1],[1,8,2],[1,2,1],[1,2,2],[1,5,1],[1,1,2],[1,4,1],[1,6,5],[1,9,17],[1,5,1],[1,6,1],[1,4,1],[1,1,1],[1,3,1],[1,61,9],[1,6,1],[1,9,2],[1,2,2],[1,9,1],[1,7,4],[1,12,1],[1,2,2],[1,40,1],[1,17,13],[1,1,7],[1,11,2],[1,20,2],[1,2,1],[1,1,1],[1,12,10],[1,5,3],[1,2,1],[1,1,1],[1,23,2],[1,9,3],[1,4,1],[1,5,2],[1,4,1],[1,19,5],[1,5,1],[1,1,4],[1,5,1],[1,8,1],[1,9,1],[1,5,3],[1,43,3],[1,1,2],[1,3,1],[1,2,2],[1,15,38],[1,3,1],[1,25,1],[1,1,4],[1,5,6],[1,2,1],[1,4,3],[1,4,2],[1,3,1],[1,9,1],[1,4,1],[1,13,2],[1,7,4],[1,2,6],[1,12,1],[1,8,3],[1,1,4],[1,13,1],[1,3,4],[1,3,2],[1,2,2],[1,4,1],[1,6,1],[1,14,3],[1,7,1],[1,8,1],[1,8,1],[1,3,1],[1,32,5],[1,16,2],[1,2,3],[1,38,1],[1,5,4],[1,10,2],[1,2,7],[1,3,1],[1,8,1],[1,3,2],[1,1,3],[1,4,2],[1,71,12],[1,8,4],[1,2,12],[1,3,1],[1,12,2],[1,2,1],[1,5,1],[1,2,28],[1,19,5],[1,10,1],[1,9,2],[1,3,1],[1,7,6],[1,11,1],[1,2,1],[1,27,2],[1,7,4],[1,4,2],[1,12,8],[1,8,96],[1,12,1],[1,2,4],[1,7,5],[1,15,3],[1,3,2],[1,18,2],[1,25,3],[1,7,2],[1,18,2],[1,6,1],[1,10,2],[1,4,1],[1,1,3],[1,5,1],[1,19,2],[1,8,1],[1,50,4],[1,8,1],[1,11,1],[1,9,1],[1,2,1],[1,2,5],[1,3,1],[1,6,2],[1,1,1],[1,13,5],[1,19,1],[1,7,2],[1,17,1],[1,6,1],[1,4,1],[1,7,3],[1,13,3],[1,7,4],[1,5,2],[1,4,1],[1,11,16],[1,7,1],[1,1,1],[1,2,1],[1,2,1],[1,14,3],[1,30,1],[1,2,6],[1,6,2],[1,3,1],[1,4,1],[1,9,11],[1,6,1],[1,35,1],[1,2,8],[1,1,2],[1,3,2],[1,1,1],[1,9,1],[1,2,57],[1,2,1],[1,5,1],[1,4,2],[1,15,1],[1,12,3],[1,4,3],[1,17,1],[1,12,2],[1,21,12],[1,2,1],[1,9,1],[1,9,47],[1,49,4],[1,5,1],[1,4,1],[1,24,1],[1,2,2],[1,64,2],[1,48,7],[1,2,2],[1,10,2],[1,3,1],[1,11,1],[1,5,1],[1,1,2],[1,2,4],[1,6,1],[1,19,6],[1,6,2],[1,3,2],[1,1,1],[1,22,2],[1,3,2],[1,5,14],[1,2,1],[1,11,1],[1,4,2],[1,6,1],[1,24,10],[1,7,1],[1,2,74],[1,6,1],[1,28,1],[1,1,1],[1,1,1],[1,10,1],[1,88,4],[1,9,4],[1,26,1],[1,3,1],[1,4,1],[1,4,1],[1,6,1],[1,23,1],[1,2,7],[1,1,3],[1,7,1],[1,1,1],[1,5,2],[1,4,1],[1,2,1],[1,1,1],[1,15,5],[1,22,1],[1,6,3],[1,12,2],[1,48,14],[1,7,1],[1,5,1],[1,10,5],[1,5,1],[1,6,5],[1,2,3],[1,14,3],[1,3,1],[1,8,4],[1,2,5],[1,34,3],[1,2,1],[1,4,1],[1,6,7],[1,3,1],[1,3,3],[1,32,2],[1,3,1],[1,3,1],[1,2,1],[1
,3,1],[1,39,8],[1,1,1],[1,15,8],[1,3,4],[1,2,3],[1,1,3],[1,38,18],[1,6,1],[1,25,4],[1,2,1],[1,8,1],[1,3,1],[1,24,1],[1,5,5],[1,5,4],[1,2,3],[1,2,1],[1,5,4],[1,51,1],[1,23,3],[1,2,1],[1,2,1],[1,1,2],[1,7,2],[1,3,1],[1,1,1],[1,4,1],[1,2,1],[1,7,6],[1,8,1],[1,11,1],[1,2,6],[1,2,1],[1,2,1],[1,1,1],[1,26,1],[1,3,1],[1,2,1],[1,2,1],[1,2,1],[1,12,2],[1,1,3],[1,3,1],[1,2,4],[1,19,3],[1,3,1],[1,3,2],[1,49,3],[1,2,1],[1,21,3],[1,1,1],[1,5,1],[1,4,1],[1,2,2],[1,2,1],[1,1,1],[1,7,4],[1,2,1],[1,2,1],[1,2,1],[1,3,2],[1,26,2],[1,9,1],[1,2,2],[1,12,1],[1,4,32],[1,4,1],[1,17,1],[1,1,2],[1,77,4],[1,2,1],[1,12,1],[1,2,1],[1,2,4],[1,5,2],[1,10,3],[1,4,3],[1,2,1],[1,1,3],[1,16,4],[1,3,1],[1,40,2],[1,13,1],[1,2,1],[1,6,2],[1,12,2],[1,6,11],[1,6,1],[1,1,1],[1,10,6],[1,1,1],[1,6,5],[1,38,4],[1,2,7],[1,9,1],[1,5,2],[1,3,1],[1,2,1],[1,5,2],[1,4,1],[1,1,1],[1,1,1],[1,4,2],[1,4,3],[1,5,2],[1,1,4],[1,11,4],[1,14,4],[1,4,1],[1,17,2],[1,2,2],[1,39,1],[1,9,21],[1,14,2],[1,4,4],[1,4,3],[1,9,2],[1,1,1],[1,3,2],[1,1,1],[1,1,7],[1,16,4],[1,5,1],[1,2,1],[1,2,1],[1,2,1],[1,98,19],[1,4,1],[1,1,1],[1,5,1],[1,7,1],[1,1,3],[1,9,1],[1,4,2],[1,2,1],[1,7,2],[1,2,1],[1,1,2],[1,1,1],[1,5,2],[1,6,1],[1,11,6],[1,5,4],[1,40,5],[1,1,2],[1,9,1],[1,2,1],[1,6,1],[1,5,1],[1,11,2],[1,4,1],[1,3,17],[1,1,1],[1,1,5],[1,9,5],[1,60,1],[1,3,7],[1,3,4],[1,5,1],[1,3,10],[1,5,2],[1,7,1],[1,2,1],[1,14,14],[1,4,3],[1,1,2],[1,2,4],[1,5,1],[1,11,7],[1,3,1],[1,29,3],[1,2,4],[1,8,1],[1,53,1],[1,10,1],[1,7,2],[1,2,13],[1,58,1],[1,5,6],[1,2,1],[1,4,2],[1,4,2],[1,4,2],[1,5,2],[1,2,3],[1,12,2],[1,4,6],[1,34,1],[1,1,1],[1,8,1],[1,4,1],[1,2,1],[1,2,2],[1,16,1],[1,4,2],[1,3,13],[1,2,2],[1,46,2],[1,4,1],[1,6,1],[1,1,2],[1,2,1],[1,3,6],[1,3,1],[1,19,1],[1,2,1],[1,23,1],[1,3,1],[1,1,1],[1,7,2],[1,4,4],[1,18,3],[1,1,1],[1,7,2],[1,2,2],[1,7,1],[1,2,1],[1,2,1],[1,6,1],[1,9,4],[1,3,1],[1,5,1],[1,13,1],[1,2,2],[1,33,1],[1,12,1],[1,9,3],[1,2,1],[1,1,1],[1,18,1],[1,1,3],[1,3,15],[1,2,4],[1,17,1],[1,1,1],[1,1,1],[1,4,8],[1,1,2],[1,31,19],[1,1,5],[1,7,6],[1,12,4],[1,2,4],[1,7,8],[1,4,2],[1,13,2],[1,19,18],[1,42,4],[1,3,1],[1,17,1],[1,3,3],[1,4,2],[1,12,1],[1,1,6],[1,23,2],[1,3,1],[1,20,1],[1,21,4],[1,1,1],[1,3,2],[1,10,1],[1,9,1],[1,8,6],[1,21,3],[1,5,1],[1,7,6],[1,2,1],[1,5,1],[1,1,2],[1,11,1],[1,8,212],[1,9,3],[1,6,1],[1,1,2],[1,25,12],[1,4,1],[1,14,15],[1,4,1],[1,13,1],[1,2,2],[1,3,1],[1,4,1],[1,3,1],[1,1,1],[1,3,1],[1,9,7],[1,1,1],[1,6,1],[1,8,2],[1,8,1],[1,2,3],[1,3,1],[1,2,3],[1,1,2],[1,10,1],[1,6,1],[1,12,3],[1,12,1],[1,1,1],[1,2,1],[1,2,4],[1,4,1],[1,2,1],[1,1,1],[1,4,1],[1,23,2],[1,4,2],[1,20,1],[1,17,4],[1,8,2],[1,4,6],[1,4,1],[1,6,1],[1,10,1],[1,6,2],[1,1,1],[1,3,1],[1,4,1],[1,4,1],[1,16,143],[1,7,1],[1,10,1],[1,7,2],[1,3,3],[1,8,3],[1,2,1],[1,49,1],[1,2,7],[1,14,4],[1,31,3],[1,29,1],[1,31,8],[1,5,2],[1,7,1],[1,1,1],[1,4,5],[1,1,1],[1,7,3],[1,1,2],[1,5,3],[1,3,1],[1,7,4],[1,129,9],[1,13,1],[1,11,4],[1,6,28],[1,6,1],[1,6,1],[1,20,1],[1,2,1],[1,16,3],[1,3,3],[1,5,1],[1,64,1],[1,4,2],[1,7,1],[1,21,3],[1,2,2],[1,9,1],[1,2,1],[1,5,6],[1,6,6],[1,3,1],[1,5,1],[1,3,1],[1,3,1],[1,6,2],[1,2,3],[1,4,1],[1,1,1],[1,12,37],[1,6,1],[1,1,1],[1,4,2],[1,4,8],[1,6,2],[1,2,2],[1,19,1],[1,1,1],[1,1,3],[1,3,1],[1,4,5],[1,15,2],[1,8,3],[1,1,1],[1,2,2],[1,3,1],[1,10,1],[1,4,1],[1,1,2],[1,19,1],[1,5,2],[1,4,4],[1,3,2],[1,3,17],[1,1,1],[1,1,1],[1,2,1],[1,18,3],[1,3,1],[1,16,4],[1,5,1],[1,11,2],[1,19,8],[1,2,1],[1,2,1],[1,1,6],[1,3,1],[1,2,1],[1,1,1],[1,2,1],[1,11,3],[1,17,4],[1,4,1],[1,4,4],[1,5,2],[1,1,1],[1,1,2],[1,10,12],[1,2,2],[1,8,1],[1,1,2],[1,8,1],[1,17,2],[1,2,1],[1,4,1],[1,6,1],[1,20
,21],[1,5,7],[1,3,1],[1,13,2],[1,3,6],[1,8,3],[1,12,1],[1,12,2],[1,3,2],[1,15,2],[1,6,1],[1,9,5],[1,5,3],[1,4,1],[1,7,4],[1,4,4],[1,9,4],[1,11,1],[1,3,1],[1,17,1],[1,71,5],[1,7,1],[1,3,1],[1,5,1],[1,1,1],[1,1,2],[1,2,1],[1,1,2],[1,10,2],[1,3,1],[1,2,2],[1,5,1],[1,28,4],[1,2,1],[1,1,1],[1,9,1],[1,3,2],[1,8,2],[1,13,1],[1,2,1],[1,6,1],[1,25,79],[1,30,24],[1,10,31],[1,5,1],[1,9,1],[1,1,1],[1,4,1],[1,118,14],[1,18,3],[1,30,1],[1,10,3],[1,5,1],[1,5,1],[1,1,1],[1,6,1],[1,9,3],[1,6,2],[1,5,1],[1,2,2],[1,3,1],[1,7,4],[1,8,2],[1,10,2],[1,1,8],[1,41,1],[1,21,4],[1,6,1],[1,13,3],[1,5,1],[1,34,7],[1,22,1],[1,9,8],[1,5,3],[1,11,1],[1,2,1],[1,6,1],[1,4,1],[1,72,1],[1,44,3],[1,2,1],[1,1,1],[1,3,1],[1,8,2],[1,1,3],[1,14,1],[1,3,2],[1,1,1],[1,9,2],[1,17,1],[1,9,35],[1,3,1],[1,6,1],[1,2,11],[1,5,3],[1,1,1],[1,2,1],[1,14,7],[1,51,44],[1,3,6],[1,1,1],[1,6,2],[1,2,1],[1,11,2],[1,8,3],[1,3,2],[1,3,3],[1,4,1],[1,2,1],[1,5,1],[1,8,5],[1,60,1],[1,6,3],[1,36,2],[1,1,1],[1,2,1],[1,10,2],[1,26,2],[1,7,3],[1,6,1],[1,6,2],[1,3,3],[1,2,3],[1,6,2],[1,2,2],[1,2,2],[1,5,2],[1,2,1],[1,15,5],[1,1,2],[1,1,3],[1,37,24],[1,8,2],[1,17,2],[1,31,1],[1,14,2],[1,2,1],[1,16,2],[1,3,1],[1,2,2],[1,1,2],[1,2,3],[1,4,2],[1,1,1],[1,9,5],[1,1,2],[1,1,4],[1,4,18],[1,6,1],[1,12,1],[1,3,85],[1,17,2],[1,4,1],[1,7,1],[1,4,1],[1,3,1],[1,22,2],[1,1,1],[1,15,27],[1,4,1],[1,1,1],[1,1,3],[1,3,1],[1,35,2],[1,1,1],[1,33,4],[1,2,1],[1,3,3],[1,6,1],[1,9,1],[1,8,1],[1,6,1],[1,16,2],[1,20,2],[1,5,1],[1,1,5],[1,2,2],[1,12,25],[1,6,1],[1,13,1],[1,2,1],[1,2,1],[1,10,1],[1,2,1],[1,37,3],[1,2,1],[1,58,11],[1,14,3],[1,6,1],[1,6,1],[1,1,3],[1,1,1],[1,9,2],[1,1,502],[1,45,5],[1,5,1],[1,4,1],[1,2,8],[1,5,1],[1,1,1],[1,7,1],[1,4,1],[1,3,4],[1,1,1],[1,10,1],[1,9,1],[1,13,1],[1,10,8],[1,4,4],[1,7,1],[1,1,2],[1,2,2],[1,9,2],[1,13,2],[1,8,1],[1,1,1],[1,2,4],[1,29,1],[1,8,2],[1,7,3],[1,30,7],[1,1,1],[1,10,10],[1,3,1],[1,1,1],[1,5,1],[1,4,3],[1,7,1],[1,43,8],[1,1,2],[1,9,1],[1,1,1],[1,3,6],[1,9,1],[1,1,1],[1,7,1],[1,6,1],[1,2,2],[1,13,4],[1,13,3],[1,2,3],[1,8,1],[1,11,2],[1,9,53],[1,2,1],[1,16,1],[1,6,3],[1,48,3],[1,4,1],[1,7,3],[1,2,2],[1,8,1],[1,8,1],[1,26,2],[1,3,1],[1,8,2],[1,121,2],[1,2,2],[1,8,1],[1,2,2],[1,4,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,7,1],[1,7,2],[1,2,1],[1,8,2],[1,34,28],[1,3,2],[1,3,1],[1,5,1],[1,9,1],[1,7,1],[1,14,4],[1,1,1],[1,34,4],[1,1,1],[1,6,1],[1,3,1],[1,2,1],[1,4,1],[1,5,2],[1,10,1],[1,41,5],[1,7,2],[1,19,4],[1,3,3],[1,12,3],[1,7,1],[1,4,2],[1,16,1],[1,3,1],[1,8,4],[1,9,2],[1,8,2],[1,2,1],[1,10,2],[1,8,1],[1,16,2],[1,7,2],[1,5,1],[1,2,3],[1,15,4],[1,3,5],[1,4,4],[1,1,1],[1,3,2],[1,5,1],[1,8,4],[1,4,1],[1,41,7],[1,2,1],[1,1,3],[1,1,6],[1,2,1],[1,10,2],[1,10,2],[1,3,3],[1,39,4],[1,1,2],[1,5,7],[1,12,2],[1,15,5],[1,4,1],[1,13,1],[1,3,1],[1,44,3],[1,1,2],[1,1,1],[1,6,1],[1,3,1],[1,3,2],[1,7,15],[1,1,1],[1,11,4],[1,3,1],[1,1,3],[1,1,1],[1,2,1],[1,9,4],[1,22,1],[1,46,2],[1,3,18],[1,22,8],[1,3,1],[1,4,10],[1,12,16],[1,2,1],[1,8,3],[1,1,1],[1,2,4],[1,1,1],[1,6,4],[1,7,1],[1,7,4],[1,14,4],[1,1,1],[1,13,2],[1,61,1],[1,6,2],[1,16,1],[1,14,7],[1,9,2],[1,18,2],[1,9,3],[1,1,2],[1,4,1],[1,6,1],[1,6,4],[1,10,1],[1,5,2],[1,7,1],[1,3,1],[1,11,2],[1,53,1],[1,10,2],[1,17,1],[1,2,2],[1,5,14],[1,17,1],[1,2,1],[1,5,1],[1,28,2],[1,8,2],[1,4,1],[1,4,2],[1,21,1],[1,3,1],[1,3,2],[1,5,2],[1,5,1],[1,3,13],[1,13,2],[1,124,753],[1,2,2],[1,43,1],[1,6,1],[1,2,2],[1,11,1],[1,22,1],[1,5,2],[1,5,1],[1,8,1],[1,2,4],[1,2,2],[1,9,1],[1,6,1],[1,2,1],[1,6,1],[1,14,3],[1,21,1],[1,3,4],[1,3,3],[1,3,1],[1,2,2],[1,2,2],[1,5,2],[1,11,1],[1,6,1],[1,3,1],[1,64,1],[1,6,1],[1,2,12],[1,5,1],[1
,6,4],[1,10,1],[1,14,1],[1,14,1],[1,2,1],[1,2,1],[1,8,4],[1,17,2],[1,5,3],[1,64,1],[1,33,3],[1,18,2],[1,1,1],[1,42,9],[1,20,2],[1,10,2],[1,2,2],[1,3,1],[1,13,1],[1,5,1],[1,39,5],[1,8,2],[1,6,1],[1,3,2],[1,12,1],[1,2,4],[1,8,1],[1,2,1],[1,4,5],[1,7,1],[1,2,1],[1,2,1],[1,5,2],[1,15,3],[1,6,1],[1,1,1],[1,11,2],[1,4,2],[1,1,1],[1,7,3],[1,7,2],[1,3,1],[1,3,1],[1,2,1],[1,8,3],[1,3,1],[1,7,12],[1,8,1],[1,4,2],[1,6,2],[1,9,1],[1,3,30],[1,8,3],[1,8,2],[1,8,1],[1,11,1],[1,13,1],[1,2,1],[1,16,1],[1,10,1],[1,3,1],[1,6,4],[1,29,2],[1,4,2],[1,4,1],[1,1,1],[1,7,1],[1,1,1],[1,4,11],[1,1,1],[1,6,1],[1,26,1],[1,3,1],[1,2,1],[1,10,1],[1,4,1],[1,14,2],[1,10,1],[1,5,2],[1,5,1],[1,2,1],[1,26,33],[1,1,1],[1,11,2],[1,8,5],[1,18,1],[1,2,1],[1,5,1],[1,4,2],[1,5,1],[1,11,2],[1,1,2],[1,2,2],[1,6,6],[1,10,1],[1,14,1],[1,2,1],[1,13,1],[1,14,1],[1,8,2],[1,21,2],[1,1,2],[1,1,1],[1,14,1],[1,2,1],[1,15,2],[1,4,1],[1,3,1],[1,10,2],[1,4,2],[1,5,1],[1,11,22],[1,8,3],[1,4,1],[1,3,2],[1,1,2],[1,25,3],[1,2,1],[1,11,2],[1,5,2],[1,39,1],[1,1,1],[1,415,128],[1,6,1],[1,5,1],[1,8,5],[1,2,3],[1,1,1],[1,1,1],[1,4,1],[1,2,4],[1,4,1],[1,2,9],[1,4,2],[1,23,3],[1,6,9],[1,5,4],[1,2,5],[1,1,1],[1,7,1],[1,3,7],[1,1,2],[1,2,16],[1,5,2],[1,1,3],[1,4,1],[1,11,1],[1,2,2],[1,2,1],[1,10,1],[1,6,2],[1,11,1],[1,28,1],[1,21,3],[1,3,2],[1,3,1],[1,4,1],[1,1,2],[1,7,1],[1,11,4],[1,4,2],[1,22,4],[1,1,1],[1,1,1],[1,12,7],[1,1,1],[1,4,2],[1,2,1],[1,6,4],[1,14,3],[1,8,2],[1,1,11],[1,13,2],[1,4,1],[1,3,2],[1,95,10],[1,1,2],[1,4,2],[1,27,2],[1,2,1],[1,19,1],[1,13,4],[1,1,1],[1,37,1],[1,4,1],[1,5,1],[1,7,5],[1,1,1],[1,4,5],[1,5,1],[1,1,1],[1,16,2],[1,22,1],[1,4,2],[1,24,4],[1,10,1],[1,77,6],[1,21,1],[1,11,1],[1,2,1],[1,1,1],[1,4,5],[1,2,4],[1,55,4],[1,17,1],[1,1,3],[1,2,2],[1,7,1],[1,17,1],[1,34,2],[1,4,1],[1,2,2],[1,1,2],[1,100,1],[1,17,2],[1,8,6],[1,11,2],[1,11,2],[1,3,1],[1,5,2],[1,1,1],[1,6,7],[1,15,5],[1,7,1],[1,4,1],[1,5,1],[1,6,2],[1,7,1],[1,2,2],[1,10,2],[1,17,1],[1,10,2],[1,6,3],[1,21,1],[1,2,1],[1,78,4],[1,6,1],[1,1,2],[1,5,1],[1,186,9],[1,16,3],[1,15,13],[1,30,4],[1,2,1],[1,15,3],[1,13,1],[1,3,1],[1,1,1],[1,2,2],[1,5,5],[1,7,1],[1,16,1],[1,2,1],[1,14,2],[1,11,5],[1,9,1],[1,13,2],[1,2,1],[1,4,64],[1,4,1],[1,18,4],[1,3,1],[1,1,1],[1,16,2],[1,4,1],[1,11,4],[1,9,3],[1,3,1],[1,4,1],[1,1,1],[1,10,3],[1,7,1],[1,13,1],[1,16,4],[1,1,16],[1,2,2],[1,18,6],[1,42,2],[1,1,3],[1,15,1],[1,3,1],[1,43,1],[1,1,1],[1,27,2],[1,1,3],[1,1,5],[1,13,1],[1,1,1],[1,10,11],[1,8,1],[1,9,1],[1,13,1],[1,1,2],[1,13,3],[1,1,1],[1,5,1],[1,14,2],[1,14,1],[1,13,1],[1,4,3],[1,25,1],[1,1,3],[1,3,3],[1,4,1],[1,1,1],[1,4,4],[1,15,1],[1,2,1],[1,1,1],[1,7,12],[1,68,2],[1,13,2],[1,2,1],[1,6,4],[1,46,6],[1,1,1],[1,2,2],[1,4,1],[1,2,1],[1,11,5],[1,1,1],[1,9,1],[1,9,1],[1,13,1],[1,4,1],[1,14,1],[1,42,9],[1,5,1],[1,4,1],[1,24,7],[1,7,1],[1,17,1],[1,2,1],[1,2,5],[1,3,6],[1,2,1],[1,15,4],[1,3,2],[1,33,2],[1,30,4],[1,27,4],[1,1,1],[1,14,4],[1,2,3],[1,26,7],[1,22,1],[1,2,2],[1,2,2],[1,166,3],[1,4,4],[1,9,1],[1,12,15],[1,2,6],[1,13,2],[1,4,3],[1,9,2],[1,2,3],[1,3,3],[1,9,2],[1,22,1],[1,5,3],[1,3,4],[1,2,3],[1,3,1],[1,23,1],[1,18,1],[1,6,1],[1,4,1],[1,9,3],[1,35,1],[1,73,2],[1,1,3],[1,31,5],[1,25,1],[1,3,4],[1,11,1],[1,9,4],[1,2,1],[1,27,36],[1,23,5],[1,4,2],[1,1,2],[1,29,2],[1,3,2],[1,1,1],[1,4,1],[1,12,1],[1,36,16],[1,5,14],[1,19,1],[1,6,1],[1,6,1],[1,4,1],[1,6,1],[1,4,2],[1,9,7],[1,7,1],[1,30,4],[1,4,1],[1,18,3],[1,2,2],[1,3,1],[1,9,2],[1,2,2],[1,1,2],[1,1,2],[1,14,1],[1,3,1],[1,5,2],[1,10,1],[1,9,1],[1,10,3],[1,4,1],[1,2,1],[1,4,4],[1,2,1],[1,3,3],[1,39,2],[1,3,1],[1,1,3],[1,14,1],[1,2,4],[1,13,1],[
1,4,6],[1,3,5],[1,5,4],[1,8,1],[1,131,1],[1,28,1],[1,5,1],[1,8,5],[1,2,9],[1,4,2],[1,5,1],[1,46,3],[1,7,3],[1,1,1],[1,7,3],[1,2,1],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,4,6],[1,5,1],[1,9,3],[1,2,2],[1,9,1],[1,42,3],[1,11,3],[1,5,1],[1,1,2],[1,6,1],[1,37,51],[1,2,1],[1,4,3],[1,23,2],[1,1,15],[1,5,4],[1,1,4],[1,18,3],[1,12,3],[1,4,2],[1,4,1],[1,2,7],[1,2,6],[1,3,6],[1,6,1],[1,10,3],[1,4,2],[1,1,2],[1,4,1],[1,4,3],[1,1,3],[1,3,1],[1,6,2],[1,10,2],[1,6,4],[1,4,3],[1,7,2],[1,2,2],[1,4,1],[1,1,1],[1,4,5],[1,14,1],[1,20,4],[1,7,15],[1,18,2],[1,6,1],[1,1,1],[1,7,1],[1,5,2],[1,6,2],[1,4,1],[1,6,3],[1,2,1],[1,6,1],[1,4,1],[1,7,1],[1,7,4],[1,7,1],[1,1,1],[1,24,4],[1,2,2],[1,3,5],[1,8,1],[1,15,2],[1,5,1],[1,2,3],[1,2,2],[1,4,1],[1,6,1],[1,2,3],[1,11,1],[1,23,5],[1,2,2],[1,1,1],[1,8,1],[1,17,6],[1,1,1],[1,9,2],[1,1,1],[1,10,1],[1,5,1],[1,6,1],[1,6,1],[1,5,1],[1,2,6],[1,2,1],[1,9,1],[1,14,1],[1,18,8],[1,39,2],[1,13,1],[1,6,1],[1,6,2],[1,9,1],[1,14,1],[1,5,4],[1,26,2],[1,4,1],[1,7,2],[1,5,5],[1,2,1],[1,20,2],[1,14,1],[1,10,1],[1,4,1],[1,3,1],[1,10,2],[1,9,12],[1,4,4],[1,2,1],[1,4,1],[1,4,1],[1,2,1],[1,8,1],[1,2,4],[1,1,1],[1,33,2],[1,4,1],[1,5,1],[1,205,1],[1,2,1],[1,15,3],[1,5,1],[1,1,1],[1,1,1],[1,1,1],[1,13,1],[1,14,5],[1,6,4],[1,3,1],[1,7,5],[1,42,2],[1,11,1],[1,24,2],[1,11,2],[1,11,2],[1,12,1],[1,7,1],[1,1,1],[1,3,2],[1,21,1],[1,13,1],[1,2,1],[1,37,6],[1,8,4],[1,2,2],[1,2,2],[1,36,1],[1,8,1],[1,19,11],[1,19,7],[1,8,1],[1,18,2],[1,7,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,10,1],[1,6,1],[1,4,1],[1,10,1],[1,25,1],[1,14,1],[1,14,3],[1,4,1],[1,2,1],[1,2,2],[1,4,2],[1,3,4],[1,62,11],[1,4,1],[1,39,3],[1,65,2],[1,3,1],[1,11,2],[1,4,1],[1,2,2],[1,1,1],[1,2,3],[1,2,1],[1,17,7],[1,7,4],[1,1,4],[1,62,3],[1,17,3],[1,26,3],[1,15,1],[1,2,1],[1,4,6],[1,1,2],[1,8,2],[1,16,2],[1,1,1],[1,7,2],[1,4,1],[1,1,1],[1,7,2],[1,8,2],[1,12,1],[1,1,2],[1,2,1],[1,2,1],[1,26,7],[1,2,1],[1,5,1],[1,5,1],[1,5,1],[1,1,1],[1,6,27],[1,5,4],[1,6,1],[1,8,1],[1,38,2],[1,26,2],[1,13,1],[1,20,2],[1,6,6],[1,2,2],[1,2,1],[1,16,2],[1,88,1],[1,4,1],[1,5,3],[1,1,4],[1,1,4],[1,12,2],[1,3,1],[1,3,1],[1,3,1],[1,2,3],[1,6,1],[1,2,4],[1,28,2],[1,17,3],[1,10,1],[1,51,3],[1,1,1],[1,15,4],[1,10,14],[1,1,3],[1,3,3],[1,1,1],[1,5,1],[1,3,1],[1,23,3],[1,10,1],[1,1,1],[1,21,6],[1,11,1],[1,8,1],[1,1,1],[1,2,1],[1,1,3],[1,26,1],[1,1,2],[1,4,1],[1,4,1],[1,6,1],[1,6,1],[1,2,2],[1,11,5],[1,15,2],[1,13,1],[1,2,2],[1,4,1],[1,4,1],[1,2,6],[1,13,3],[1,23,2],[1,18,2],[1,8,2],[1,1,1],[1,4,1],[1,7,1],[1,2,1],[1,8,6],[1,12,1],[1,23,4],[1,9,4],[1,2,2],[1,8,1],[1,7,2],[1,2,2],[1,2,4],[1,8,16],[1,22,3],[1,2,1],[1,2,4],[1,2,1],[1,9,2],[1,3,3],[1,4,1],[1,3,9],[1,3,1],[1,2,2],[1,2,3],[1,11,1],[1,5,1],[1,5,1],[1,2,2],[1,10,20],[1,2,2],[1,2,1],[1,3,3],[1,10,1],[1,2,3],[1,2,1],[1,5,1],[1,4,2],[1,8,1],[1,2,2],[1,6,1],[1,5,1],[1,9,1],[1,3,2],[1,1,1],[1,2,6],[1,1,1],[1,5,1],[1,2,1],[1,16,1],[1,6,1],[1,2,1],[1,2,1],[1,5,1],[1,9,1],[1,10,16],[1,4,1],[1,4,2],[1,5,2],[1,8,1],[1,16,2],[1,2,1],[1,5,1],[1,1,2],[1,55,2],[1,20,1],[1,11,1],[1,5,2],[1,13,1],[1,1,1],[1,10,6],[1,5,2],[1,21,1],[1,7,3],[1,5,1],[1,7,1],[1,3,1],[1,6,1],[1,46,3],[1,8,5],[1,5,1],[1,2,1],[1,2,6],[1,22,1],[1,42,1],[1,1,1],[1,4,2],[1,13,1],[1,3,3],[1,2,2],[1,4,2],[1,1,3],[1,88,1],[1,24,4],[1,4,1],[1,3,1],[1,5,1],[1,17,6],[1,6,2],[1,20,3],[1,47,2],[1,2,7],[1,13,1],[1,1,3],[1,1,2],[1,2,2],[1,2,2],[1,4,3],[1,7,1],[1,3,1],[1,10,1],[1,2,1],[1,2,5],[1,1,2],[1,17,2],[1,12,4],[1,24,1],[1,3,1],[1,1,3],[1,6,1],[1,2,5],[1,3,1],[1,1,1],[1,13,2],[1,6,1],[1,2,1],[1,10,2],[1,4,1],[1,1,1],[1,18,7],[1,7,2],[1,8,1],[1,5,1],[1,2,1],[1,4,1],[1,2,2
],[1,14,1],[1,13,1],[1,10,4],[1,4,4],[1,6,4],[1,4,1],[1,16,2],[1,8,2],[1,3,3],[1,3,1],[1,21,2],[1,7,1],[1,2,1],[1,2,1],[1,2,3],[1,4,1],[1,6,1],[1,28,1],[1,2,7],[1,3,1],[1,23,4],[1,2,1],[1,6,1],[1,2,1],[1,4,1],[1,3,2],[1,1,1],[1,9,2],[1,9,2],[1,2,1],[1,4,2],[1,10,1],[1,12,1],[1,4,2],[1,7,1],[1,2,2],[1,9,1],[1,16,5],[1,31,2],[1,16,2],[1,22,3],[1,2,1],[1,6,1],[1,1,1],[1,6,3],[1,14,2],[1,5,3],[1,81,3],[1,8,2],[1,1,1],[1,61,9],[1,1,4],[1,2,1],[1,11,3],[1,3,5],[1,3,6],[1,4,7],[1,1,2],[1,5,2],[1,2,1],[1,3,2],[1,9,5],[1,9,1],[1,1,3],[1,3,2],[1,13,3],[1,14,1],[1,15,6],[1,6,1],[1,2,1],[1,7,1],[1,2,1],[1,10,2],[1,2,2],[1,14,1],[1,2,2],[1,3,3],[1,3,1],[1,4,1],[1,59,2],[1,5,2],[1,4,2],[1,1,1],[1,2,1],[1,4,1],[1,2,2],[1,5,4],[1,4,1],[1,4,1],[1,10,3],[1,2,2],[1,2,3],[1,8,1],[1,2,1],[1,1,1],[1,18,1],[1,6,1],[1,12,3],[1,5,3],[1,3,1],[1,7,3],[1,10,2],[1,2,23],[1,1,12],[1,1,1],[1,32,3],[1,2,1],[1,4,1],[1,12,2],[1,4,1],[1,3,1],[1,5,1],[1,4,2],[1,4,1],[1,16,2],[1,1,1],[1,4,1],[1,7,1],[1,2,4],[1,8,1],[1,4,4],[1,1,1],[1,1,2],[1,6,3],[1,8,2],[1,23,15],[1,2,2],[1,2,1],[1,2,1],[1,11,1],[1,3,2],[1,9,2],[1,4,2],[1,2,3],[1,34,1],[1,7,1],[1,2,4],[1,65,2],[1,41,3],[1,1,2],[1,1,1],[1,6,1],[1,6,1],[1,7,1],[1,3,1],[1,14,9],[1,6,1],[1,6,5],[1,2,13],[1,5,2],[1,2,1],[1,4,1],[1,17,1],[1,5,1],[1,1,1],[1,3,2],[1,9,1],[1,1,4],[1,48,2],[1,7,1],[1,4,1],[1,3,1],[1,4,2],[1,118,3],[1,2,1],[1,2,4],[1,2,1],[1,12,13],[1,2,1],[1,4,2],[1,4,1],[1,6,1],[1,1,1],[1,7,2],[1,10,1],[1,21,5],[1,5,2],[1,9,1],[1,2,2],[1,1,1],[1,1,1],[1,1,1],[1,3,1],[1,1,1],[1,7,1],[1,83,9],[1,6,2],[1,7,2],[1,13,1],[1,4,2],[1,3,1],[1,8,2],[1,2,1],[1,10,3],[1,2,1],[1,2,1],[1,9,11],[1,2,1],[1,3,1],[1,17,1],[1,7,2],[1,8,2],[1,20,1],[1,2,1],[1,1,2],[1,8,1],[1,2,1],[1,6,1],[1,21,3],[1,1,2],[1,5,5],[1,2,1],[1,2,3],[1,2,1],[1,2,2],[1,16,1],[1,2,1],[1,2,1],[1,3,1],[1,17,1],[1,6,1],[1,4,15],[1,1,1],[1,11,1],[1,84,15],[1,31,3],[1,2,2],[1,8,1],[1,9,1],[1,2,3],[1,15,2],[1,4,1],[1,18,1],[1,3,1],[1,1,1],[1,2,4],[1,2,2],[1,2,1],[1,2,1],[1,25,1],[1,3,1],[1,141,13],[1,4,2],[1,2,2],[1,14,2],[1,7,1],[1,30,9],[1,17,1],[1,1,2],[1,6,1],[1,2,1],[1,2,1],[1,8,1],[1,2,1],[1,10,1],[1,6,3],[1,12,1],[1,68,1],[1,2,1],[1,10,2],[1,14,2],[1,26,9],[1,7,3],[1,3,3],[1,6,6],[1,3,1],[1,18,4],[1,3,1],[1,4,4],[1,2,1],[1,1,1],[1,37,8],[1,8,6],[1,2,1],[1,9,6],[1,5,2],[1,3,1],[1,3,2],[1,2,1],[1,3,1],[1,13,7],[1,9,1],[1,122,2],[1,2,1],[1,22,6],[1,11,2],[1,16,2],[1,28,46],[1,2,4],[1,7,1],[1,2,3],[1,2,6],[1,2,2],[1,1,2],[1,1,1],[1,5,1],[1,1,2],[1,3,2],[1,7,6],[1,11,1],[1,21,1],[1,40,6],[1,14,2],[1,21,1],[1,1,1],[1,14,2],[1,21,1],[1,2,1],[1,1,1],[1,1,2],[1,40,2],[1,4,2],[1,1,3],[1,1,1],[1,107,2],[1,4,6],[1,136,6],[1,5,1],[1,9,1],[1,24,3],[1,7,1],[1,10,5],[1,29,3],[1,12,2],[1,10,3],[1,5,3],[1,2,1],[1,59,1],[1,5,2],[1,13,2],[1,1,2],[1,50,2],[1,1,3],[1,2,3],[1,6,1],[1,4,2],[1,5,4],[1,3,2],[1,8,1],[1,4,2],[1,1,1],[1,17,1],[1,13,3],[1,2,1],[1,7,1],[1,3,1],[1,8,1],[1,1,1],[1,20,1],[1,4,4],[1,1,2],[1,2,1],[1,2,1],[1,2,2],[1,1,2],[1,13,2],[1,4,1],[1,4,1],[1,3,1],[1,2,1],[1,4,4],[1,13,5],[1,9,1],[1,8,1],[1,12,1],[1,15,3],[1,2,1],[1,2,2],[1,4,1],[1,2,2],[1,1,1],[1,3,1],[1,13,1],[1,4,1],[1,9,4],[1,3,2],[1,2,1],[1,4,4],[1,1,3],[1,15,1],[1,4,1],[1,2,1],[1,3,1],[1,2,1],[1,3,6],[1,5,1],[1,7,10],[1,1,2],[1,6,2],[1,7,2],[1,3,1],[1,3,3],[1,6,1],[1,13,1],[1,22,3],[1,6,5],[1,6,1],[1,3,1],[1,3,1],[1,21,5],[1,11,2],[1,6,3],[1,38,4],[1,6,4],[1,4,1],[1,2,1],[1,5,5],[1,5,3],[1,40,1],[1,4,3],[1,8,1],[1,13,2],[1,4,2],[1,1,1],[1,9,9],[1,1,1],[1,12,2],[1,36,1],[1,2,1],[1,18,3],[1,28,1],[1,5,1],[1,20,4],[1,40,3],[1,3,1],[1,5,3],[1,2,1],[1,31,3
],[1,6,1],[1,3,1],[1,1,5],[1,3,3],[1,36,1],[1,1,1],[1,22,2],[1,9,2],[1,2,4],[1,2,2],[1,4,4],[1,2,1],[1,6,1],[1,3,3],[1,5,1],[1,13,2],[1,4,1],[1,1,3],[1,1,1],[1,11,5],[1,4,1],[1,2,3],[1,26,1],[1,9,1],[1,6,1],[1,15,1],[1,23,5],[1,3,5],[1,4,3],[1,8,1],[1,9,4],[1,2,1],[1,7,1],[1,1,6],[1,4,1],[1,43,1],[1,2,3],[1,1,1],[1,15,4],[1,3,1],[1,1,1],[1,10,1],[1,79,1],[1,1,14],[1,2,1],[1,6,1],[1,1,1],[1,24,1],[1,2,3],[1,9,2],[1,2,3],[1,8,1],[1,115,15],[1,1,1],[1,1,2],[1,3,1],[1,9,24],[1,6,1],[1,3,6],[1,10,3],[1,3,1],[1,1,1],[1,3,2],[1,2,1],[1,11,1],[1,5,1],[1,1,1],[1,2,1],[1,3,1],[1,5,1],[1,11,1],[1,2,1],[1,7,7],[1,15,1],[1,6,2],[1,51,7],[1,2,1],[1,54,1],[1,5,1],[1,1,1],[1,7,5],[1,1,1],[1,4,1],[1,3,1],[1,22,4],[1,5,3],[1,5,1],[1,64,9],[1,6,1],[1,28,6],[1,5,1],[1,11,1],[1,2,2],[1,4,2],[1,1,4],[1,8,1],[1,1,5],[1,7,1],[1,2,1],[1,2,2],[1,8,1],[1,11,3],[1,8,3],[1,7,1],[1,10,5],[1,5,1],[1,98,5],[1,18,1],[1,1,1],[1,5,1],[1,2,2],[1,14,2],[1,3,1],[1,1,1],[1,11,3],[1,7,9],[1,5,3],[1,3,1],[1,3,3],[1,125,34],[1,1,1],[1,2,1],[1,6,2],[1,2,2],[1,11,7],[1,5,2],[1,5,5],[1,6,1],[1,10,2],[1,14,2],[1,4,3],[1,8,7],[1,2,3],[1,2,2],[1,13,1],[1,6,1],[1,10,5],[1,11,1],[1,4,2],[1,14,1],[1,1,6],[1,15,1],[1,1,3],[1,5,3],[1,7,1],[1,2,1],[1,1,3],[1,2,4],[1,3,1],[1,8,3],[1,2,3],[1,2,1],[1,2,2],[1,2,1],[1,4,1],[1,16,2],[1,1,2],[1,1,5],[1,7,1],[1,3,1],[1,2,1],[1,16,3],[1,4,1],[1,8,2],[1,16,6],[1,12,2],[1,84,26],[1,10,2],[1,2,2],[1,5,1],[1,1,1],[1,8,1],[1,4,1],[1,4,1],[1,4,2],[1,4,1],[1,4,10],[1,14,2],[1,4,2],[1,5,2],[1,19,1],[1,4,3],[1,8,2],[1,6,1],[1,2,5],[1,2,1],[1,16,4],[1,4,1],[1,2,2],[1,7,1],[1,4,2],[1,4,1],[1,8,1],[1,10,2],[1,3,2],[1,3,1],[1,10,2],[1,1,1],[1,12,3],[1,37,1],[1,10,1],[1,16,4],[1,1,1],[1,11,1],[1,4,1],[1,8,6],[1,3,2],[1,66,2],[1,14,1],[1,2,4],[1,2,2],[1,7,2],[1,24,2],[1,5,1],[1,1,1],[1,1,1],[1,3,1],[1,31,2],[1,24,1],[1,8,5],[1,8,2],[1,3,4],[1,64,1],[1,1,4],[1,4,47],[1,8,4],[1,25,1],[1,19,2],[1,4,1],[1,33,4],[1,16,2],[1,4,1],[1,1,1],[1,2,3],[1,27,1],[1,20,1],[1,10,3],[1,2,1],[1,2,1],[1,76,1],[1,2,1],[1,5,1],[1,2,2],[1,15,3],[1,40,2],[1,4,22],[1,2,2],[1,2,2],[1,10,1],[1,3,1],[1,55,4],[1,2,7],[1,7,1],[1,4,6],[1,2,1],[1,2,1],[1,28,1],[1,2,2],[1,6,2],[1,6,2],[1,4,15],[1,3,2],[1,1,1],[1,29,1],[1,13,1],[1,16,1],[1,4,1],[1,7,7],[1,3,3],[1,16,4],[1,12,11],[1,1,1],[1,2,4],[1,54,2],[1,1,2],[1,6,2],[1,1,3],[1,2,2],[1,1,1],[1,2,1],[1,11,4],[1,9,1],[1,20,1],[1,1,1],[1,17,3],[1,1,1],[1,9,2],[1,2,2],[1,3,1],[1,29,19],[1,28,1],[1,8,3],[1,21,8],[1,7,3],[1,6,2],[1,5,2],[1,11,1],[1,1,2],[1,7,1],[1,22,1],[1,9,1],[1,3,3],[1,8,2],[1,5,1],[1,23,2],[1,11,5],[1,17,2],[1,5,5],[1,4,3],[1,33,1],[1,2,3],[1,6,1],[1,32,1],[1,6,2],[1,64,2],[1,3,1],[1,7,1],[1,3,6],[1,12,1],[1,1,1],[1,9,1],[1,38,3],[1,1,1],[1,3,1],[1,3,5],[1,78,16],[1,3,1],[1,7,1],[1,26,1],[1,9,2],[1,113,2],[1,9,1],[1,5,9],[1,3,2],[1,4,1],[1,2,1],[1,5,1],[1,24,3],[1,11,4],[1,38,2],[1,13,3],[1,7,3],[1,1,1],[1,1,2],[1,3,3],[1,5,3],[1,6,1],[1,7,1],[1,3,1],[1,4,2],[1,3,1],[1,3,1],[1,1,2],[1,2,1],[1,18,8],[1,1,3],[1,1,1],[1,2,5],[1,13,9],[1,2,2],[1,6,1],[1,5,1],[1,13,3],[1,7,1],[1,3,2],[1,2,1],[1,4,1],[1,2,2],[1,6,2],[1,4,3],[1,1,3],[1,3,2],[1,12,8],[1,6,1],[1,7,1],[1,6,3],[1,9,4],[1,16,17],[1,1,2],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,1,1],[1,4,2],[1,4,1],[1,8,1],[1,14,17],[1,7,1],[1,7,6],[1,5,1],[1,4,2],[1,80,2],[1,13,1],[1,11,1],[1,9,1],[1,2,4],[1,3,1],[1,2,1],[1,5,2],[1,3,1],[1,1,2],[1,12,1],[1,8,5],[1,6,3],[1,17,1],[1,3,4],[1,1,2],[1,5,2],[1,1,3],[1,2,2],[1,2,3],[1,2,1],[1,4,1],[1,1,1],[1,14,1],[1,2,1],[1,16,4],[1,15,2],[1,3,3],[1,8,8],[1,6,1],[1,25,4],[1,6,1],[1,7,3],[1,36,2],[1,2,1],[1
,32,2],[1,1,1],[1,7,1],[1,14,2],[1,21,1],[1,3,1],[1,27,7],[1,6,3],[1,1,5],[1,5,4],[1,12,2],[1,2,1],[1,2,1],[1,8,7],[1,8,8],[1,7,1],[1,2,1],[1,4,1],[1,1,7],[1,10,3],[1,17,1],[1,1,1],[1,8,6],[1,29,5],[1,12,2],[1,7,2],[1,7,1],[1,2,2],[1,2,1],[1,2,1],[1,54,9],[1,1,1],[1,12,2],[1,8,1],[1,8,4],[1,39,1],[1,3,3],[1,9,4],[1,6,5],[1,2,1],[1,15,2],[1,18,1],[1,2,2],[1,1,1],[1,1,1],[1,2,4],[1,3,1],[1,6,1],[1,3,3],[1,4,3],[1,3,2],[1,1,1],[1,2,2],[1,16,12],[1,4,2],[1,15,2],[1,6,1],[1,7,1],[1,9,8],[1,70,2],[1,5,1],[1,4,3],[1,24,4],[1,8,6],[1,18,43],[1,23,3],[1,10,1],[1,14,8],[1,6,4],[1,2,1],[1,2,1],[1,1,1],[1,2,1],[1,9,3],[1,6,4],[1,5,3],[1,43,2],[1,5,1],[1,11,1],[1,1,2],[1,5,3],[1,4,2],[1,16,2],[1,16,10],[1,5,1],[1,2,2],[1,2,1],[1,2,3],[1,4,6],[1,3,12],[1,6,1],[1,10,1],[1,1,2],[1,13,1],[1,3,1],[1,5,2],[1,6,1],[1,3,1],[1,2,1],[1,1,1],[1,13,1],[1,20,1],[1,20,2],[1,8,1],[1,5,2],[1,2,2],[1,10,5],[1,1,3],[1,7,2],[1,4,1],[1,15,18],[1,1,4],[1,5,2],[1,4,1],[1,1,11],[1,1,3],[1,4,1],[1,1,1],[1,2,1],[1,2,12],[1,5,1],[1,3,1],[1,25,2],[1,16,1],[1,10,1],[1,18,1],[1,28,3],[1,5,6],[1,4,2],[1,2,2],[1,51,124],[1,4,2],[1,5,1],[1,28,1],[1,4,5],[1,6,2],[1,20,1],[1,7,1],[1,5,3],[1,11,1],[1,4,3],[1,1,1],[1,6,3],[1,5,1],[1,3,1],[1,10,2],[1,64,5],[1,12,12],[1,5,2],[1,6,1],[1,8,2],[1,28,8],[1,19,1],[1,2,1],[1,1,1],[2,6,1],[2,2,2],[2,4,5],[2,11,1],[2,4,1],[2,4,1],[2,14,1],[2,19,2],[2,2,1],[2,6,4],[2,2,1],[2,6,2],[2,4,1],[2,12,2],[2,15,2],[2,5,1],[2,11,1],[2,11,1],[2,2,2],[2,3,3],[2,5,9],[2,2,1],[2,1,1],[2,1,4],[2,2,1],[2,4,1],[2,11,1],[2,6,1],[2,2,2],[2,8,1],[2,81,7],[2,8,1],[2,5,1],[2,6,3],[2,2,2],[2,39,1],[2,5,2],[2,5,2],[2,2,4],[2,10,2],[2,4,2],[2,2,1],[2,6,6],[2,8,2],[2,56,1],[2,9,1],[2,1,1],[2,16,3],[2,5,2],[2,3,2],[2,12,25],[2,4,4],[2,6,2],[2,7,1],[2,30,11],[2,4,1],[2,16,5],[2,8,2],[2,7,2],[2,11,1],[2,7,1],[2,2,1],[2,1,1],[2,2,9],[2,39,6],[2,2,1],[2,2,1],[2,7,1],[2,19,1],[2,11,2],[2,8,2],[2,4,7],[2,2,1],[2,7,1],[2,1,1],[2,4,1],[2,6,1],[2,6,1],[2,2,4],[2,26,37],[2,2,1],[2,13,2],[2,35,10],[2,13,1],[2,6,1],[2,10,2],[2,19,9],[2,7,1],[2,7,1],[2,2,2],[2,1,1],[2,5,2],[2,10,2],[2,6,1],[2,6,1],[2,6,1],[2,2,2],[2,1,1],[2,6,60],[2,8,1],[2,18,1],[2,4,2],[2,1,1],[2,1,1],[2,2,3],[2,21,2],[2,7,2],[2,11,3],[2,14,2],[2,3,2],[2,12,1],[2,1,2],[2,34,1],[2,1,1],[2,16,1],[2,1,1],[2,11,1],[2,14,1],[2,8,1],[2,9,1],[2,8,1],[2,3,1],[2,4,4],[2,4,1],[2,44,3],[2,4,1],[2,19,6],[2,19,2],[2,3,2],[2,17,2],[2,17,4],[2,1,6],[2,5,3],[2,27,6],[2,5,3],[2,6,3],[2,22,2],[2,22,3],[2,13,19],[2,8,1],[2,2,2],[2,7,1],[2,9,3],[2,2,1],[2,11,1],[2,8,1],[2,4,1],[2,8,2],[2,4,1],[2,1,1],[2,16,1],[2,2,1],[2,4,1],[2,9,11],[2,3,3],[2,3,1],[2,1,2],[2,3,1],[2,28,1],[2,8,5],[2,6,2],[2,8,1],[2,1,1],[2,10,1],[2,6,1],[2,55,1],[2,1,1],[2,4,2],[2,3,2],[2,16,4],[2,11,1],[2,2,3],[2,15,1],[2,1,10],[2,8,2],[2,15,1],[2,1,1],[2,7,114],[2,10,3],[2,1,1],[2,5,1],[2,3,3],[2,2,1],[2,1,1],[2,8,1],[2,96,1],[2,10,3],[2,3,2],[2,2,1],[2,1,1],[2,3,1],[2,25,2],[2,3,1],[2,12,4],[2,2,9],[2,3,1],[2,2,1],[2,9,1],[2,12,1],[2,18,1],[2,23,6],[2,9,85],[2,2,8],[2,1,2],[2,26,1],[2,8,2],[2,6,3],[2,1,4],[2,6,1],[2,8,3],[2,9,2],[2,1,1],[2,7,1],[2,1,3],[2,7,1],[2,3,2],[2,10,1],[2,2,2],[2,8,2],[2,4,4],[2,23,2],[2,8,5],[2,1,1],[2,3,3],[2,7,2],[2,1,1],[2,2,1],[2,1,7],[2,10,1],[2,18,1],[2,39,5],[2,13,2],[2,7,2],[2,6,2],[2,9,1],[2,5,1],[2,7,1],[2,35,2],[2,2,2],[2,5,2],[2,1,1],[2,9,2],[2,18,1],[2,2,3],[2,35,1],[2,6,5],[2,2,2],[2,2,1],[2,12,2],[2,1,1],[2,10,1],[2,6,1],[2,2,1],[2,15,2],[2,7,1],[2,5,4],[2,4,1],[2,2,14],[2,2,1],[2,5,3],[2,21,2],[2,10,1],[2,2,1],[2,8,1],[2,16,1],[2,9,2],[2,11,2],[2,1,6],[2,12,2],[2,18,2],[2,2,4]
,[2,4,3],[2,7,11],[2,3,1],[2,28,5],[2,1,4],[2,8,1],[2,2,5],[2,2,1],[2,3,1],[2,10,2],[2,3,3],[2,2,1],[2,17,1],[2,6,1],[2,16,1],[2,10,16],[2,17,1],[2,4,2],[2,1,1],[2,3,3],[2,7,3],[2,5,1],[2,11,1],[2,13,1],[2,3,1],[2,6,1],[2,5,2],[2,17,2],[2,33,13],[2,2,10],[2,3,5],[2,4,3],[2,5,1],[2,2,4],[2,8,2],[2,14,1],[2,16,1],[2,2,3],[2,19,6],[2,5,1],[2,8,2],[2,7,1],[2,1,1],[2,11,1],[2,2,2],[2,11,10],[2,10,1],[2,14,1],[2,1,7],[2,10,1],[2,34,1],[2,2,1],[2,2,4],[2,9,2],[2,16,1],[2,2,4],[2,8,3],[2,1,2],[2,3,5],[2,13,5],[2,20,1],[2,25,8],[2,9,1],[2,1,1],[2,15,3],[2,6,2],[2,394,278],[2,11,2],[2,1,1],[2,3,15],[2,4,2],[2,3,6],[2,6,3],[2,1,12],[2,2,1],[2,1,3],[2,11,2],[2,20,3],[2,31,9],[2,25,7],[2,15,2],[2,11,31],[2,17,2],[2,5,1],[2,2,2],[2,4,1],[2,6,2],[2,27,2],[2,10,2],[2,1,2],[2,26,5],[2,5,14],[2,12,2],[2,5,2],[2,2,1],[2,2,3],[2,6,1],[2,1,3],[2,9,3],[2,18,1],[2,5,5],[2,29,13],[2,14,1],[2,1,4],[2,3,1],[2,5,1],[2,19,4],[2,11,7],[2,8,3],[2,18,1],[2,3,5],[2,11,1],[2,4,1],[2,10,4],[2,19,2],[2,10,3],[2,12,2],[2,19,9],[2,73,3],[2,13,3],[2,12,1],[2,4,5],[2,55,1],[2,6,6],[2,27,2],[2,2,1],[2,20,1],[2,8,1],[2,1,1],[2,29,2],[2,10,8],[2,5,2],[2,10,2],[2,14,1],[2,10,1],[2,1,1],[2,4,2],[2,5,1],[2,1,4],[2,4,2],[2,9,1],[2,9,4],[2,2,1],[2,4,1],[2,6,2],[2,2,2],[2,10,15],[2,17,1],[2,9,1],[2,9,1],[2,8,2],[2,4,1],[2,4,1],[2,243,2],[2,9,3],[2,12,2],[2,4,3],[2,2,1],[2,1,2],[2,57,4],[2,7,2],[2,8,2],[2,14,2],[2,2,1],[2,6,1],[2,7,2],[2,8,1],[2,4,3],[2,36,5],[2,3,1],[2,1,1],[2,45,8],[2,1,1],[2,2,3],[2,9,1],[2,1,1],[2,13,2],[2,44,6],[2,2,1],[2,36,1],[2,4,1],[2,5,1],[2,3,2],[2,1,1],[2,28,2],[2,9,1],[2,3,3],[2,10,2],[2,16,1],[2,1,1],[2,1,1],[2,13,1],[2,14,3],[2,65,1],[2,7,1],[2,2,1],[2,11,8],[2,4,1],[2,17,1],[2,6,1],[2,15,5],[2,15,1],[2,17,2],[2,8,1],[2,8,1],[2,1,2],[2,5,7],[2,1,1],[2,3,2],[2,2,1],[2,4,1],[2,32,1],[2,3,1],[2,1,1],[2,1,1],[2,2,2],[2,2,1],[2,8,2],[2,11,3],[2,2,3],[2,42,3],[2,5,1],[2,6,2],[2,1,1],[2,9,1],[2,2,2],[2,5,1],[2,2,1],[2,7,1],[2,7,6],[2,6,2],[2,3,1],[2,1,3],[2,15,1],[2,23,1],[2,1,1],[2,3,1],[2,4,2],[2,8,1],[2,2,7],[2,3,4],[2,6,5],[2,4,1],[2,5,3],[2,16,5],[2,11,1],[2,13,1],[2,22,3],[2,10,5],[2,2,2],[2,2,2],[2,6,1],[2,7,1],[2,4,2],[2,4,3],[2,7,3],[2,7,4],[2,1,1],[2,71,9],[2,4,8],[2,33,4],[2,16,2],[2,1,18],[2,15,1],[2,3,1],[2,8,1],[2,6,3],[2,4,2],[2,1,1],[2,7,2],[2,2,8],[2,2,1],[2,8,1],[2,1,3],[2,5,1],[2,2,2],[2,11,1],[2,17,3],[2,118,1],[2,8,4],[2,14,1],[2,3,4],[2,14,1],[2,2,2],[2,4,3],[2,2,1],[2,11,1],[2,8,10],[2,1,2],[2,3,3],[2,2,2],[2,12,1],[2,2,2],[2,26,3],[2,3,2],[2,3,3],[2,19,1],[2,1,13],[2,23,2],[2,3,1],[2,7,4],[2,10,4],[2,2,3],[2,71,3],[2,3,3],[2,23,1],[2,1,1],[2,34,3],[2,62,1],[2,4,1],[2,7,2],[2,2,8],[2,6,1],[2,20,3],[2,26,2],[2,5,2],[2,2,1],[2,7,1],[2,1,1],[2,7,2],[2,28,7],[2,4,1],[2,2,2],[2,4,1],[2,7,1],[2,2,3],[2,3,1],[2,8,3],[2,43,1],[2,2,1],[2,1,4],[2,2,1],[2,13,3],[2,4,2],[2,6,1],[2,17,1],[2,2,8],[2,32,1],[2,11,2],[2,5,2],[2,45,3],[2,9,1],[2,14,2],[2,9,1],[2,2,1],[2,10,5],[2,2,1],[2,13,1],[2,2,2],[2,3,5],[2,2,1],[2,17,3],[2,11,1],[2,15,1],[2,13,4],[2,7,7],[2,10,2],[2,6,4],[2,2,3],[2,1,3],[2,27,2],[2,2,3],[2,2,1],[2,3,1],[2,3,9],[2,3,46],[2,11,1],[2,30,1],[2,5,1],[2,8,8],[2,2,1],[2,1,1],[2,2,1],[2,6,7],[2,1,1],[2,4,1],[2,4,2],[2,15,2],[2,6,7],[2,4,2],[2,5,1],[2,1,4],[2,2,3],[2,1,2],[2,2,2],[2,1,7],[2,15,2],[2,18,3],[2,2,1],[2,6,1],[2,8,1],[2,134,20],[2,26,1],[2,2,2],[2,8,4],[2,1,1],[2,3,1],[2,14,1],[2,3,1],[2,26,1],[2,19,1],[2,1,1],[2,1,1],[2,7,1],[2,5,2],[2,5,8],[2,3,4],[2,1,1],[2,2,2],[2,16,1],[2,7,2],[2,6,1],[2,1,6],[2,4,3],[2,2,2],[2,2,2],[2,2,1],[2,2,1],[2,1,2],[2,8,3],[2,4,1],[2,9,1],[2,18,33],[2,14,
1],[2,1,1],[2,3,2],[2,7,1],[2,14,4],[2,4,2],[2,31,7],[2,19,2],[2,11,4],[2,2,1],[2,7,2],[2,2,1],[2,2,3],[2,52,4],[2,4,1],[2,1,1],[2,4,3],[2,11,1],[2,3,2],[2,6,1],[2,10,3],[2,6,1],[2,12,1],[2,10,2],[2,4,2],[2,23,2],[2,3,3],[2,8,1],[2,21,6],[2,2,2],[2,1,1],[2,1,1],[2,16,3],[2,9,2],[2,5,1],[2,2,2],[2,1,4],[2,4,1],[2,1,25],[2,24,2],[2,6,1],[2,3,4],[2,10,4],[2,6,2],[2,35,2],[2,2,2],[2,1,1],[2,25,10],[2,8,1],[2,1,2],[2,1,1],[2,2,1],[2,3,8],[2,2,1],[2,2,1],[2,5,2],[2,4,3],[2,2,8],[2,1,1],[2,4,2],[2,3,3],[2,12,1],[2,3,2],[2,4,1],[2,2,4],[2,7,2],[2,1,1],[2,73,14],[2,90,1],[2,4,1],[2,2,1],[2,1,1],[2,6,3],[2,1,1],[2,4,1],[2,10,3],[2,2,3],[2,1,1],[2,6,1],[2,37,2],[2,10,1],[2,2,2],[2,60,2],[2,16,3],[2,6,1],[2,1,1],[2,3,4],[2,38,5],[2,6,2],[2,2,1],[2,2,1],[2,9,2],[2,11,1],[2,6,1],[2,9,1],[2,2,2],[2,4,3],[2,8,1],[2,3,2],[2,1,9],[2,14,2],[2,8,1],[2,30,4],[2,2,1],[2,31,2],[2,31,1],[2,21,23],[2,1,5],[2,4,1],[2,2,1],[2,5,3],[2,4,2],[2,10,2],[2,2,2],[2,18,1],[2,15,1],[2,2,1],[2,1,2],[2,5,1],[2,13,1],[2,14,4],[2,1,4],[2,5,1],[2,109,3],[2,18,2],[2,1,2],[2,164,114],[2,8,1],[2,2,3],[2,4,1],[2,1,1],[2,10,1],[2,9,2],[2,4,3],[2,1,75],[2,6,1],[2,17,2],[2,3,1],[2,9,1],[2,2,1],[2,21,1],[2,30,3],[2,7,2],[2,2,2],[2,63,5],[2,16,3],[2,6,1],[2,2,8],[2,25,2],[2,31,3],[2,126,21],[2,10,1],[2,2,2],[2,14,7],[2,6,10],[2,4,3],[2,7,1],[2,12,1],[2,2,1],[2,3,2],[2,2,15],[2,1,4],[2,4,1],[2,3,1],[2,4,1],[2,6,2],[2,7,3],[2,2,3],[2,9,2],[2,6,1],[2,2,1],[2,16,1],[2,22,2],[2,10,1],[2,10,4],[2,7,2],[2,13,1],[2,3,1],[2,7,2],[2,23,12],[2,3,1],[2,6,1],[2,4,2],[2,29,2],[2,5,3],[2,8,1],[2,1,1],[2,6,1],[2,3,1],[2,17,2],[2,15,1],[2,2,1],[2,6,1],[2,2,2],[2,30,1],[2,3,1],[2,2,2],[2,2,5],[2,2,1],[2,37,5],[2,6,2],[2,7,6],[2,2,3],[2,3,3],[2,2,5],[2,75,6],[2,2,3],[2,10,1],[2,2,3],[2,7,2],[2,30,1],[2,12,33],[2,1,1],[2,3,4],[2,14,1],[2,9,2],[2,8,1],[2,1,1],[2,9,1],[2,4,1],[2,2,1],[2,7,1],[2,4,1],[2,3,1],[2,4,3],[2,1,1],[2,5,2],[2,3,4],[2,4,2],[2,6,3],[2,13,5],[2,4,2],[2,6,1],[2,2,5],[2,2,3],[2,1,1],[2,14,1],[2,5,1],[2,4,2],[2,9,1],[2,7,6],[2,4,1],[2,19,2],[2,23,1],[2,20,7],[2,9,1],[2,4,1],[2,12,2],[2,9,4],[2,3,2],[2,3,7],[2,3,1],[2,10,2],[2,6,1],[2,7,1],[2,1,1],[2,9,1],[2,6,1],[2,1,1],[2,17,2],[2,9,1],[2,5,2],[2,1,1],[2,11,2],[2,9,1],[2,1,1],[2,3,6],[2,2,1],[2,5,9],[2,12,2],[2,2,1],[2,6,2],[2,17,4],[2,2,2],[2,7,1],[2,596,5],[2,6,1],[2,2,1],[2,58,125],[2,6,1],[2,8,1],[2,2,1],[2,3,1],[2,1,2],[2,11,4],[2,1,1],[2,9,6],[2,2,8],[2,1,1],[2,6,2],[2,1,1],[2,2,1],[2,7,2],[2,7,3],[2,14,2],[2,1,1],[2,18,9],[2,2,5],[2,2,12],[2,8,4],[2,6,4],[2,3,1],[2,19,2],[2,4,1],[2,2,1],[2,4,3],[2,3,1],[2,13,1],[2,1,1],[2,7,1],[2,1,1],[2,8,1],[2,13,14],[2,11,1],[2,31,1],[2,4,1],[2,6,1],[2,3,2],[2,26,1],[2,4,2],[2,1,1],[2,2,2],[2,1,2],[2,1,1],[2,7,1],[2,8,1],[2,6,2],[2,19,13],[2,2,3],[2,8,3],[2,1,6],[2,5,1],[2,1,1],[2,6,1],[2,9,1],[2,2,2],[2,35,1],[2,1,1],[2,27,2],[2,54,2],[2,6,2],[2,5,1],[2,2,1],[2,2,4],[2,2,1],[2,2,1],[2,14,1],[2,9,1],[2,53,17],[2,2,1],[2,10,1],[2,9,1],[2,23,1],[2,7,1],[2,12,4],[2,1,2],[2,8,1],[2,7,4],[2,2,1],[2,2,1],[2,3,1],[2,11,1],[2,2,2],[2,6,1],[2,2,1],[2,18,4],[2,3,4],[2,8,2],[2,13,1],[2,2,1],[2,1,2],[2,14,4],[2,8,11],[2,1,1],[2,8,3],[2,7,3],[2,90,1],[2,20,2],[2,16,1],[2,20,2],[2,3,1],[2,8,10],[2,10,1],[2,10,1],[2,1,1],[2,3,1],[2,5,1],[2,37,3],[2,24,3],[2,10,1],[2,3,1],[2,2,4],[2,4,1],[2,19,2],[2,1,1],[2,5,1],[2,8,1],[2,3,1],[2,1,1],[2,2,1],[2,2,32],[2,2,1],[2,4,1],[2,1,1],[2,2,2],[2,5,1],[2,2,3],[2,25,9],[2,2,1],[2,4,4],[2,2,1],[2,15,1],[2,59,1],[2,3,2],[2,4,1],[2,9,2],[2,3,10],[2,6,1],[2,5,5],[2,8,2],[2,2,2],[2,4,2],[2,10,1],[2,126,1],[2,3,1],[2,8,1],[2,9,2],
[2,1,30],[2,25,1],[2,7,3],[2,2,2],[2,1,3],[2,21,1],[2,38,1],[2,48,1],[2,22,1],[2,4,2],[2,55,2],[2,5,1],[2,15,1],[2,14,44],[2,4,1],[2,1,2],[2,2,3],[2,2,1],[2,3,3],[2,6,1],[2,2,1],[2,26,7],[2,4,1],[2,1,2],[2,3,2],[2,6,2],[2,10,1],[2,18,3],[2,2,1],[2,38,2],[2,1,1],[2,8,1],[2,8,1],[2,3,1],[2,4,1],[2,1,1],[2,1,2],[2,4,1],[2,26,2],[2,3,3],[2,2,1],[2,6,1],[2,19,1],[2,3,4],[2,2,1],[2,4,1],[2,11,1],[2,9,1],[2,9,1],[2,9,1],[2,1,1],[2,1,1],[2,7,1],[2,2,1],[2,11,4],[2,10,2],[2,4,1],[2,6,1],[2,4,1],[2,8,1],[2,11,1],[2,1,1],[2,7,1],[2,8,2],[2,9,1],[2,8,1],[2,41,2],[2,2,4],[2,1,6],[2,2,1],[2,6,3],[2,128,5],[2,2,1],[2,13,13],[2,6,1],[2,1,3],[2,3,3],[2,7,2],[2,10,12],[2,2,1],[2,8,1],[2,1,1],[2,7,1],[2,2,1],[2,10,2],[2,11,10],[2,1,1],[2,8,3],[2,4,5],[2,2,1],[2,14,2],[2,4,1],[2,4,1],[2,7,1],[2,6,1],[2,7,3],[2,1,1],[2,2,1],[2,7,2],[2,2,1],[2,6,1],[2,8,1],[2,2,4],[2,6,1],[2,43,1],[2,108,3],[2,8,1],[2,13,1],[2,4,1],[2,10,3],[2,2,1],[2,24,2],[2,1,2],[2,4,2],[2,2,2],[2,40,6],[2,6,2],[2,6,2],[2,4,3],[2,28,5],[2,4,1],[2,15,1],[2,12,1],[2,1,1],[2,27,1],[3,1,1],[3,5,2],[3,16,2],[3,16,3],[3,1,2],[3,98,2],[3,91,7],[3,6,37],[3,4,1],[3,9,1],[3,97,2],[3,6,1],[3,23,3],[3,115,1],[3,2,1],[3,1,1],[3,1,1],[3,14,4],[3,1,1],[3,28,1],[3,1,1],[3,6,1],[3,15,5],[3,3,1],[3,52,1],[3,2,3],[3,3,1],[3,4,5],[3,13,1],[3,16,3],[3,13,1],[3,17,1],[3,4,4],[3,6,7],[3,14,1],[3,32,1],[3,3,3],[3,11,4],[3,1,1],[3,8,6],[3,9,7],[3,2,1],[3,9,2],[3,5,2],[3,26,12],[3,11,3],[3,12,2],[3,4,2],[3,6,2],[3,30,6],[3,1,2],[3,10,1],[3,1,1],[3,4,1],[3,7,1],[3,30,29],[3,2,3],[3,2,2],[3,2,1],[3,11,1],[3,2,3],[3,3,1],[3,9,1],[3,2,2],[3,5,1],[3,1,2],[3,1,13],[3,6,9],[3,1,1],[3,6,2],[3,1,3],[3,4,1],[3,6,1],[3,9,3],[3,1,1],[3,9,2],[3,19,45],[3,2,1],[3,7,8],[3,21,3],[3,6,2],[3,2,1],[3,6,1],[3,5,1],[3,2,1],[3,15,7],[3,2,1],[3,9,3],[3,11,1],[3,4,1],[3,7,1],[3,2,1],[3,19,1],[3,5,1],[3,2,1],[3,1,1],[3,22,3],[3,21,5],[3,13,1],[3,2,1],[3,4,1],[3,23,1],[3,8,1],[3,3,2],[3,2,2],[3,4,1],[3,12,2],[3,5,2],[3,16,8],[3,6,1],[3,1,2],[3,2,1],[3,7,1],[3,6,1],[3,6,3],[3,45,1],[3,4,5],[3,1,2],[3,3,1],[3,2,1],[3,1,1],[3,12,1],[3,8,1],[3,3,1],[3,6,1],[3,2,2],[3,9,2],[3,5,2],[3,2,1],[3,3,1],[3,15,1],[3,11,1],[3,4,1],[3,9,2],[3,3,1],[3,4,1],[3,1,3],[3,6,15],[3,6,3],[3,2,6],[3,1,3],[3,3,2],[3,15,1],[3,6,1],[3,7,1],[3,5,1],[3,9,1],[3,49,2],[3,5,2],[3,9,4],[3,39,1],[3,4,3],[3,1,5],[3,1,2],[3,2,1],[3,14,2],[3,4,3],[3,18,1],[3,5,4],[3,19,3],[3,3,1],[3,2,1],[3,3,2],[3,48,10],[3,1,1],[3,5,6],[3,12,3],[3,1,2],[3,5,4],[3,4,1],[3,4,1],[3,5,1],[3,1,1],[3,10,1],[3,10,2],[3,6,3],[3,2,7],[3,4,1],[3,9,2],[3,1,1],[3,2,1],[3,4,6],[3,1,1],[3,25,9],[3,11,1],[3,2,1],[3,8,2],[3,1,1],[3,9,3],[3,4,6],[3,1,7],[3,1,1],[3,4,1],[3,11,2],[3,14,1],[3,65,2],[3,6,1],[3,5,2],[3,2,2],[3,13,1],[3,2,5],[3,2,1],[3,4,2],[3,25,1],[3,2,1],[3,2,3],[3,9,1],[3,5,5],[3,46,1],[3,6,2],[3,12,9],[3,4,4],[3,2,3],[3,13,5],[3,39,16],[3,3,1],[3,1,2],[3,68,14],[3,5,1],[3,11,1],[3,7,1],[3,4,1],[3,53,11],[3,4,3],[3,4,1],[3,2,1],[3,4,1],[3,1,1],[3,1,2],[3,8,4],[3,5,1],[3,6,5],[3,6,13],[3,403,3],[3,23,1],[3,3,3],[3,14,1],[3,10,1],[3,3,2],[3,46,11],[3,4,3],[3,29,1],[3,41,2],[3,11,1],[3,15,3],[3,11,2],[3,6,1],[3,3,1],[3,17,2],[3,14,3],[3,5,4],[3,2,1],[3,2,1],[3,5,6],[3,6,1],[3,54,2],[3,2,1],[3,4,2],[3,1,1],[3,7,1],[3,8,34],[3,7,1],[3,1,2],[3,3,2],[3,2,5],[3,1,1],[3,15,12],[3,13,1],[3,5,1],[3,1,1],[3,5,1],[3,39,1],[3,26,9],[3,11,1],[3,6,1],[3,2,1],[3,19,4],[3,4,5],[3,10,1],[3,11,6],[3,4,1],[3,38,1],[3,1,1],[3,1,3],[3,2,1],[3,5,10],[3,4,1],[3,18,2],[3,4,1],[3,19,1],[3,1,1],[3,8,6],[3,1,1],[3,9,1],[3,8,3],[3,15,4],[3,9,3],[3,13,1],[3,10,1],[3,1,2],[3,5,
4],[3,4,2],[3,4,1],[3,28,1],[3,6,2],[3,9,1],[3,1,2],[3,2,2],[3,25,1],[3,5,8],[3,5,3],[3,8,2],[3,2,1],[3,14,5],[3,2,1],[3,11,3],[3,10,1],[3,2,2],[3,1,1],[3,3,1],[3,9,1],[3,39,9],[3,27,2],[3,1,1],[3,1,3],[3,12,3],[3,6,1],[3,14,2],[3,17,3],[3,198,1],[3,3,1],[3,5,1],[3,1,1],[3,2,4],[3,12,1],[3,31,1],[3,8,14],[3,25,2],[3,16,2],[3,18,2],[3,2,3],[3,2,3],[3,6,28],[3,22,3],[3,6,1],[3,8,2],[3,4,3],[3,3,3],[3,8,1],[3,1,1],[3,1,2],[3,1,1],[3,1,1],[3,1,2],[3,6,2],[3,2,3],[3,4,1],[3,3,1],[3,1,1],[3,3,2],[3,8,10],[3,6,1],[3,2,1],[3,2,1],[3,5,1],[3,29,6],[3,10,1],[3,3,8],[3,1,3],[3,2,2],[3,3,1],[3,3,4],[3,5,19],[3,15,1],[3,65,1],[3,2,2],[3,60,3],[3,52,1],[3,1,1],[3,4,2],[3,4,1],[3,6,1],[3,7,4],[3,1,1],[3,13,1],[3,8,3],[3,13,1],[3,6,1],[3,3,2],[3,14,1],[3,2,2],[3,4,1],[3,1,1],[3,11,29],[3,7,1],[3,21,6],[3,4,1],[3,1,1],[3,2,1],[3,9,1],[3,2,4],[3,3,1],[3,2,3],[3,1,2],[3,3,2],[3,3,4],[3,16,2],[3,9,2],[3,2,1],[3,17,8],[3,9,4],[3,7,1],[3,6,4],[3,1,2],[3,2,1],[3,4,4],[3,2,1],[3,3,1],[3,3,1],[3,11,1],[3,2,2],[3,2,1],[3,2,3],[3,2,2],[3,10,6],[3,10,4],[3,1,1],[3,8,3],[3,29,2],[3,7,1],[3,2,1],[3,4,1],[3,11,1],[3,2,1],[3,2,2],[3,13,3],[3,4,1],[3,3,1],[3,2,4],[3,18,1],[3,12,1],[3,6,3],[3,3,1],[3,5,1],[3,3,2],[3,9,2],[3,5,1],[3,5,1],[3,11,1],[3,1,1],[3,39,18],[3,3,2],[3,4,1],[3,17,2],[3,14,2],[3,10,6],[3,1,1],[3,4,5],[3,2,1],[3,4,6],[3,12,1],[3,106,80],[3,32,1],[3,7,1],[3,8,1],[3,2,1],[3,33,2],[3,33,7],[3,10,1],[3,3,2],[3,4,3],[3,16,3],[3,7,1],[3,8,1],[3,16,1],[3,8,1],[3,8,1],[3,30,1],[3,7,1],[3,2,1],[3,3,10],[3,27,1],[3,2,1],[3,1,3],[3,2,1],[3,23,1],[3,1,1],[3,5,2],[3,6,1],[3,2,1],[3,2,13],[3,1,3],[3,6,2],[3,5,1],[3,26,1],[3,4,5],[3,2,1],[3,9,1],[3,6,1],[3,2,1],[3,21,2],[3,15,1],[3,4,2],[3,2,1],[3,30,1],[3,4,2],[3,2,1],[3,2,58],[3,8,2],[3,13,1],[3,16,2],[3,10,6],[3,6,1],[3,6,1],[3,2,6],[3,1,1],[3,2,4],[3,11,9],[3,25,2],[3,4,2],[3,1,1],[3,9,9],[3,1,9],[3,3,3],[3,4,1],[3,2,3],[3,5,2],[3,2,7],[3,2,1],[3,2,1],[3,6,3],[3,3,4],[3,1,2],[3,4,3],[3,7,118],[3,7,1],[3,6,1],[3,3,1],[3,1,15],[3,1,2],[3,4,2],[3,2,1],[3,4,1],[3,6,1],[3,23,1],[3,1,1],[3,3,1],[3,4,1],[3,10,3],[3,2,2],[3,6,5],[3,8,1],[3,3,1],[3,4,1],[3,20,2],[3,14,2],[3,7,1],[3,21,29],[3,10,2],[3,10,2],[3,3,3],[3,2,1],[3,3,2],[3,24,3],[3,3,1],[3,9,1],[3,6,1],[3,22,1],[3,13,1],[3,5,2],[3,1,1],[3,9,1],[3,10,2],[3,4,1],[3,7,1],[3,2,1],[3,12,4],[3,48,2],[3,43,1],[3,6,1],[3,1,1],[3,4,1],[3,14,10],[3,2,1],[3,1,1],[3,1,1],[3,3,1],[3,11,5],[3,36,1],[3,4,49],[3,11,1],[3,8,1],[3,2,2],[3,3,1],[3,3,1],[3,8,3],[3,15,8],[3,30,9],[3,23,5],[3,10,1],[3,7,6],[3,1,1],[3,9,2],[3,6,1],[3,3,1],[3,3,1],[3,2,1],[3,21,1],[3,13,2],[3,4,2],[3,9,2],[3,8,1],[3,2,2],[3,4,2],[3,1,1],[3,9,2],[3,32,2],[3,2,2],[3,10,1],[3,1,4],[3,4,3],[3,14,3],[3,5,2],[3,2,1],[3,3,1],[3,5,3],[3,14,3],[3,2,3],[3,6,1],[3,4,1],[3,1,1],[3,16,1],[3,3,1],[3,2,1],[3,5,1],[3,33,1],[3,3,1],[3,14,4],[3,8,3],[3,12,2],[3,14,1],[3,2,1],[3,1,1],[3,13,2],[3,8,1],[3,9,1],[3,17,1],[3,14,2],[3,16,1],[3,12,4],[3,2,1],[3,2,2],[3,20,1],[3,2,2],[3,8,4],[3,7,3],[3,8,1],[3,1,2],[3,5,5],[3,29,1],[3,1,1],[3,2,1],[3,8,2],[3,2,1],[3,7,9],[3,3,2],[3,7,1],[3,6,1],[3,6,2],[3,1,26],[3,3,3],[3,7,1],[3,2,2],[3,8,2],[3,7,1],[3,3,1],[3,4,4],[3,11,1],[3,5,15],[3,28,1],[3,3,8],[3,3,3],[3,2,4],[3,6,4],[3,3,2],[3,2,2],[3,5,1],[3,12,2],[3,10,2],[3,1,1],[3,6,1],[3,2,1],[3,3,2],[4,8,1],[4,3,1],[4,23,1],[4,4,9],[4,6,2],[4,9,1],[4,9,6],[4,5,9],[4,8,1],[4,2,1],[4,2,3],[4,8,1],[4,1,1],[4,4,1],[4,8,1],[4,2,1],[4,16,1],[4,1,8],[4,4,1],[4,1,3],[4,18,1],[4,2,1],[4,4,9],[4,2,1],[4,3,1],[4,9,2],[4,2,1],[4,7,3],[4,5,4],[4,27,2],[4,1,1],[4,8,2],[4,7,1],[4,8,1],[4,9,4],[4,3,
2],[4,6,4],[4,2,2],[4,13,5],[4,8,1],[4,10,2],[4,1,1],[4,2,1],[4,1,2],[4,6,2],[4,5,2],[4,8,2],[4,16,2],[4,7,2],[4,102,5],[4,2,2],[4,1,1],[4,2,1],[4,1,2],[4,2,1],[4,29,4],[4,2,1],[4,1,1],[4,1,4],[4,3,2],[4,6,1],[4,19,2],[4,4,3],[4,1,12],[4,1,1],[4,62,3],[4,14,1],[4,1,1],[4,1,1],[4,7,4],[4,9,1],[4,15,1],[4,16,15],[4,2,2],[4,2,1],[4,41,3],[4,7,8],[4,7,3],[4,5,1],[4,9,1],[4,6,1],[4,1,3],[4,15,1],[4,5,4],[4,28,2],[4,11,3],[4,15,1],[4,1,1],[4,1,1],[4,12,1],[4,16,4],[4,12,5],[4,5,2],[4,8,4],[4,124,115],[4,11,3],[4,46,10],[4,4,1],[4,3,1],[4,2,1],[4,27,1],[4,1,1],[4,20,1],[4,2,1],[4,4,1],[4,53,1],[4,18,1],[4,1,1],[4,8,2],[4,3,1],[4,2,1],[4,5,1],[4,2,3],[4,2,5],[4,3,1],[4,8,1],[4,2,5],[4,8,2],[4,9,2],[4,48,1],[4,9,1],[4,20,2],[4,4,4],[4,3,2],[4,8,2],[4,6,2],[4,12,6],[4,9,1],[4,3,1],[4,4,1],[4,5,3],[4,5,1],[4,8,4],[4,3,1],[4,7,1],[4,6,2],[4,15,16],[4,6,1],[4,50,4],[4,23,4],[4,9,7],[4,8,2],[4,1,1],[4,2,1],[4,9,1],[4,12,1],[4,4,3],[4,2,2],[4,42,4],[4,1,1],[4,6,1],[4,11,10],[4,6,11],[4,7,1],[4,4,2],[4,4,2],[4,6,1],[4,59,4],[4,1,1],[4,2,7],[4,12,20],[4,11,3],[4,4,1],[4,12,3],[4,6,3],[4,7,2],[4,17,4],[4,106,8],[4,6,2],[4,7,1],[4,1,1],[4,8,1],[4,4,6],[4,3,1],[4,4,3],[4,14,3],[4,15,2],[4,4,1],[4,44,91],[4,7,2],[4,3,2],[4,2,1],[4,23,2],[4,30,1],[4,2,2],[4,10,1],[4,6,9],[4,6,2],[4,3,2],[4,3,2],[4,20,1],[4,4,1],[4,18,2],[4,12,1],[4,20,14],[4,10,1],[4,3,1],[4,2,1],[4,3,2],[4,3,3],[4,6,3],[4,2,4],[4,8,1],[4,8,5],[4,3,1],[4,10,2],[4,2,1],[4,1,1],[4,10,1],[4,25,2],[4,1,1],[4,4,1],[4,63,2],[4,1,1],[4,4,1],[4,6,7],[4,2,3],[4,8,1],[4,19,2],[4,11,1],[4,30,10],[4,4,4],[4,2,3],[4,2,1],[4,43,29],[4,2,1],[4,1,1],[4,17,1],[4,14,1],[4,13,1],[4,6,4],[4,2,2],[4,1,2],[4,3,1],[4,7,3],[4,4,1],[4,4,1],[4,1,1],[4,13,5],[4,2,1],[4,1,1],[4,5,1],[4,4,2],[4,13,2],[4,10,4],[4,8,1],[4,3,1],[4,2,2],[4,8,3],[4,4,2],[4,6,1],[4,7,1],[4,14,29],[4,19,1],[4,7,1],[4,19,1],[4,24,2],[4,2,1],[4,1,1],[4,28,1],[4,1,1],[4,2,1],[4,3,1],[4,2,1],[4,1,7],[4,2,4],[4,3,1],[4,29,1],[4,2,1],[4,14,1],[4,2,1],[4,28,3],[4,11,3],[4,1,2],[4,21,2],[4,1,1],[4,15,1],[4,17,1],[4,16,1],[4,13,1],[4,2,1],[4,15,5],[4,19,1],[4,17,1],[4,5,3],[4,12,2],[4,33,1],[4,8,1],[4,15,4],[4,2,11],[4,4,1],[4,1,10],[4,39,1],[4,28,1],[4,25,2],[4,1,1],[4,14,2],[4,8,32],[4,9,1],[4,7,1],[4,6,2],[4,1,2],[4,3,1],[4,6,2],[4,12,2],[4,2,2],[4,5,2],[4,18,1],[4,5,3],[4,6,2],[4,25,1],[4,3,16],[4,14,4],[4,2,6],[4,14,2],[4,3,1],[4,4,1],[4,9,3],[4,28,2],[4,9,1],[4,2,1],[4,7,1],[4,2,1],[4,1,4],[4,4,3],[4,1,1],[4,16,6],[4,3,1],[4,10,1],[4,12,3],[4,8,1],[4,4,1],[4,15,2],[4,4,1],[4,2,3],[4,2,9],[4,4,1],[4,7,2],[4,14,1],[4,31,3],[4,13,1],[4,19,2],[4,8,3],[4,2,1],[4,12,1],[4,5,1],[4,45,3],[4,6,1],[4,1,1],[4,12,6],[4,4,3],[4,3,1],[4,5,2],[4,4,4],[4,19,2],[4,8,1],[4,2,1],[4,27,2],[4,73,3],[4,22,2],[4,1,2],[4,7,46],[4,9,2],[4,2,1],[4,524,305],[4,7,1],[4,26,1],[4,2,1],[4,6,1],[4,30,2],[4,6,1],[4,25,92],[4,2,1],[4,13,1],[4,1,4],[4,1,7],[4,6,1],[4,8,2],[4,6,1],[4,4,2],[4,2,6],[4,12,2],[4,2,2],[4,5,2],[4,3,2],[4,13,1],[4,4,1],[4,6,3],[4,14,1],[4,15,1],[4,25,1],[4,3,1],[4,9,4],[4,94,3],[4,11,2],[4,12,4],[4,7,3],[4,3,1],[4,9,2],[4,3,1],[4,2,1],[4,8,3],[4,7,5],[4,2,45],[4,10,1],[4,10,4],[4,5,3],[4,6,6],[5,5,1],[5,2,1],[5,3,3],[5,11,2],[5,28,1],[5,8,1],[5,4,1],[5,4,1],[5,12,1],[5,7,1],[5,1,1],[5,38,7],[5,6,2],[5,4,2],[5,5,1],[5,2,2],[5,2,7],[5,1,4],[5,4,1],[5,4,1],[5,1,2],[5,3,1],[5,7,1],[5,2,1],[5,10,2],[5,4,1],[5,2,1],[5,2,2],[5,3,1],[5,15,78],[5,2,1],[5,1,5],[5,10,1],[5,6,4],[5,10,2],[5,5,1],[5,1,1],[5,1,1],[5,2,2],[5,6,1],[5,2,2],[5,6,2],[5,10,2],[5,3,1],[5,6,2],[5,4,3],[5,16,5],[5,47,48],[5,2,5],[5,6,7],[5,4,2],[5,
3,1],[5,2,1],[5,8,1],[5,7,1],[5,2,2],[5,2,1],[5,3,1],[5,7,4],[5,1,1],[5,1,1],[5,8,6],[5,1,4],[5,9,3],[5,11,4],[5,6,1],[5,6,1],[5,2,1],[5,5,1],[5,84,1],[5,2,33],[5,8,1],[5,6,3],[5,5,3],[5,2,1],[5,10,2],[5,3,1],[5,68,9],[5,6,2],[5,21,11],[5,3,4],[5,3,1],[5,16,3],[5,2,2],[5,2,1],[5,14,2],[5,24,2],[5,19,1],[5,1,4],[5,1,1],[5,3,1],[5,6,1],[5,2,1],[5,5,2],[5,4,3],[5,26,3],[5,2,1],[5,6,4],[5,2,1],[5,6,3],[5,5,1],[5,8,3],[5,1,3],[5,9,1],[5,1,2],[5,11,2],[5,23,1],[5,7,1],[5,2,2],[5,3,2],[5,2,1],[5,11,2],[5,8,2],[5,1,1],[5,4,1],[5,2,1],[5,7,1],[5,11,1],[5,1,1],[5,33,1],[5,4,1],[5,5,1],[5,17,3],[5,1,2],[5,18,2],[5,1,2],[5,1,1],[5,2,3],[5,4,2],[5,2,1],[5,13,7],[5,5,1],[5,19,4],[5,23,9],[5,11,6],[5,7,2],[5,10,1],[5,2,1],[5,26,1],[5,3,3],[5,3,2],[5,3,2],[5,15,3],[5,2,1],[5,3,1],[5,4,1],[5,8,1],[5,4,1],[5,23,1],[5,6,1],[5,1,3],[5,124,17],[5,1,1],[5,1,1],[5,15,1],[5,11,2],[5,2,1],[5,2,2],[5,3,2],[5,1,1],[5,6,4],[5,6,1],[5,3,3],[5,6,5],[5,17,1],[5,7,2],[5,5,1],[5,11,1],[5,3,2],[5,36,2],[5,17,7],[5,4,1],[5,7,2],[5,2,1],[5,2,1],[5,2,1],[5,7,10],[5,4,1],[5,1,3],[5,19,2],[5,2,2],[5,3,1],[5,8,3],[5,4,1],[5,15,1],[5,2,3],[5,13,2],[5,1,3],[5,7,1],[5,23,48],[5,9,1],[5,12,10],[5,16,1],[5,10,1],[5,7,5],[5,2,1],[5,3,1],[5,23,2],[5,4,1],[5,18,1],[5,13,2],[5,54,136],[5,6,2],[5,2,2],[5,5,1],[5,6,1],[5,15,8],[5,14,9],[5,4,1],[5,7,2],[5,3,3],[5,117,5],[5,25,8],[5,14,4],[5,25,3],[5,7,1],[5,7,1],[5,15,3],[5,3,2],[5,4,1],[5,6,4],[5,14,4],[5,7,1],[5,20,1],[5,6,5],[5,12,1],[5,9,3],[5,2,1],[5,4,20],[5,4,3],[5,1,1],[5,1,1],[5,8,1],[5,4,1],[5,1,1],[5,6,3],[5,19,1],[5,14,1],[5,22,2],[5,2,1],[5,11,2],[5,1,1],[5,10,1],[5,4,1],[5,23,3],[5,3,1],[5,15,1],[5,8,4],[5,11,4],[5,4,1],[5,2,1],[5,8,6],[5,2,4],[5,2,7],[5,3,2],[5,2,1],[5,1,1],[5,1,1],[5,11,2],[5,4,10],[5,11,4],[5,110,4],[5,6,1],[5,2,1],[5,96,34],[6,4,1],[6,7,3],[6,2,1],[6,6,2],[6,10,1],[6,2,1],[6,10,1],[6,59,2],[6,7,4],[6,4,2],[6,3,1],[6,6,1],[6,1,4],[6,7,3],[6,2,3],[6,1,1],[6,12,1],[6,1,39],[6,28,1],[6,3,4],[6,8,3],[6,4,4],[6,9,2],[6,15,1],[6,10,1],[6,1,1],[6,2,1],[6,7,1],[6,2,1],[6,93,1],[6,14,6],[6,2,2],[6,55,39],[6,15,2],[6,23,3],[6,3,3],[6,35,2],[6,5,15],[6,1,7],[6,8,19],[6,10,10],[6,3,2],[6,6,3],[6,1,2],[6,6,1],[6,2,1],[6,4,1],[6,127,20],[6,20,18],[6,3,1],[6,9,2],[6,2,3],[6,10,1],[6,27,1],[6,9,1],[6,9,1],[6,28,1],[6,1,1],[6,10,1],[6,11,1],[6,5,1],[6,4,1],[6,82,35],[6,2,1],[6,1,1],[6,3,1],[6,2,1],[6,2,11],[6,2,8],[6,3,2],[6,12,3],[6,5,6],[6,42,4],[6,8,1],[6,2,1],[6,2,2],[6,10,3],[6,6,2],[6,48,2],[6,2,3],[6,2,2],[6,2,1],[6,4,1],[6,10,1],[6,1,1],[6,7,1],[6,35,1],[6,17,1],[6,21,2],[6,1,1],[6,4,2],[6,25,1],[6,7,2],[6,12,4],[6,2,6],[6,24,4],[6,2,1],[6,5,1],[6,2,1],[6,2,1],[6,3,2],[6,4,2],[6,2,1],[6,2,1],[6,2,9],[6,2,2],[6,5,1],[6,8,10],[6,1,1],[6,12,2],[6,10,1],[6,4,2],[6,12,4],[6,1,3],[6,3,2],[6,8,1],[6,4,4],[6,12,5],[6,4,2],[6,10,1],[6,1,1],[6,12,1],[6,6,4],[6,2,1],[6,3,2],[6,1,1],[6,3,5],[6,6,1],[6,32,1],[6,10,1],[6,6,5],[6,27,2],[6,7,1],[6,2,1],[6,10,2],[6,5,1],[6,8,2],[6,3,2],[6,9,2],[6,22,1],[6,2,2],[6,10,1],[6,3,4],[6,1,1],[6,3,6],[6,8,2],[6,44,1],[6,1,1],[6,9,7],[6,9,5],[6,19,4],[6,7,1],[6,1,1],[6,10,1],[6,14,2],[6,4,3],[6,4,1],[6,6,1],[6,3,1],[6,4,1],[6,6,3],[6,6,2],[6,6,1],[6,1,3],[6,12,13],[6,3,2],[6,1,4],[6,15,1],[6,39,4],[6,5,1],[6,1,5],[6,11,3],[6,5,7],[6,9,2],[6,1,1],[6,12,1],[6,12,1],[6,1,4],[6,11,1],[6,3,1],[6,6,2],[6,5,2],[6,2,1],[6,1,2],[6,2,1],[6,41,23],[6,3,1],[6,15,1],[6,1,1],[6,1,1],[6,2,2],[6,3,1],[6,10,1],[6,17,6],[6,5,2],[6,30,1],[7,2,2],[7,10,2],[7,8,3],[7,9,4],[7,4,1],[7,8,1],[7,2,1],[7,7,134],[7,16,1],[7,5,3],[7,3,1],[7,6,2],[7,1,1],[7,5,1],[7,5,1],[
7,2,1],[7,24,1],[7,8,4],[7,9,2],[7,1,1],[7,6,2],[7,9,2],[7,1,1],[7,5,28],[7,1,1],[7,2,2],[7,7,2],[7,11,1],[7,2,1],[7,17,32],[7,5,1],[7,2,1],[7,3,2],[7,7,4],[7,15,3],[7,3,1],[7,6,2],[7,1,1],[7,2,1],[7,1,1],[7,1,11],[7,2,1],[7,8,1],[7,6,1],[7,2,1],[7,57,1],[7,20,46],[7,6,2],[7,6,1],[7,1,2],[7,28,7],[7,3,5],[7,4,1],[7,4,6],[7,2,2],[7,3,3],[7,2,3],[7,2,1],[7,1,1],[7,2,6],[7,4,1],[7,3,1],[7,23,1],[7,7,2],[7,7,1],[7,4,3],[7,2,1],[7,1,1],[7,4,2],[7,15,2],[7,6,1],[7,2,1],[7,14,1],[7,1,1],[7,1,1],[7,4,2],[7,2,1],[7,4,1],[7,2,1],[7,4,3],[7,22,1],[7,10,1],[7,2,1],[7,1,2],[7,7,2],[7,1,2],[7,12,1],[7,3,1],[7,2,4],[7,3,8],[7,2,1],[7,6,1],[7,5,3],[7,8,2],[7,5,1],[7,6,1],[7,6,1],[7,5,1],[7,9,5],[7,3,1],[7,3,2],[7,3,19],[7,28,3],[7,2,2],[7,3,1],[7,51,4],[7,2,1],[7,2,1],[7,22,2],[7,5,1],[7,2,1],[7,4,2],[7,2,1],[7,6,2],[7,6,1],[7,3,1],[7,37,1],[7,9,1],[7,8,2],[7,2,1],[7,4,1],[7,2,1],[7,18,1],[7,9,2],[7,1,1],[7,5,1],[7,2,1],[7,13,1],[7,45,1],[7,1,3],[7,7,5],[7,16,1],[7,7,1],[7,1,1],[7,3,1],[7,8,1],[7,1,1],[7,1,4],[7,2,2],[7,6,1],[7,6,1],[7,2,1],[7,16,1],[7,11,1],[7,1,1],[7,2,1],[7,3,2],[7,8,8],[7,33,1],[7,2,8],[7,4,1],[7,6,7],[7,12,3],[7,17,1],[7,9,5],[7,3,2],[7,3,2],[7,4,1],[7,1,1],[7,2,2],[7,6,1],[8,9,1],[8,79,3],[8,3,1],[8,14,4],[8,2,4],[8,10,5],[8,7,3],[8,8,1],[8,6,1],[8,7,1],[8,8,2],[8,9,1],[8,30,2],[8,1,1],[8,1,5],[8,15,2],[8,10,3],[8,5,3],[8,1,2],[8,3,1],[8,16,1],[8,3,1],[8,3,3],[8,3,4],[8,2,1],[8,6,2],[8,4,4],[8,5,3],[8,8,4],[8,8,3],[8,4,3],[8,13,7],[8,2,1],[8,2,1],[8,1,1],[8,4,1],[8,10,3],[8,16,9],[8,3,2],[8,1,2],[8,2,5],[8,5,2],[8,156,14],[8,1,1],[8,5,1],[8,252,690],[8,5,1],[8,25,21],[8,1,1],[8,39,12],[8,1,4],[8,6,1],[8,25,7],[8,1,1],[8,7,1],[8,46,11],[8,3,1],[8,1,1],[8,14,1],[8,24,1],[8,16,3],[8,6,3],[8,5,1],[8,1,2],[8,12,2],[8,2,1],[8,2,5],[8,6,1],[8,6,1],[8,14,1],[8,7,1],[8,6,1],[8,4,6],[8,1,2],[8,3,1],[8,2,14],[8,7,12],[8,2,2],[8,25,15],[8,8,3],[8,6,6],[8,5,1],[8,1,1],[8,2,3],[8,18,3],[8,2,2],[8,3,1],[8,4,1],[8,3,3],[8,4,2],[8,12,2],[8,1,1],[8,4,1],[8,18,1],[8,2,2],[8,11,3],[8,5,1],[8,6,1],[8,13,1],[8,6,1],[8,23,1],[8,18,3],[8,13,2],[8,4,1],[8,38,4],[8,1,1],[8,6,1],[8,10,2],[8,2,7],[8,10,7],[8,1,1],[8,4,7],[8,2,1],[8,2,2],[8,7,1],[8,17,1],[8,10,5],[8,4,4],[8,8,4],[8,3,2],[8,2,1],[8,33,1],[8,8,6],[8,15,1],[8,2,1],[8,7,4],[8,6,3],[8,2,1],[8,1,2],[8,3,1],[8,4,1],[8,4,2],[8,27,1],[8,10,1],[9,8,2],[9,2,2],[9,7,1],[9,11,1],[9,35,5],[9,3,1],[9,2,2],[9,6,7],[9,16,2],[9,7,15],[9,3,1],[9,9,1],[9,5,1],[9,3,1],[9,3,1],[9,4,1],[9,2,5],[9,1,1],[9,5,4],[9,1,1],[9,13,1],[9,14,4],[9,3,1],[9,35,3],[9,41,1],[9,8,3],[9,2,5],[9,8,2],[9,13,3],[9,10,1],[9,4,1],[9,35,12],[9,9,1],[9,12,1],[9,4,1],[9,2,4],[9,1,2],[9,6,4],[9,1,4],[9,20,3],[9,4,3],[9,3,3],[9,1,4],[9,2,11],[9,11,2],[9,19,1],[9,5,1],[9,6,2],[9,1,1],[9,3,1],[9,15,3],[9,2,1],[9,6,1],[9,13,1],[9,2,1],[9,11,2],[9,3,5],[9,6,1],[9,16,1],[9,4,1],[9,3,2],[9,3,1],[9,2,5],[9,13,1],[9,3,1],[9,2,2],[9,7,1],[9,2,3],[9,3,4],[9,5,1],[9,4,1],[9,10,2],[9,36,1],[9,7,2],[9,3,1],[9,4,2],[9,5,5],[9,12,1],[9,4,1],[9,2,2],[9,12,1],[9,13,1],[9,12,1],[9,2,4],[9,1,1],[9,1,2],[9,6,6],[9,1,2],[9,8,4],[9,7,2],[9,15,4],[10,3,25],[10,2,1],[10,4,2],[10,8,1],[10,2,1],[10,1,1],[10,21,1],[10,21,19],[10,4,4],[10,4,8],[10,2,1],[10,1,3],[10,3,5],[10,6,1],[10,8,5],[10,4,1],[10,24,5],[10,2,2],[10,24,1],[10,6,4],[10,1,2],[10,25,1],[10,14,1],[10,6,3],[10,2,3],[10,6,1],[10,15,2],[10,54,3],[10,12,1],[10,21,1],[10,7,1],[10,4,4],[10,5,1],[10,10,3],[10,37,1],[10,8,3],[10,11,1],[10,2,4],[10,6,1],[10,30,1],[10,35,1],[10,4,2],[10,2,1],[10,5,2],[10,6,1],[10,4,4],[10,12,1],[10,12,1],[10,44,4],[10,16,3],[10,1,6
4],[10,27,1],[10,9,3],[10,17,2],[10,25,2],[10,2,2],[10,7,3],[10,89,1],[10,7,30],[10,2,4],[10,2,3],[10,2,1],[10,3,3],[10,11,1],[10,7,1],[10,2,1],[10,4,2],[10,1,1],[10,1,1],[10,6,2],[10,7,3],[10,4,1],[10,2,2],[10,18,1],[10,4,1],[10,19,1],[10,14,6],[10,5,1],[10,5,6],[10,12,1],[11,5,6],[11,15,8],[11,9,1],[11,3,2],[11,6,3],[11,24,4],[11,27,3],[11,2,2],[11,5,9],[11,13,1],[11,3,1],[11,2,25],[11,10,1],[11,4,11],[11,7,2],[11,49,1],[11,4,1],[11,12,1],[11,7,1],[11,1,2],[11,10,6],[11,2,1],[11,4,2],[11,1,2],[11,2,1],[11,5,1],[11,4,3],[11,1,1],[11,6,1],[11,4,3],[11,95,2],[11,8,1],[11,18,1],[11,5,1],[11,16,12],[11,13,2],[11,7,6],[11,56,1],[11,6,1],[11,8,1],[11,21,14],[11,2,7],[11,5,1],[11,1,1],[11,5,2],[11,2,1],[11,15,1],[11,3,3],[11,26,1],[11,6,6],[11,1,1],[11,10,7],[11,6,3],[11,6,1],[11,8,2],[11,1,2],[11,35,2],[11,19,2],[11,8,2],[11,4,1],[11,7,2],[11,4,5],[11,3,5],[11,17,1],[11,3,3],[11,2,1],[11,12,1],[11,2,8],[11,85,1],[11,4,1],[11,9,1],[11,2,2],[11,2,1],[11,6,2],[11,6,3],[11,18,3],[11,1,1],[11,8,1],[11,22,1],[11,7,1],[11,4,2],[11,4,1],[11,8,3],[11,10,4],[11,24,1],[11,10,19],[11,12,8],[12,5,1],[12,1,7],[12,4,1],[12,21,6],[12,12,2],[12,16,1],[12,1,1],[12,2,1],[12,3,1],[12,8,9],[12,1,1],[12,17,2],[12,16,6],[12,14,1],[12,3,3],[12,27,3],[12,2,1],[12,3,3],[12,14,4],[12,1,3],[12,10,1],[12,5,7],[12,7,3],[12,13,5],[12,4,1],[12,47,4],[12,18,1],[12,31,2],[12,8,1],[12,5,4],[12,1,1],[12,26,1],[12,13,2],[12,5,2],[12,4,3],[12,15,5],[12,2,1],[12,2,1],[12,3,1],[12,5,1],[12,11,1],[12,4,3],[12,1,1],[12,7,2],[12,6,1],[12,14,6],[12,32,4],[12,14,1],[12,31,1],[12,7,3],[12,9,7],[12,5,1],[12,6,1],[12,6,6],[12,7,8],[12,2,1],[12,3,1],[12,4,3],[12,1,1],[12,19,2],[12,11,1],[12,7,2],[12,8,1],[12,15,4],[12,5,1],[12,9,3],[12,2,1],[12,1,1],[12,8,9],[12,3,6],[12,15,1],[13,1,11],[13,7,2],[13,10,1],[13,13,4],[13,3,2],[13,1,2],[13,2,1],[13,3,4],[13,3,1],[13,4,3],[13,5,1],[13,10,13],[13,5,4],[13,2,3],[13,3,2],[13,72,2],[13,7,3],[13,19,2],[13,4,1],[13,5,6],[13,4,2],[13,2,1],[13,2,1],[13,34,11],[13,5,2],[13,9,5],[13,6,2],[13,5,5],[13,9,5],[13,9,1],[13,19,3],[13,4,1],[13,3,1],[13,7,2],[13,1,1],[13,11,7],[13,4,7],[13,6,1],[13,2,1],[13,1,1],[13,21,1],[13,6,15],[13,5,2],[13,1,1],[13,1,2],[14,2,1],[14,18,1],[14,8,2],[14,5,1],[14,2,2],[14,5,2],[14,2,1],[14,8,2],[14,4,1],[14,8,5],[14,14,1],[14,9,6],[14,18,2],[14,4,1],[14,6,1],[14,18,1],[14,6,6],[14,4,1],[14,6,2],[14,6,8],[14,3,1],[14,2,3],[14,1,1],[14,17,4],[14,4,3],[14,15,3],[14,4,8],[14,15,2],[14,6,1],[14,9,22],[14,7,3],[14,7,6],[14,2,2],[14,1,1],[14,7,4],[14,10,1],[14,1,1]])\n #data = 
np.array([[131,3,1],[49,1,1],[17,7,1],[55,7,19],[80,5,1],[40,2,2],[91,21,6],[19,16,1],[27,7,1],[15,50,2],[37,1,7],[17,3,1],[22,32,2],[68,2,1],[26,2,3],[15,2,3],[246,2,1],[25,2,1],[19,1,1],[98,1,2],[54,13,1],[168,2,4],[20,102,5],[40,2,1],[41,1,1],[44,19,16],[17,6,1],[92,12,1],[17,2,1],[16,5,3],[45,11,1],[20,10,1],[26,1,2],[21,9,9],[26,10,1],[187,4,2],[65,28,4],[17,9,33],[23,39,1],[58,4,4],[41,107,3],[28,3,1],[16,1,1],[17,16,4],[17,16,1],[17,5,1],[83,2,2],[17,1,2],[26,4,2],[22,7,2],[16,1,1],[15,2,1],[15,2,1],[111,8,1],[25,6,1],[112,4,1],[19,10,2],[38,25,4],[29,1,5],[17,2,1],[111,9,8],[53,5,4],[29,7,1],[25,8,2],[23,2,134],[32,6,1],[27,1,1],[61,4,2],[41,163,4],[57,11,2],[24,2,1],[16,18,1],[81,7,14],[169,5,1],[19,4,1],[412,5,1],[32,2,7],[19,28,3],[17,11,1],[44,4,5],[27,2,2],[18,1,7],[15,3,3],[18,10,1],[19,6,10],[46,2,5],[20,12,3],[25,6,4],[18,4,1],[15,40,8],[16,11,16],[237,1,1],[26,13,2],[26,4,1],[101,5,5],[50,2,1],[22,45,5],[16,7,2],[17,4,2],[19,2,3],[22,1,1],[260,6,1],[20,15,1],[24,5,1],[33,2,1],[16,1,5],[21,18,1],[22,1,1],[18,13,2],[124,3,1],[16,6,1],[19,6,2],[71,2,1],[232,2,2],[21,2,1],[231,11,1],[201,49,2],[28,12,1],[68,5,1],[56,26,7],[17,1,8],[19,10,2],[120,13,2],[218,3,1],[46,5,6],[57,4,1],[30,5,2],[17,8,4],[17,22,1],[15,5,1],[16,7,1],[26,13,1],[28,22,2],[100,1,2],[58,12,2],[52,9,11],[21,4,2],[18,4,1],[699,1,1],[401,6,3],[20,7,1],[20,3,13],[27,1,1],[35,2,2],[27,6,1],[15,13,1],[17,6,1],[26,28,4],[89,2,3],[36,11,2],[17,11,2],[15,1,1],[59,3,1],[15,3,1],[20,11,1],[49,1,1],[24,3,1],[25,7,1],[29,1,1],[61,2,2],[28,3,13],[82,2,8],[22,2,1],[21,25,3],[73,3,2],[22,8,1],[51,3,12],[16,6,1],[64,2,4],[22,2,2],[19,7,1],[69,2,1],[17,8,9],[19,1,13],[28,35,3],[134,2,1],[19,12,1],[27,13,1],[17,10,1],[16,17,4],[46,2,3],[15,1,2],[35,15,2],[20,6,1],[16,10,3],[33,11,1],[20,8,4],[15,5,1],[33,5,2],[460,6,1],[132,2,1],[73,14,3],[34,5,1],[123,1,2],[15,8,1],[30,1,1],[16,1,1],[73,3,1],[54,4,1],[17,1,9],[17,17,3],[22,1,3],[46,16,8],[18,1,1],[22,3,2],[21,4,1],[40,5,1],[19,2,1],[16,11,1],[19,4,1],[26,4,1],[87,1,3],[75,1,8],[25,1,1],[2230,5,1],[16,1,1],[17,10,3],[15,44,2],[79,3,1],[21,19,1],[292,5,13],[27,4,1],[25,2,1],[23,34,1],[36,2,1],[15,2,7],[18,3,3],[62,1,7],[16,61,5],[15,5,1],[36,5,1],[67,8,3],[18,4,1],[23,2,1],[16,21,3],[32,7,1],[22,6,1],[88,5,1],[19,2,4],[38,2,1],[47,6,28],[18,35,3],[159,15,1],[25,3,5],[295,9,4],[26,2,1],[27,8,3],[86,6,1],[24,25,4],[18,1,2],[16,6,1],[64,16,1],[39,1,2],[30,1,4],[44,1,3],[82,11,4],[28,13,2],[46,19,1],[15,26,1],[30,6,11],[51,3,6],[19,20,1],[940,6,4],[21,6,1],[29,2,1],[20,2,1],[31,2,1],[21,2,3],[25,27,1],[26,2,1],[17,4,1],[64,7,1],[126,7,15],[18,8,1],[20,13,2],[16,7,2],[18,2,1],[19,4,5],[29,1,1],[80,12,2],[42,14,6],[107,2,1],[15,4,1],[48,16,1],[62,3,2],[15,13,1],[29,48,7],[25,4,1],[17,5,20],[19,7,3],[22,10,3],[58,15,3],[17,14,1],[121,2,2],[33,64,11],[16,15,2],[39,6,2],[25,69,7],[69,2,1],[41,6,2],[20,5,1],[42,22,4],[18,17,4],[16,14,3],[27,14,1],[20,1,1],[44,1,101],[33,9,1],[26,2,8],[30,24,3],[27,24,2],[34,7,1],[39,6,3],[20,2,3],[55,5,1],[22,22,2],[17,2,1],[55,3,1],[29,10,5],[60,12,2],[18,13,3],[93,3,2],[15,3,1],[26,5,5],[18,1,1],[17,16,2],[15,13,3],[22,12,1],[256,19,27],[18,7,8],[22,3,1],[35,3,4],[16,2,1],[19,6,2],[24,1,1],[29,3,2],[36,21,8],[24,1,1],[18,6,2],[26,24,11],[19,15,2],[16,1,1],[28,4,1],[60,11,1],[62,4,2],[70,2,1],[75,1,2],[125,3,1],[21,6,1],[165,23,2],[108,1,1],[35,5,1],[251,19,12],[137,4,1],[81,11,4],[104,19,4],[18,18,3],[19,13,1],[18,112,5],[19,6,2],[28,7,2],[23,9,1],[20,15,7],[34,1,1],[24,12,3],[15,5,1],[40,9,4],[24,41,6],[35,1,1],[17,3,1],[17,3,4],[46,7,2],[21,8,10],[1
7,7,4],[36,6,1],[32,6,2],[31,1,1],[17,32,5],[26,3,4],[16,4,1],[21,2,1],[19,4,1],[33,4,1],[46,7,1],[28,9,1],[169,9,24],[24,18,2],[103,6,1],[93,1,1],[156,2,1],[58,7,1],[55,30,3],[15,5,1],[20,9,1],[19,20,1],[44,1,3],[16,2,1],[23,4,1],[22,10,1],[16,138,5],[17,2,1],[17,1,2],[70,8,5],[15,3,6],[22,6,1],[20,1,1],[35,2,4],[15,3,1],[26,119,46],[390,18,2],[22,4,1],[175,5,2],[23,4,1],[26,2,21],[17,1,2],[112,4,1],[18,22,5],[22,2,1],[122,13,1],[18,1,1],[27,7,1],[26,18,5],[18,1,3],[28,1,15],[35,11,1],[15,2,1],[55,6,5],[67,3,1],[30,5,7],[31,12,1],[16,9,12],[43,7,1],[23,21,1],[43,2,7],[53,40,1],[58,6,1],[29,27,11],[65,6,2],[27,4,2],[15,7,2],[17,26,13],[48,4,79],[30,2,6],[25,1,1],[20,20,6],[59,2,5],[15,14,4],[18,7,1],[18,2,1],[28,7,1],[35,1,1],[15,12,4],[52,2,2],[16,25,1],[91,1,1],[27,7,3],[62,4,1],[29,11,1],[25,4,3],[15,1,1],[40,6,2],[19,2,2],[24,14,2],[33,5,1],[58,3,3],[23,1,4],[15,2,2],[1263,4,1],[92,5,1],[17,2,1],[16,10,1],[50,8,1],[24,2,1],[73,1,1],[30,33,55],[18,15,1],[15,9,4],[23,1,3],[17,5,1],[43,3,1],[15,9,2],[19,4,2],[20,20,4],[31,1,2],[21,3,1],[79,9,13],[20,3,24],[56,2,1],[26,1,2],[15,3,1],[30,12,1],[64,6,1],[327,8,47],[39,2,1],[22,17,5],[18,6,3],[74,14,2],[17,4,1],[39,1,3],[520,9,3],[65,9,1],[36,1,4],[264,3,3],[16,1,1],[18,5,3],[22,16,3],[21,2,1],[15,3,3],[49,5,1],[37,19,2],[19,13,2],[30,1,1],[44,4,1],[19,9,31],[22,4,2],[21,4,5],[16,4,1],[40,17,1],[15,12,4],[43,4,3],[21,30,1],[60,16,3],[28,2,1],[38,16,2],[19,3,1],[68,18,4],[1,4,3],[1,9,1],[1,2,2],[1,1,4],[1,148,4],[1,6,1],[1,16,1],[1,4,1],[1,19,3],[1,7,3],[1,2,2],[1,4,2],[1,47,5],[1,2,2],[1,1,4],[1,1,2],[1,1,2],[1,1,1],[1,4,2],[1,7,1],[1,4,6],[1,2,1],[1,5,4],[1,9,3],[1,9,2],[1,7,1],[1,4,1],[1,10,2],[1,1,1],[1,5,1],[1,5,1],[1,2,16],[1,2,1],[1,1,1],[1,3,2],[1,8,3],[1,1,18],[1,5,1],[1,14,3],[1,6,6],[1,7,1],[1,1,1],[1,16,1],[1,2,1],[1,2,1],[1,1,2],[1,4,4],[1,4,1],[1,9,1],[1,25,7],[1,1,1],[1,8,2],[1,1,4],[1,77,8],[1,1,3],[1,6,3],[1,4,2],[1,2,2],[1,2,1],[1,40,1],[1,26,3],[1,1,4],[1,1,1],[1,2,2],[1,1,2],[1,15,1],[1,35,86],[1,3,2],[1,4,1],[1,2,1],[1,4,3],[1,30,1],[1,2,1],[1,4,2],[1,2,1],[1,1,1],[1,2,1],[1,3,1],[1,2,3],[1,3,1],[1,14,1],[1,3,2],[1,7,4],[1,6,2],[1,2,1],[1,23,2],[1,4,1],[1,4,3],[1,26,3],[1,47,15],[1,3,5],[1,5,1],[1,3,1],[1,2,1],[1,2,1],[1,3,1],[1,36,1],[1,2,1],[1,1,9],[1,6,1],[1,2,1],[1,8,3],[1,7,1],[1,33,2],[1,14,4],[1,13,3],[1,2,1],[1,5,1],[1,7,2],[1,9,3],[1,6,1],[1,3,1],[1,9,1],[1,2,2],[1,2,1],[1,6,3],[1,4,2],[1,2,1],[1,1,1],[1,13,4],[1,9,2],[1,4,2],[1,7,14],[1,8,1],[1,3,1],[1,25,2],[1,2,1],[1,11,1],[1,2,1],[1,1,1],[1,3,3],[1,3,2],[1,2,1],[1,2,1],[1,2,8],[1,9,1],[1,13,9],[1,3,1],[1,8,1],[1,102,71],[1,22,1],[1,2,3],[1,22,2],[1,1,1],[1,3,1],[1,12,1],[1,3,2],[1,1,1],[1,5,2],[1,30,6],[1,14,1],[1,2,1],[1,1,1],[1,5,1],[1,8,1],[1,4,2],[1,3,1],[1,2,1],[1,1,1],[1,1,1],[1,12,1],[1,14,1],[1,10,2],[1,22,3],[1,15,2],[1,4,2],[1,5,1],[1,10,2],[1,10,26],[1,1,2],[1,1,2],[1,17,1],[1,1,1],[1,7,1],[1,1,1],[1,8,2],[1,5,2],[1,15,1],[1,16,2],[1,7,1],[1,26,1],[1,16,2],[1,13,6],[1,3,3],[1,2,1],[1,2,1],[1,5,3],[1,1,1],[1,4,1],[1,1,1],[1,2,2],[1,13,4],[1,50,2],[1,12,3],[1,2,1],[1,16,5],[1,2,8],[1,3,5],[1,1,1],[1,25,1],[1,5,1],[1,13,2],[1,1,2],[1,8,1],[1,13,1],[1,4,4],[1,2,3],[1,7,2],[1,2,4],[1,2,1],[1,1,2],[1,4,1],[1,3,2],[1,8,4],[1,4,1],[1,2,2],[1,2,1],[1,3,1],[1,7,1],[1,8,5],[1,34,4],[1,2,3],[1,1,1],[1,8,3],[1,3,1],[1,26,2],[1,3,1],[1,1,6],[1,2,4],[1,7,1],[1,9,2],[1,3,93],[1,2,1],[1,3,2],[1,3,3],[1,15,3],[1,12,1],[1,1,1],[1,1,5],[1,4,1],[1,1,4],[1,2,1],[1,6,4],[1,9,1],[1,1,9],[1,11,1],[1,68,2],[1,7,1],[1,11,1],[1,6,1],[1,5,2],[1,2,1],[1,19,1],[1,3,1],[1,1,2],[1,3
7,1],[1,19,1],[1,4,5],[1,8,1],[1,1,1],[1,7,1],[1,3,1],[1,4,1],[1,6,7],[1,2,1],[1,14,3],[1,4,1],[1,6,5],[1,1,1],[1,1,1],[1,2,1],[1,1,2],[1,7,2],[1,8,1],[1,17,136],[1,6,1],[1,3,2],[1,9,12],[1,7,2],[1,2,9],[1,1,4],[1,3,1],[1,10,1],[1,6,16],[1,8,1],[1,2,2],[1,2,2],[1,4,3],[1,3,3],[1,24,3],[1,68,28],[1,16,1],[1,9,2],[1,1,2],[1,18,7],[1,3,1],[1,5,2],[1,1,3],[1,3,1],[1,3,8],[1,73,5],[1,6,3],[1,5,1],[1,2,1],[1,15,7],[1,80,2],[1,3,1],[1,12,3],[1,8,1],[1,2,1],[1,9,5],[1,3,2],[1,319,20],[1,2,1],[1,4,6],[1,5,4],[1,25,1],[1,8,1],[1,6,5],[1,18,1],[1,2,2],[1,5,2],[1,10,1],[1,10,1],[1,2,1],[1,6,2],[1,7,2],[1,39,1],[1,7,79],[1,28,4],[1,2,1],[1,4,1],[1,25,5],[1,23,3],[1,10,3],[1,2,1],[1,13,1],[1,2,2],[1,6,1],[1,6,4],[1,12,1],[1,4,1],[1,3,1],[1,10,1],[1,4,2],[1,7,1],[1,11,1],[1,6,1],[1,4,2],[1,3,3],[1,1,1],[1,1,1],[1,3,3],[1,3,2],[1,15,1],[1,1,1],[1,1,4],[1,26,2],[1,1,1],[1,7,1],[1,4,63],[1,1,19],[1,96,7],[1,7,2],[1,6,1],[1,4,1],[1,18,2],[1,1,2],[1,4,1],[1,3,3],[1,18,1],[1,3,1],[1,14,1],[1,6,2],[1,13,1],[1,1,5],[1,13,2],[1,1,1],[1,4,4],[1,10,1],[1,2,1],[1,12,3],[1,7,1],[1,8,1],[1,3,1],[1,2,2],[1,4,5],[1,9,1],[1,2,1],[1,2,1],[1,6,8],[1,32,3],[1,3,2],[1,6,1],[1,5,1],[1,7,1],[1,4,2],[1,2,1],[1,5,4],[1,1,2],[1,9,1],[1,2,1],[1,11,1],[1,5,2],[1,2,1],[1,1,1],[1,3,1],[1,7,13],[1,4,4],[1,1,1],[1,6,1],[1,1,3],[1,6,6],[1,6,1],[1,4,4],[1,10,1],[1,15,1],[1,3,7],[1,6,1],[1,9,1],[1,14,23],[1,14,2],[1,6,3],[1,2,1],[1,9,1],[1,1,3],[1,6,4],[1,15,2],[1,8,1],[1,6,6],[1,16,10],[1,5,4],[1,30,3],[1,7,1],[1,4,1],[1,3,1],[1,6,6],[1,1,2],[1,3,2],[1,1,1],[1,1,1],[1,1,1],[1,2,5],[1,2,1],[1,2,5],[1,24,1],[1,3,1],[1,6,1],[1,2,1],[1,4,1],[1,2,2],[1,4,1],[1,1,1],[1,3,1],[1,8,2],[1,4,2],[1,2,2],[1,2,1],[1,12,6],[1,2,1],[1,32,42],[1,7,1],[1,7,1],[1,12,1],[1,2,1],[1,6,1],[1,42,1],[1,2,1],[1,1,2],[1,2,1],[1,6,1],[1,2,2],[1,8,1],[1,22,4],[1,1,1],[1,11,20],[1,6,2],[1,2,1],[1,4,2],[1,9,1],[1,10,1],[1,16,5],[1,3,2],[1,8,1],[1,6,3],[1,1,2],[1,6,1],[1,2,1],[1,28,1],[1,18,1],[1,17,8],[1,4,1],[1,2,2],[1,13,1],[1,25,3],[1,7,4],[1,3,1],[1,1,1],[1,3,3],[1,4,1],[1,7,5],[1,2,2],[1,5,1],[1,2,2],[1,2,2],[1,14,1],[1,3,3],[1,4,1],[1,1,2],[1,11,1],[1,2,1],[1,6,1],[1,7,6],[1,7,1],[1,2,2],[1,2,1],[1,31,4],[1,4,3],[1,14,6],[1,4,4],[1,1,1],[1,2,1],[1,12,5],[1,4,1],[1,7,1],[1,3,1],[1,4,1],[1,11,1],[1,12,1],[1,3,2],[1,9,1],[1,17,2],[1,9,5],[1,6,1],[1,13,2],[1,5,1],[1,4,3],[1,3,1],[1,1,4],[1,7,1],[1,4,1],[1,3,1],[1,56,3],[1,1,1],[1,9,1],[1,4,1],[1,15,1],[1,2,1],[1,12,1],[1,4,2],[1,1,1],[1,1,1],[1,149,2],[1,56,1],[1,4,5],[1,2,2],[1,11,3],[1,2,3],[1,1,2],[1,2,1],[1,15,4],[1,2,2],[1,4,1],[1,17,2],[1,10,5],[1,14,2],[1,8,2],[1,4,2],[1,4,1],[1,6,1],[1,5,1],[1,7,2],[1,20,5],[1,3,1],[1,4,1],[1,11,1],[1,2,1],[1,1,3],[1,5,2],[1,6,1],[1,4,3],[1,4,3],[1,4,2],[1,7,3],[1,5,1],[1,1,1],[1,2,1],[1,8,1],[1,7,1],[1,2,1],[1,1,1],[1,1,1],[1,4,3],[1,11,1],[1,43,1],[1,7,8],[1,8,1],[1,1,1],[1,8,6],[1,9,3],[1,19,1],[1,2,1],[1,43,3],[1,4,5],[1,2,3],[1,4,1],[1,17,1],[1,9,1],[1,8,72],[1,2,1],[1,4,2],[1,16,1],[1,15,1],[1,8,1],[1,3,1],[1,7,8],[1,4,1],[1,23,2],[1,1,2],[1,1,1],[1,15,7],[1,7,4],[1,3,4],[1,5,1],[1,1,1],[1,6,83],[1,1,1],[1,4,3],[1,2,1],[1,3,2],[1,9,2],[1,5,1],[1,22,1],[1,3,6],[1,6,4],[1,4,1],[1,1,4],[1,1,1],[1,5,3],[1,1,2],[1,15,2],[1,8,1],[1,5,2],[1,1,1],[1,4,10],[1,63,1],[1,2,2],[1,2,1],[1,9,1],[1,4,3],[1,2,1],[1,24,1],[1,2,2],[1,2,2],[1,6,2],[1,13,5],[1,34,5],[1,10,1],[1,3,1],[1,22,9],[1,41,1],[1,1,4],[1,13,2],[1,18,1],[1,4,4],[1,7,1],[1,4,3],[1,14,4],[1,3,2],[1,2,1],[1,7,10],[1,15,3],[1,6,1],[1,1,1],[1,2,5],[1,4,10],[1,5,2],[1,12,6],[1,6,1],[1,19,134],[1,11,1],[1,233,9],[1,4,2],[1,4
0,1],[1,2,1],[1,10,1],[1,3,1],[1,3,1],[1,3,1],[1,35,1],[1,2,7],[1,1,3],[1,3,1],[1,14,2],[1,1,1],[1,7,1],[1,6,5],[1,10,1],[1,5,3],[1,8,1],[1,11,1],[1,13,1],[1,8,9],[1,5,1],[1,3,1],[1,11,1],[1,2,1],[1,5,1],[1,7,1],[1,9,3],[1,2,3],[1,2,2],[1,29,2],[1,2,1],[1,4,3],[1,1,2],[1,2,2],[1,3,6],[1,11,1],[1,1,1],[1,11,1],[1,4,1],[1,6,1],[1,3,5],[1,4,1],[1,4,3],[1,34,1],[1,4,2],[1,1,9],[1,18,1],[1,9,3],[1,15,1],[1,4,4],[1,4,2],[1,9,1],[1,4,1],[1,10,1],[1,2,1],[1,2,4],[1,4,1],[1,1,2],[1,3,3],[1,2,1],[1,47,14],[1,3,1],[1,2,1],[1,3,1],[1,1,1],[1,20,1],[1,14,6],[1,2,2],[1,16,2],[1,2,1],[1,1,31],[1,5,9],[1,10,2],[1,10,3],[1,19,1],[1,1,1],[1,13,2],[1,5,1],[1,1,2],[1,1,2],[1,24,1],[1,9,2],[1,4,1],[1,10,3],[1,35,6],[1,1,1],[1,2,1],[1,1,1],[1,3,1],[1,4,5],[1,4,1],[1,1,1],[1,4,1],[1,10,2],[1,55,6],[1,3,22],[1,28,4],[1,6,3],[1,10,1],[1,6,187],[1,3,2],[1,12,5],[1,7,1],[1,4,1],[1,2,2],[1,2,1],[1,31,9],[1,2,8],[1,20,2],[1,36,2],[1,2,2],[1,15,5],[1,5,2],[1,3,2],[1,8,1],[1,1,1],[1,2,1],[1,37,1],[1,17,4],[1,8,1],[1,19,2],[1,7,1],[1,1,1],[1,1,1],[1,2,1],[1,9,1],[1,2,1],[1,2,1],[1,2,1],[1,19,1],[1,33,3],[1,4,1],[1,7,1],[1,3,1],[1,46,4],[1,2,1],[1,3,2],[1,1,2],[1,2,2],[1,14,1],[1,3,1],[1,11,2],[1,2,2],[1,21,2],[1,34,2],[1,4,1],[1,1,1],[1,2,1],[1,22,1],[1,64,9],[1,21,10],[1,3,3],[1,6,1],[1,16,2],[1,3,1],[1,31,4],[1,1,1],[1,1,2],[1,1,1],[1,3,1],[1,5,4],[1,27,1],[1,1,1],[1,2,2],[1,17,10],[1,4,1],[1,25,1],[1,41,1],[1,18,4],[1,17,40],[1,9,1],[1,2,1],[1,7,1],[1,21,2],[1,2,3],[1,3,1],[1,14,1],[1,8,2],[1,2,1],[1,2,2],[1,5,1],[1,1,2],[1,4,1],[1,6,5],[1,9,17],[1,5,1],[1,6,1],[1,4,1],[1,1,1],[1,3,1],[1,61,9],[1,6,1],[1,9,2],[1,2,2],[1,9,1],[1,7,4],[1,12,1],[1,2,2],[1,40,1],[1,17,13],[1,1,7],[1,11,2],[1,20,2],[1,2,1],[1,1,1],[1,12,10],[1,5,3],[1,2,1],[1,1,1],[1,23,2],[1,9,3],[1,4,1],[1,5,2],[1,4,1],[1,19,5],[1,5,1],[1,1,4],[1,5,1],[1,8,1],[1,9,1],[1,5,3],[1,43,3],[1,1,2],[1,3,1],[1,2,2],[1,15,38],[1,3,1],[1,25,1],[1,1,4],[1,5,6],[1,2,1],[1,4,3],[1,4,2],[1,3,1],[1,9,1],[1,4,1],[1,13,2],[1,7,4],[1,2,6],[1,12,1],[1,8,3],[1,1,4],[1,13,1],[1,3,4],[1,3,2],[1,2,2],[1,4,1],[1,6,1],[1,14,3],[1,7,1],[1,8,1],[1,8,1],[1,3,1],[1,32,5],[1,16,2],[1,2,3],[1,38,1],[1,5,4],[1,10,2],[1,2,7],[1,3,1],[1,8,1],[1,3,2],[1,1,3],[1,4,2],[1,71,12],[1,8,4],[1,2,12],[1,3,1],[1,12,2],[1,2,1],[1,5,1],[1,2,28],[1,19,5],[1,10,1],[1,9,2],[1,3,1],[1,7,6],[1,11,1],[1,2,1],[1,27,2],[1,7,4],[1,4,2],[1,12,8],[1,8,96],[1,12,1],[1,2,4],[1,965,1303],[1,7,5],[1,15,3],[1,3,2],[1,18,2],[1,25,3],[1,7,2],[1,18,2],[1,6,1],[1,10,2],[1,4,1],[1,1,3],[1,5,1],[1,19,2],[1,8,1],[1,50,4],[1,8,1],[1,11,1],[1,9,1],[1,2,1],[1,2,5],[1,3,1],[1,6,2],[1,1,1],[1,13,5],[1,19,1],[1,7,2],[1,17,1],[1,6,1],[1,4,1],[1,7,3],[1,13,3],[1,7,4],[1,5,2],[1,4,1],[1,11,16],[1,7,1],[1,1,1],[1,2,1],[1,2,1],[1,14,3],[1,30,1],[1,2,6],[1,6,2],[1,3,1],[1,4,1],[1,9,11],[1,6,1],[1,35,1],[1,2,8],[1,1,2],[1,3,2],[1,1,1],[1,9,1],[1,2,57],[1,2,1],[1,5,1],[1,4,2],[1,15,1],[1,12,3],[1,4,3],[1,17,1],[1,12,2],[1,21,12],[1,2,1],[1,9,1],[1,9,47],[1,49,4],[1,5,1],[1,4,1],[1,24,1],[1,2,2],[1,64,2],[1,48,7],[1,2,2],[1,10,2],[1,3,1],[1,11,1],[1,5,1],[1,1,2],[1,2,4],[1,6,1],[1,19,6],[1,6,2],[1,3,2],[1,1,1],[1,22,2],[1,3,2],[1,5,14],[1,2,1],[1,11,1],[1,4,2],[1,6,1],[1,24,10],[1,7,1],[1,2,74],[1,6,1],[1,28,1],[1,1,1],[1,1,1],[1,10,1],[1,88,4],[1,9,4],[1,26,1],[1,3,1],[1,4,1],[1,4,1],[1,6,1],[1,23,1],[1,2,7],[1,1,3],[1,7,1],[1,1,1],[1,5,2],[1,4,1],[1,2,1],[1,1,1],[1,15,5],[1,22,1],[1,6,3],[1,12,2],[1,48,14],[1,7,1],[1,5,1],[1,10,5],[1,5,1],[1,6,5],[1,2,3],[1,14,3],[1,3,1],[1,8,4],[1,2,5],[1,34,3],[1,2,1],[1,4,1],[1,6,7],[1,3,1],[1,3,3],
[1,32,2],[1,3,1],[1,3,1],[1,2,1],[1,3,1],[1,39,8],[1,1,1],[1,15,8],[1,3,4],[1,2,3],[1,1,3],[1,38,18],[1,6,1],[1,25,4],[1,2,1],[1,8,1],[1,3,1],[1,24,1],[1,5,5],[1,5,4],[1,2,3],[1,2,1],[1,5,4],[1,51,1],[1,23,3],[1,2,1],[1,2,1],[1,1,2],[1,7,2],[1,3,1],[1,1,1],[1,4,1],[1,2,1],[1,7,6],[1,8,1],[1,11,1],[1,2,6],[1,2,1],[1,2,1],[1,1,1],[1,26,1],[1,3,1],[1,2,1],[1,2,1],[1,2,1],[1,12,2],[1,1,3],[1,3,1],[1,2,4],[1,19,3],[1,3,1],[1,3,2],[1,49,3],[1,2,1],[1,21,3],[1,1,1],[1,5,1],[1,4,1],[1,2,2],[1,2,1],[1,1,1],[1,7,4],[1,2,1],[1,2,1],[1,2,1],[1,3,2],[1,26,2],[1,9,1],[1,2,2],[1,12,1],[1,4,32],[1,4,1],[1,17,1],[1,1,2],[1,77,4],[1,2,1],[1,12,1],[1,2,1],[1,2,4],[1,5,2],[1,10,3],[1,4,3],[1,2,1],[1,1,3],[1,16,4],[1,3,1],[1,40,2],[1,13,1],[1,2,1],[1,6,2],[1,12,2],[1,6,11],[1,6,1],[1,1,1],[1,10,6],[1,1,1],[1,6,5],[1,38,4],[1,2,7],[1,9,1],[1,5,2],[1,3,1],[1,2,1],[1,5,2],[1,4,1],[1,1,1],[1,1,1],[1,4,2],[1,4,3],[1,5,2],[1,1,4],[1,11,4],[1,14,4],[1,4,1],[1,17,2],[1,2,2],[1,39,1],[1,9,21],[1,14,2],[1,4,4],[1,4,3],[1,9,2],[1,1,1],[1,3,2],[1,1,1],[1,1,7],[1,16,4],[1,5,1],[1,2,1],[1,2,1],[1,2,1],[1,98,19],[1,4,1],[1,1,1],[1,5,1],[1,7,1],[1,1,3],[1,9,1],[1,4,2],[1,2,1],[1,7,2],[1,2,1],[1,1,2],[1,1,1],[1,5,2],[1,6,1],[1,11,6],[1,5,4],[1,40,5],[1,1,2],[1,9,1],[1,2,1],[1,6,1],[1,5,1],[1,11,2],[1,4,1],[1,3,17],[1,1,1],[1,1,5],[1,9,5],[1,60,1],[1,3,7],[1,3,4],[1,5,1],[1,3,10],[1,5,2],[1,7,1],[1,2,1],[1,14,14],[1,4,3],[1,1,2],[1,2,4],[1,5,1],[1,11,7],[1,3,1],[1,29,3],[1,2,4],[1,8,1],[1,53,1],[1,10,1],[1,7,2],[1,2,13],[1,58,1],[1,5,6],[1,2,1],[1,4,2],[1,4,2],[1,4,2],[1,5,2],[1,2,3],[1,12,2],[1,4,6],[1,34,1],[1,1,1],[1,8,1],[1,4,1],[1,2,1],[1,2,2],[1,16,1],[1,4,2],[1,3,13],[1,2,2],[1,46,2],[1,4,1],[1,6,1],[1,1,2],[1,2,1],[1,3,6],[1,3,1],[1,19,1],[1,2,1],[1,23,1],[1,3,1],[1,1,1],[1,7,2],[1,4,4],[1,18,3],[1,1,1],[1,7,2],[1,2,2],[1,7,1],[1,2,1],[1,2,1],[1,6,1],[1,9,4],[1,3,1],[1,5,1],[1,13,1],[1,2,2],[1,33,1],[1,12,1],[1,9,3],[1,2,1],[1,1,1],[1,18,1],[1,1,3],[1,3,15],[1,2,4],[1,17,1],[1,1,1],[1,1,1],[1,4,8],[1,1,2],[1,31,19],[1,1,5],[1,7,6],[1,12,4],[1,2,4],[1,7,8],[1,4,2],[1,13,2],[1,19,18],[1,42,4],[1,3,1],[1,17,1],[1,3,3],[1,4,2],[1,12,1],[1,1,6],[1,23,2],[1,3,1],[1,20,1],[1,21,4],[1,1,1],[1,3,2],[1,10,1],[1,9,1],[1,8,6],[1,21,3],[1,5,1],[1,7,6],[1,2,1],[1,5,1],[1,1,2],[1,11,1],[1,8,212],[1,9,3],[1,6,1],[1,1,2],[1,25,12],[1,4,1],[1,14,15],[1,4,1],[1,13,1],[1,2,2],[1,3,1],[1,4,1],[1,3,1],[1,1,1],[1,3,1],[1,9,7],[1,1,1],[1,6,1],[1,8,2],[1,8,1],[1,2,3],[1,3,1],[1,2,3],[1,1,2],[1,10,1],[1,6,1],[1,12,3],[1,12,1],[1,1,1],[1,2,1],[1,2,4],[1,4,1],[1,2,1],[1,1,1],[1,4,1],[1,23,2],[1,4,2],[1,20,1],[1,17,4],[1,8,2],[1,4,6],[1,4,1],[1,6,1],[1,10,1],[1,6,2],[1,1,1],[1,3,1],[1,4,1],[1,4,1],[1,16,143],[1,7,1],[1,10,1],[1,7,2],[1,3,3],[1,8,3],[1,2,1],[1,49,1],[1,2,7],[1,14,4],[1,31,3],[1,29,1],[1,31,8],[1,5,2],[1,7,1],[1,1,1],[1,4,5],[1,1,1],[1,7,3],[1,1,2],[1,5,3],[1,3,1],[1,7,4],[1,129,9],[1,13,1],[1,11,4],[1,6,28],[1,6,1],[1,6,1],[1,20,1],[1,2,1],[1,16,3],[1,3,3],[1,5,1],[1,64,1],[1,4,2],[1,7,1],[1,21,3],[1,2,2],[1,9,1],[1,2,1],[1,5,6],[1,6,6],[1,3,1],[1,5,1],[1,3,1],[1,3,1],[1,6,2],[1,2,3],[1,4,1],[1,1,1],[1,12,37],[1,6,1],[1,1,1],[1,4,2],[1,4,8],[1,6,2],[1,2,2],[1,19,1],[1,1,1],[1,1,3],[1,3,1],[1,4,5],[1,15,2],[1,8,3],[1,1,1],[1,2,2],[1,3,1],[1,10,1],[1,4,1],[1,1,2],[1,19,1],[1,5,2],[1,4,4],[1,3,2],[1,3,17],[1,1,1],[1,1,1],[1,2,1],[1,18,3],[1,3,1],[1,16,4],[1,5,1],[1,11,2],[1,19,8],[1,2,1],[1,2,1],[1,1,6],[1,3,1],[1,2,1],[1,1,1],[1,2,1],[1,11,3],[1,17,4],[1,4,1],[1,4,4],[1,5,2],[1,1,1],[1,1,2],[1,10,12],[1,2,2],[1,8,1],[1,1,2],[1,8,1],[1,
17,2],[1,2,1],[1,4,1],[1,6,1],[1,20,21],[1,5,7],[1,3,1],[1,13,2],[1,3,6],[1,8,3],[1,12,1],[1,12,2],[1,3,2],[1,15,2],[1,6,1],[1,9,5],[1,5,3],[1,4,1],[1,7,4],[1,4,4],[1,9,4],[1,11,1],[1,3,1],[1,17,1],[1,71,5],[1,7,1],[1,3,1],[1,5,1],[1,1,1],[1,1,2],[1,2,1],[1,1,2],[1,10,2],[1,3,1],[1,2,2],[1,5,1],[1,28,4],[1,2,1],[1,1,1],[1,9,1],[1,3,2],[1,8,2],[1,13,1],[1,2,1],[1,6,1],[1,25,79],[1,30,24],[1,10,31],[1,5,1],[1,9,1],[1,1,1],[1,4,1],[1,118,14],[1,18,3],[1,30,1],[1,10,3],[1,5,1],[1,5,1],[1,1,1],[1,6,1],[1,9,3],[1,6,2],[1,5,1],[1,2,2],[1,3,1],[1,7,4],[1,8,2],[1,10,2],[1,1,8],[1,41,1],[1,21,4],[1,6,1],[1,13,3],[1,5,1],[1,34,7],[1,22,1],[1,9,8],[1,5,3],[1,11,1],[1,2,1],[1,6,1],[1,4,1],[1,72,1],[1,44,3],[1,2,1],[1,1,1],[1,3,1],[1,8,2],[1,1,3],[1,14,1],[1,3,2],[1,1,1],[1,9,2],[1,17,1],[1,9,35],[1,3,1],[1,6,1],[1,2,11],[1,5,3],[1,1257,55],[1,1,1],[1,2,1],[1,14,7],[1,51,44],[1,3,6],[1,1,1],[1,6,2],[1,2,1],[1,11,2],[1,8,3],[1,3,2],[1,3,3],[1,4,1],[1,2,1],[1,5,1],[1,8,5],[1,60,1],[1,6,3],[1,36,2],[1,1,1],[1,2,1],[1,10,2],[1,26,2],[1,7,3],[1,6,1],[1,6,2],[1,3,3],[1,2,3],[1,6,2],[1,2,2],[1,2,2],[1,5,2],[1,2,1],[1,15,5],[1,1,2],[1,1,3],[1,37,24],[1,8,2],[1,17,2],[1,31,1],[1,14,2],[1,2,1],[1,16,2],[1,3,1],[1,2,2],[1,1,2],[1,2,3],[1,4,2],[1,1,1],[1,9,5],[1,1,2],[1,1,4],[1,4,18],[1,6,1],[1,12,1],[1,3,85],[1,17,2],[1,4,1],[1,7,1],[1,4,1],[1,3,1],[1,22,2],[1,1,1],[1,15,27],[1,4,1],[1,1,1],[1,1,3],[1,3,1],[1,35,2],[1,1,1],[1,33,4],[1,2,1],[1,3,3],[1,6,1],[1,9,1],[1,8,1],[1,6,1],[1,16,2],[1,20,2],[1,5,1],[1,1,5],[1,2,2],[1,12,25],[1,6,1],[1,13,1],[1,2,1],[1,2,1],[1,10,1],[1,2,1],[1,37,3],[1,2,1],[1,58,11],[1,14,3],[1,6,1],[1,6,1],[1,1,3],[1,1,1],[1,9,2],[1,1,502],[1,45,5],[1,5,1],[1,4,1],[1,2,8],[1,5,1],[1,1,1],[1,7,1],[1,4,1],[1,3,4],[1,1,1],[1,10,1],[1,9,1],[1,13,1],[1,10,8],[1,4,4],[1,7,1],[1,1,2],[1,2,2],[1,9,2],[1,13,2],[1,8,1],[1,1,1],[1,2,4],[1,29,1],[1,8,2],[1,7,3],[1,30,7],[1,1,1],[1,10,10],[1,3,1],[1,1,1],[1,5,1],[1,4,3],[1,7,1],[1,43,8],[1,1,2],[1,9,1],[1,1,1],[1,3,6],[1,9,1],[1,1,1],[1,7,1],[1,6,1],[1,2,2],[1,13,4],[1,13,3],[1,2,3],[1,8,1],[1,11,2],[1,9,53],[1,2,1],[1,16,1],[1,6,3],[1,48,3],[1,4,1],[1,7,3],[1,2,2],[1,8,1],[1,8,1],[1,26,2],[1,3,1],[1,8,2],[1,121,2],[1,2,2],[1,8,1],[1,2,2],[1,4,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,7,1],[1,7,2],[1,2,1],[1,8,2],[1,34,28],[1,3,2],[1,3,1],[1,5,1],[1,9,1],[1,7,1],[1,14,4],[1,1,1],[1,34,4],[1,1,1],[1,6,1],[1,3,1],[1,2,1],[1,4,1],[1,5,2],[1,10,1],[1,41,5],[1,7,2],[1,19,4],[1,3,3],[1,12,3],[1,7,1],[1,4,2],[1,16,1],[1,3,1],[1,8,4],[1,9,2],[1,8,2],[1,2,1],[1,10,2],[1,8,1],[1,16,2],[1,7,2],[1,5,1],[1,2,3],[1,15,4],[1,3,5],[1,4,4],[1,1,1],[1,3,2],[1,5,1],[1,8,4],[1,4,1],[1,41,7],[1,2,1],[1,1,3],[1,1,6],[1,2,1],[1,10,2],[1,10,2],[1,3,3],[1,39,4],[1,1,2],[1,5,7],[1,12,2],[1,15,5],[1,4,1],[1,13,1],[1,3,1],[1,44,3],[1,1,2],[1,1,1],[1,6,1],[1,3,1],[1,3,2],[1,7,15],[1,1,1],[1,11,4],[1,3,1],[1,1,3],[1,1,1],[1,2,1],[1,9,4],[1,22,1],[1,46,2],[1,3,18],[1,22,8],[1,3,1],[1,4,10],[1,12,16],[1,2,1],[1,8,3],[1,1,1],[1,2,4],[1,1,1],[1,6,4],[1,7,1],[1,7,4],[1,14,4],[1,1,1],[1,13,2],[1,61,1],[1,6,2],[1,16,1],[1,14,7],[1,9,2],[1,18,2],[1,9,3],[1,1,2],[1,4,1],[1,6,1],[1,6,4],[1,10,1],[1,5,2],[1,7,1],[1,3,1],[1,11,2],[1,53,1],[1,10,2],[1,17,1],[1,2,2],[1,5,14],[1,17,1],[1,2,1],[1,5,1],[1,28,2],[1,8,2],[1,4,1],[1,4,2],[1,21,1],[1,3,1],[1,3,2],[1,5,2],[1,5,1],[1,3,13],[1,13,2],[1,124,753],[1,2,2],[1,43,1],[1,6,1],[1,2,2],[1,11,1],[1,22,1],[1,5,2],[1,5,1],[1,8,1],[1,2,4],[1,2,2],[1,9,1],[1,6,1],[1,2,1],[1,6,1],[1,14,3],[1,21,1],[1,3,4],[1,3,3],[1,3,1],[1,2,2],[1,2,2],[1,5,2],[1,11,1],[1,6,
1],[1,3,1],[1,64,1],[1,6,1],[1,2,12],[1,5,1],[1,6,4],[1,10,1],[1,14,1],[1,14,1],[1,2,1],[1,2,1],[1,8,4],[1,17,2],[1,5,3],[1,64,1],[1,33,3],[1,18,2],[1,1,1],[1,42,9],[1,20,2],[1,10,2],[1,2,2],[1,3,1],[1,13,1],[1,5,1],[1,39,5],[1,8,2],[1,6,1],[1,3,2],[1,12,1],[1,2,4],[1,8,1],[1,2,1],[1,4,5],[1,7,1],[1,2,1],[1,2,1],[1,5,2],[1,15,3],[1,6,1],[1,1,1],[1,11,2],[1,4,2],[1,1,1],[1,7,3],[1,7,2],[1,3,1],[1,3,1],[1,2,1],[1,8,3],[1,3,1],[1,7,12],[1,8,1],[1,4,2],[1,6,2],[1,9,1],[1,3,30],[1,8,3],[1,8,2],[1,8,1],[1,11,1],[1,13,1],[1,2,1],[1,16,1],[1,10,1],[1,3,1],[1,6,4],[1,29,2],[1,4,2],[1,4,1],[1,1,1],[1,7,1],[1,1,1],[1,4,11],[1,1,1],[1,6,1],[1,26,1],[1,3,1],[1,2,1],[1,10,1],[1,4,1],[1,14,2],[1,10,1],[1,5,2],[1,5,1],[1,2,1],[1,26,33],[1,1,1],[1,11,2],[1,8,5],[1,18,1],[1,2,1],[1,5,1],[1,4,2],[1,5,1],[1,11,2],[1,1,2],[1,2,2],[1,6,6],[1,10,1],[1,14,1],[1,2,1],[1,13,1],[1,14,1],[1,8,2],[1,21,2],[1,1,2],[1,1,1],[1,14,1],[1,2,1],[1,15,2],[1,4,1],[1,3,1],[1,10,2],[1,4,2],[1,5,1],[1,11,22],[1,8,3],[1,4,1],[1,3,2],[1,1,2],[1,25,3],[1,2,1],[1,11,2],[1,5,2],[1,39,1],[1,1,1],[1,415,128],[1,6,1],[1,5,1],[1,8,5],[1,2,3],[1,1,1],[1,1,1],[1,4,1],[1,2,4],[1,4,1],[1,2,9],[1,4,2],[1,23,3],[1,6,9],[1,5,4],[1,2,5],[1,1,1],[1,7,1],[1,3,7],[1,1,2],[1,2,16],[1,5,2],[1,1,3],[1,4,1],[1,11,1],[1,2,2],[1,2,1],[1,10,1],[1,6,2],[1,11,1],[1,28,1],[1,21,3],[1,3,2],[1,3,1],[1,4,1],[1,1,2],[1,7,1],[1,11,4],[1,4,2],[1,22,4],[1,1,1],[1,1,1],[1,12,7],[1,1,1],[1,4,2],[1,2,1],[1,6,4],[1,14,3],[1,8,2],[1,1,11],[1,13,2],[1,4,1],[1,3,2],[1,95,10],[1,1,2],[1,4,2],[1,27,2],[1,2,1],[1,19,1],[1,13,4],[1,1,1],[1,37,1],[1,4,1],[1,5,1],[1,7,5],[1,1,1],[1,4,5],[1,5,1],[1,1,1],[1,16,2],[1,22,1],[1,4,2],[1,24,4],[1,10,1],[1,77,6],[1,21,1],[1,11,1],[1,2,1],[1,1,1],[1,4,5],[1,2,4],[1,55,4],[1,17,1],[1,1,3],[1,2,2],[1,7,1],[1,17,1],[1,34,2],[1,4,1],[1,2,2],[1,1,2],[1,100,1],[1,17,2],[1,8,6],[1,11,2],[1,11,2],[1,3,1],[1,5,2],[1,1,1],[1,6,7],[1,15,5],[1,7,1],[1,4,1],[1,5,1],[1,6,2],[1,7,1],[1,2,2],[1,10,2],[1,17,1],[1,10,2],[1,6,3],[1,21,1],[1,2,1],[1,78,4],[1,6,1],[1,1,2],[1,5,1],[1,186,9],[1,16,3],[1,15,13],[1,30,4],[1,2,1],[1,15,3],[1,13,1],[1,3,1],[1,1,1],[1,2,2],[1,5,5],[1,7,1],[1,16,1],[1,2,1],[1,14,2],[1,11,5],[1,9,1],[1,13,2],[1,2,1],[1,4,64],[1,4,1],[1,18,4],[1,3,1],[1,1,1],[1,16,2],[1,4,1],[1,11,4],[1,9,3],[1,3,1],[1,4,1],[1,1,1],[1,10,3],[1,7,1],[1,13,1],[1,16,4],[1,1,16],[1,2,2],[1,18,6],[1,42,2],[1,1,3],[1,15,1],[1,3,1],[1,43,1],[1,1,1],[1,27,2],[1,1,3],[1,1,5],[1,13,1],[1,1,1],[1,10,11],[1,8,1],[1,9,1],[1,13,1],[1,1,2],[1,13,3],[1,1,1],[1,5,1],[1,14,2],[1,14,1],[1,13,1],[1,4,3],[1,25,1],[1,1,3],[1,3,3],[1,4,1],[1,1,1],[1,4,4],[1,15,1],[1,2,1],[1,1,1],[1,7,12],[1,68,2],[1,13,2],[1,2,1],[1,6,4],[1,46,6],[1,1,1],[1,2,2],[1,4,1],[1,2,1],[1,11,5],[1,1,1],[1,9,1],[1,9,1],[1,13,1],[1,4,1],[1,14,1],[1,42,9],[1,5,1],[1,4,1],[1,24,7],[1,7,1],[1,17,1],[1,2,1],[1,2,5],[1,3,6],[1,2,1],[1,15,4],[1,3,2],[1,33,2],[1,30,4],[1,27,4],[1,1,1],[1,14,4],[1,2,3],[1,26,7],[1,22,1],[1,2,2],[1,2,2],[1,166,3],[1,4,4],[1,9,1],[1,12,15],[1,2,6],[1,13,2],[1,4,3],[1,9,2],[1,2,3],[1,3,3],[1,9,2],[1,22,1],[1,5,3],[1,3,4],[1,2,3],[1,3,1],[1,23,1],[1,18,1],[1,6,1],[1,4,1],[1,9,3],[1,35,1],[1,73,2],[1,1,3],[1,31,5],[1,25,1],[1,3,4],[1,11,1],[1,9,4],[1,2,1],[1,27,36],[1,23,5],[1,4,2],[1,1,2],[1,29,2],[1,3,2],[1,1,1],[1,4,1],[1,12,1],[1,36,16],[1,5,14],[1,19,1],[1,6,1],[1,6,1],[1,4,1],[1,6,1],[1,4,2],[1,9,7],[1,7,1],[1,30,4],[1,4,1],[1,18,3],[1,2,2],[1,3,1],[1,9,2],[1,2,2],[1,1,2],[1,1,2],[1,14,1],[1,3,1],[1,5,2],[1,10,1],[1,9,1],[1,10,3],[1,4,1],[1,2,1],[1,4,4],[1,2,1],[1,3,3],[1,39
,2],[1,3,1],[1,1,3],[1,14,1],[1,2,4],[1,13,1],[1,4,6],[1,3,5],[1,5,4],[1,8,1],[1,131,1],[1,28,1],[1,5,1],[1,965,1303],[1,8,5],[1,2,9],[1,4,2],[1,5,1],[1,46,3],[1,7,3],[1,1,1],[1,7,3],[1,2,1],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,4,6],[1,5,1],[1,9,3],[1,2,2],[1,9,1],[1,42,3],[1,11,3],[1,5,1],[1,1,2],[1,6,1],[1,37,51],[1,2,1],[1,4,3],[1,23,2],[1,1,15],[1,5,4],[1,1,4],[1,18,3],[1,12,3],[1,4,2],[1,4,1],[1,2,7],[1,2,6],[1,3,6],[1,6,1],[1,10,3],[1,4,2],[1,1,2],[1,4,1],[1,4,3],[1,1,3],[1,3,1],[1,6,2],[1,10,2],[1,6,4],[1,4,3],[1,7,2],[1,2,2],[1,4,1],[1,1,1],[1,4,5],[1,14,1],[1,20,4],[1,7,15],[1,18,2],[1,6,1],[1,1,1],[1,7,1],[1,5,2],[1,6,2],[1,4,1],[1,6,3],[1,2,1],[1,6,1],[1,4,1],[1,7,1],[1,7,4],[1,7,1],[1,1,1],[1,24,4],[1,2,2],[1,3,5],[1,8,1],[1,15,2],[1,5,1],[1,2,3],[1,2,2],[1,4,1],[1,6,1],[1,2,3],[1,11,1],[1,23,5],[1,2,2],[1,1,1],[1,8,1],[1,17,6],[1,1,1],[1,9,2],[1,1,1],[1,10,1],[1,5,1],[1,6,1],[1,6,1],[1,5,1],[1,2,6],[1,2,1],[1,9,1],[1,14,1],[1,18,8],[1,39,2],[1,13,1],[1,6,1],[1,6,2],[1,9,1],[1,14,1],[1,5,4],[1,26,2],[1,4,1],[1,7,2],[1,5,5],[1,2,1],[1,20,2],[1,14,1],[1,10,1],[1,4,1],[1,3,1],[1,10,2],[1,9,12],[1,4,4],[1,2,1],[1,4,1],[1,4,1],[1,2,1],[1,8,1],[1,2,4],[1,1,1],[1,33,2],[1,4,1],[1,5,1],[1,205,1],[1,2,1],[1,15,3],[1,5,1],[1,1,1],[1,1,1],[1,1,1],[1,13,1],[1,14,5],[1,6,4],[1,3,1],[1,7,5],[1,42,2],[1,11,1],[1,24,2],[1,11,2],[1,11,2],[1,12,1],[1,7,1],[1,1,1],[1,3,2],[1,21,1],[1,13,1],[1,2,1],[1,37,6],[1,8,4],[1,2,2],[1,2,2],[1,36,1],[1,8,1],[1,19,11],[1,19,7],[1,8,1],[1,18,2],[1,7,2],[1,8,1],[1,1,1],[1,4,1],[1,3,3],[1,10,1],[1,6,1],[1,4,1],[1,10,1],[1,25,1],[1,14,1],[1,14,3],[1,4,1],[1,2,1],[1,2,2],[1,4,2],[1,3,4],[1,62,11],[1,4,1],[1,39,3],[1,65,2],[1,3,1],[1,11,2],[1,4,1],[1,2,2],[1,1,1],[1,2,3],[1,2,1],[1,17,7],[1,7,4],[1,1,4],[1,62,3],[1,17,3],[1,26,3],[1,15,1],[1,2,1],[1,4,6],[1,1,2],[1,8,2],[1,16,2],[1,1,1],[1,7,2],[1,4,1],[1,1,1],[1,7,2],[1,8,2],[1,12,1],[1,1,2],[1,2,1],[1,2,1],[1,26,7],[1,2,1],[1,5,1],[1,5,1],[1,5,1],[1,1,1],[1,6,27],[1,5,4],[1,6,1],[1,8,1],[1,38,2],[1,26,2],[1,13,1],[1,20,2],[1,6,6],[1,2,2],[1,2,1],[1,16,2],[1,88,1],[1,4,1],[1,5,3],[1,1,4],[1,1,4],[1,12,2],[1,3,1],[1,3,1],[1,3,1],[1,2,3],[1,6,1],[1,2,4],[1,28,2],[1,17,3],[1,10,1],[1,51,3],[1,1,1],[1,15,4],[1,10,14],[1,1,3],[1,3,3],[1,1,1],[1,5,1],[1,3,1],[1,23,3],[1,10,1],[1,1,1],[1,21,6],[1,11,1],[1,8,1],[1,1,1],[1,2,1],[1,1,3],[1,26,1],[1,1,2],[1,4,1],[1,4,1],[1,6,1],[1,6,1],[1,2,2],[1,11,5],[1,15,2],[1,13,1],[1,2,2],[1,4,1],[1,4,1],[1,2,6],[1,13,3],[1,23,2],[1,18,2],[1,8,2],[1,1,1],[1,4,1],[1,7,1],[1,2,1],[1,8,6],[1,12,1],[1,23,4],[1,9,4],[1,2,2],[1,8,1],[1,7,2],[1,2,2],[1,2,4],[1,8,16],[1,22,3],[1,2,1],[1,2,4],[1,2,1],[1,9,2],[1,3,3],[1,4,1],[1,3,9],[1,3,1],[1,2,2],[1,2,3],[1,11,1],[1,5,1],[1,5,1],[1,2,2],[1,10,20],[1,2,2],[1,2,1],[1,3,3],[1,10,1],[1,2,3],[1,2,1],[1,5,1],[1,4,2],[1,8,1],[1,2,2],[1,6,1],[1,5,1],[1,9,1],[1,3,2],[1,1,1],[1,2,6],[1,1,1],[1,5,1],[1,2,1],[1,16,1],[1,6,1],[1,2,1],[1,2,1],[1,5,1],[1,9,1],[1,10,16],[1,4,1],[1,4,2],[1,5,2],[1,8,1],[1,16,2],[1,2,1],[1,5,1],[1,1,2],[1,55,2],[1,20,1],[1,11,1],[1,5,2],[1,13,1],[1,1,1],[1,10,6],[1,5,2],[1,21,1],[1,7,3],[1,5,1],[1,7,1],[1,3,1],[1,6,1],[1,46,3],[1,8,5],[1,5,1],[1,2,1],[1,2,6],[1,22,1],[1,42,1],[1,1,1],[1,4,2],[1,13,1],[1,3,3],[1,2,2],[1,4,2],[1,1,3],[1,88,1],[1,24,4],[1,4,1],[1,3,1],[1,5,1],[1,17,6],[1,6,2],[1,20,3],[1,47,2],[1,2,7],[1,13,1],[1,1,3],[1,1,2],[1,2,2],[1,2,2],[1,4,3],[1,7,1],[1,3,1],[1,10,1],[1,2,1],[1,2,5],[1,1,2],[1,17,2],[1,12,4],[1,24,1],[1,3,1],[1,1,3],[1,6,1],[1,2,5],[1,3,1],[1,1,1],[1,13,2],[1,6,1],[1,2,1],[1,10,2],[1,4,1],[1,
1,1],[1,18,7],[1,7,2],[1,8,1],[1,5,1],[1,2,1],[1,4,1],[1,2,2],[1,14,1],[1,13,1],[1,10,4],[1,4,4],[1,6,4],[1,4,1],[1,16,2],[1,8,2],[1,3,3],[1,3,1],[1,21,2],[1,7,1],[1,2,1],[1,2,1],[1,2,3],[1,4,1],[1,6,1],[1,28,1],[1,2,7],[1,3,1],[1,23,4],[1,2,1],[1,6,1],[1,2,1],[1,4,1],[1,3,2],[1,1,1],[1,9,2],[1,9,2],[1,2,1],[1,4,2],[1,10,1],[1,12,1],[1,4,2],[1,7,1],[1,2,2],[1,9,1],[1,16,5],[1,31,2],[1,16,2],[1,22,3],[1,2,1],[1,6,1],[1,1,1],[1,6,3],[1,14,2],[1,5,3],[1,81,3],[1,8,2],[1,1,1],[1,61,9],[1,1,4],[1,2,1],[1,11,3],[1,3,5],[1,3,6],[1,4,7],[1,1,2],[1,5,2],[1,2,1],[1,3,2],[1,9,5],[1,9,1],[1,1,3],[1,3,2],[1,13,3],[1,14,1],[1,15,6],[1,6,1],[1,2,1],[1,7,1],[1,2,1],[1,10,2],[1,2,2],[1,14,1],[1,2,2],[1,3,3],[1,3,1],[1,4,1],[1,59,2],[1,5,2],[1,4,2],[1,1,1],[1,2,1],[1,4,1],[1,2,2],[1,5,4],[1,4,1],[1,4,1],[1,10,3],[1,2,2],[1,2,3],[1,8,1],[1,2,1],[1,1,1],[1,18,1],[1,6,1],[1,12,3],[1,5,3],[1,3,1],[1,7,3],[1,10,2],[1,2,23],[1,1,12],[1,1,1],[1,32,3],[1,2,1],[1,4,1],[1,12,2],[1,4,1],[1,3,1],[1,5,1],[1,4,2],[1,4,1],[1,16,2],[1,1,1],[1,4,1],[1,7,1],[1,2,4],[1,8,1],[1,4,4],[1,1,1],[1,1,2],[1,6,3],[1,8,2],[1,23,15],[1,2,2],[1,2,1],[1,2,1],[1,11,1],[1,3,2],[1,9,2],[1,4,2],[1,2,3],[1,34,1],[1,7,1],[1,2,4],[1,65,2],[1,41,3],[1,1,2],[1,1,1],[1,6,1],[1,6,1],[1,7,1],[1,3,1],[1,14,9],[1,6,1],[1,6,5],[1,2,13],[1,5,2],[1,2,1],[1,4,1],[1,17,1],[1,5,1],[1,1,1],[1,3,2],[1,9,1],[1,1,4],[1,48,2],[1,7,1],[1,4,1],[1,3,1],[1,4,2],[1,118,3],[1,2,1],[1,2,4],[1,2,1],[1,12,13],[1,2,1],[1,4,2],[1,4,1],[1,6,1],[1,1,1],[1,7,2],[1,10,1],[1,21,5],[1,5,2],[1,9,1],[1,2,2],[1,1,1],[1,1,1],[1,1,1],[1,3,1],[1,1,1],[1,7,1],[1,83,9],[1,6,2],[1,7,2],[1,13,1],[1,4,2],[1,3,1],[1,8,2],[1,2,1],[1,10,3],[1,2,1],[1,2,1],[1,9,11],[1,2,1],[1,3,1],[1,17,1],[1,7,2],[1,8,2],[1,20,1],[1,2,1],[1,1,2],[1,8,1],[1,2,1],[1,6,1],[1,21,3],[1,1,2],[1,5,5],[1,2,1],[1,2,3],[1,2,1],[1,2,2],[1,16,1],[1,2,1],[1,2,1],[1,3,1],[1,17,1],[1,6,1],[1,4,15],[1,1,1],[1,11,1],[1,84,15],[1,31,3],[1,2,2],[1,8,1],[1,9,1],[1,2,3],[1,15,2],[1,4,1],[1,18,1],[1,3,1],[1,1,1],[1,2,4],[1,2,2],[1,2,1],[1,2,1],[1,25,1],[1,3,1],[1,141,13],[1,4,2],[1,2,2],[1,14,2],[1,7,1],[1,30,9],[1,17,1],[1,1,2],[1,6,1],[1,2,1],[1,2,1],[1,8,1],[1,2,1],[1,10,1],[1,6,3],[1,12,1],[1,68,1],[1,2,1],[1,10,2],[1,14,2],[1,26,9],[1,7,3],[1,3,3],[1,6,6],[1,3,1],[1,18,4],[1,3,1],[1,4,4],[1,2,1],[1,1,1],[1,37,8],[1,8,6],[1,2,1],[1,9,6],[1,5,2],[1,3,1],[1,3,2],[1,2,1],[1,3,1],[1,13,7],[1,9,1],[1,122,2],[1,2,1],[1,22,6],[1,11,2],[1,16,2],[1,28,46],[1,2,4],[1,7,1],[1,2,3],[1,2,6],[1,2,2],[1,1,2],[1,1,1],[1,5,1],[1,1,2],[1,3,2],[1,7,6],[1,11,1],[1,21,1],[1,40,6],[1,14,2],[1,21,1],[1,1,1],[1,14,2],[1,21,1],[1,2,1],[1,1,1],[1,1,2],[1,40,2],[1,4,2],[1,1,3],[1,1,1],[1,107,2],[1,4,6],[1,136,6],[1,5,1],[1,9,1],[1,24,3],[1,7,1],[1,10,5],[1,29,3],[1,12,2],[1,10,3],[1,5,3],[1,2,1],[1,59,1],[1,5,2],[1,13,2],[1,1,2],[1,50,2],[1,1,3],[1,2,3],[1,6,1],[1,4,2],[1,5,4],[1,3,2],[1,8,1],[1,4,2],[1,1,1],[1,17,1],[1,13,3],[1,2,1],[1,7,1],[1,3,1],[1,8,1],[1,1,1],[1,20,1],[1,4,4],[1,1,2],[1,2,1],[1,2,1],[1,2,2],[1,1,2],[1,13,2],[1,4,1],[1,4,1],[1,3,1],[1,2,1],[1,4,4],[1,13,5],[1,9,1],[1,8,1],[1,12,1],[1,15,3],[1,2,1],[1,2,2],[1,4,1],[1,2,2],[1,1,1],[1,3,1],[1,13,1],[1,4,1],[1,9,4],[1,3,2],[1,2,1],[1,4,4],[1,1,3],[1,15,1],[1,4,1],[1,2,1],[1,3,1],[1,2,1],[1,3,6],[1,5,1],[1,7,10],[1,1,2],[1,6,2],[1,7,2],[1,3,1],[1,3,3],[1,6,1],[1,13,1],[1,22,3],[1,6,5],[1,6,1],[1,3,1],[1,3,1],[1,21,5],[1,11,2],[1,6,3],[1,38,4],[1,6,4],[1,4,1],[1,2,1],[1,5,5],[1,5,3],[1,40,1],[1,4,3],[1,8,1],[1,13,2],[1,4,2],[1,1,1],[1,9,9],[1,1,1],[1,12,2],[1,36,1],[1,2,1],[1,18,3],[1,28,
1],[1,5,1],[1,20,4],[1,40,3],[1,3,1],[1,5,3],[1,2,1],[1,31,3],[1,6,1],[1,3,1],[1,1,5],[1,3,3],[1,36,1],[1,1,1],[1,22,2],[1,9,2],[1,2,4],[1,2,2],[1,4,4],[1,2,1],[1,6,1],[1,3,3],[1,5,1],[1,13,2],[1,4,1],[1,1,3],[1,1,1],[1,11,5],[1,4,1],[1,2,3],[1,26,1],[1,9,1],[1,6,1],[1,15,1],[1,23,5],[1,3,5],[1,4,3],[1,8,1],[1,9,4],[1,2,1],[1,7,1],[1,1,6],[1,4,1],[1,43,1],[1,2,3],[1,1,1],[1,15,4],[1,3,1],[1,1,1],[1,10,1],[1,79,1],[1,1,14],[1,2,1],[1,6,1],[1,1,1],[1,24,1],[1,2,3],[1,9,2],[1,2,3],[1,8,1],[1,115,15],[1,1,1],[1,1,2],[1,3,1],[1,9,24],[1,6,1],[1,3,6],[1,10,3],[1,3,1],[1,1,1],[1,3,2],[1,2,1],[1,11,1],[1,5,1],[1,1,1],[1,2,1],[1,3,1],[1,5,1],[1,11,1],[1,2,1],[1,7,7],[1,15,1],[1,6,2],[1,51,7],[1,2,1],[1,54,1],[1,5,1],[1,1,1],[1,7,5],[1,1,1],[1,4,1],[1,3,1],[1,22,4],[1,5,3],[1,5,1],[1,64,9],[1,6,1],[1,28,6],[1,5,1],[1,11,1],[1,2,2],[1,4,2],[1,1,4],[1,8,1],[1,1,5],[1,7,1],[1,2,1],[1,2,2],[1,8,1],[1,11,3],[1,8,3],[1,7,1],[1,10,5],[1,5,1],[1,98,5],[1,18,1],[1,1,1],[1,5,1],[1,2,2],[1,14,2],[1,3,1],[1,1,1],[1,11,3],[1,7,9],[1,5,3],[1,3,1],[1,3,3],[1,125,34],[1,1,1],[1,2,1],[1,6,2],[1,2,2],[1,11,7],[1,5,2],[1,5,5],[1,6,1],[1,10,2],[1,14,2],[1,4,3],[1,8,7],[1,2,3],[1,2,2],[1,13,1],[1,6,1],[1,10,5],[1,11,1],[1,4,2],[1,14,1],[1,1,6],[1,15,1],[1,1,3],[1,5,3],[1,7,1],[1,2,1],[1,1,3],[1,2,4],[1,3,1],[1,8,3],[1,2,3],[1,2,1],[1,2,2],[1,2,1],[1,4,1],[1,16,2],[1,1,2],[1,1,5],[1,7,1],[1,3,1],[1,2,1],[1,16,3],[1,4,1],[1,8,2],[1,16,6],[1,12,2],[1,84,26],[1,10,2],[1,2,2],[1,5,1],[1,1,1],[1,8,1],[1,4,1],[1,4,1],[1,4,2],[1,4,1],[1,4,10],[1,14,2],[1,4,2],[1,5,2],[1,19,1],[1,4,3],[1,8,2],[1,6,1],[1,2,5],[1,2,1],[1,16,4],[1,4,1],[1,2,2],[1,7,1],[1,4,2],[1,4,1],[1,8,1],[1,10,2],[1,3,2],[1,3,1],[1,10,2],[1,1,1],[1,12,3],[1,37,1],[1,10,1],[1,16,4],[1,1,1],[1,11,1],[1,4,1],[1,8,6],[1,3,2],[1,66,2],[1,14,1],[1,2,4],[1,2,2],[1,7,2],[1,24,2],[1,5,1],[1,1,1],[1,1,1],[1,3,1],[1,31,2],[1,24,1],[1,8,5],[1,8,2],[1,3,4],[1,64,1],[1,1,4],[1,4,47],[1,8,4],[1,25,1],[1,19,2],[1,4,1],[1,33,4],[1,16,2],[1,4,1],[1,1,1],[1,2,3],[1,27,1],[1,20,1],[1,10,3],[1,2,1],[1,2,1],[1,76,1],[1,2,1],[1,5,1],[1,2,2],[1,15,3],[1,40,2],[1,4,22],[1,2,2],[1,2,2],[1,10,1],[1,3,1],[1,55,4],[1,2,7],[1,7,1],[1,4,6],[1,2,1],[1,2,1],[1,28,1],[1,2,2],[1,6,2],[1,6,2],[1,4,15],[1,3,2],[1,1,1],[1,29,1],[1,13,1],[1,16,1],[1,4,1],[1,7,7],[1,3,3],[1,16,4],[1,12,11],[1,1,1],[1,2,4],[1,54,2],[1,1,2],[1,6,2],[1,1,3],[1,2,2],[1,1,1],[1,2,1],[1,11,4],[1,9,1],[1,20,1],[1,1,1],[1,17,3],[1,1,1],[1,9,2],[1,2,2],[1,3,1],[1,29,19],[1,28,1],[1,8,3],[1,21,8],[1,7,3],[1,6,2],[1,5,2],[1,11,1],[1,1,2],[1,7,1],[1,22,1],[1,9,1],[1,3,3],[1,8,2],[1,5,1],[1,23,2],[1,11,5],[1,17,2],[1,5,5],[1,4,3],[1,33,1],[1,2,3],[1,6,1],[1,32,1],[1,6,2],[1,64,2],[1,3,1],[1,7,1],[1,3,6],[1,12,1],[1,1,1],[1,9,1],[1,38,3],[1,1,1],[1,3,1],[1,3,5],[1,78,16],[1,3,1],[1,7,1],[1,26,1],[1,9,2],[1,113,2],[1,9,1],[1,5,9],[1,3,2],[1,4,1],[1,2,1],[1,5,1],[1,24,3],[1,11,4],[1,38,2],[1,13,3],[1,7,3],[1,1,1],[1,1,2],[1,3,3],[1,5,3],[1,6,1],[1,7,1],[1,3,1],[1,4,2],[1,3,1],[1,3,1],[1,1,2],[1,2,1],[1,18,8],[1,1,3],[1,1,1],[1,2,5],[1,13,9],[1,2,2],[1,6,1],[1,5,1],[1,13,3],[1,7,1],[1,3,2],[1,2,1],[1,4,1],[1,2,2],[1,6,2],[1,4,3],[1,1,3],[1,3,2],[1,12,8],[1,6,1],[1,7,1],[1,6,3],[1,9,4],[1,16,17],[1,1,2],[1,4,1],[1,2,1],[1,2,1],[1,2,1],[1,1,1],[1,4,2],[1,4,1],[1,8,1],[1,14,17],[1,7,1],[1,7,6],[1,5,1],[1,4,2],[1,80,2],[1,13,1],[1,11,1],[1,9,1],[1,2,4],[1,3,1],[1,2,1],[1,5,2],[1,3,1],[1,1,2],[1,12,1],[1,8,5],[1,6,3],[1,17,1],[1,3,4],[1,1,2],[1,5,2],[1,1,3],[1,2,2],[1,2,3],[1,2,1],[1,4,1],[1,1,1],[1,14,1],[1,2,1],[1,16,4],[1,15,2],[1,3,3],
[1,8,8],[1,6,1],[1,25,4],[1,6,1],[1,7,3],[1,36,2],[1,2,1],[1,32,2],[1,1,1],[1,7,1],[1,14,2],[1,21,1],[1,3,1],[1,27,7],[1,6,3],[1,1,5],[1,5,4],[1,12,2],[1,2,1],[1,2,1],[1,8,7],[1,8,8],[1,7,1],[1,2,1],[1,4,1],[1,1,7],[1,10,3],[1,17,1],[1,1,1],[1,8,6],[1,29,5],[1,12,2],[1,7,2],[1,7,1],[1,2,2],[1,2,1],[1,2,1],[1,54,9],[1,1,1],[1,12,2],[1,8,1],[1,8,4],[1,39,1],[1,3,3],[1,9,4],[1,6,5],[1,2,1],[1,15,2],[1,18,1],[1,2,2],[1,1,1],[1,1,1],[1,2,4],[1,3,1],[1,6,1],[1,3,3],[1,4,3],[1,3,2],[1,1,1],[1,2,2],[1,16,12],[1,4,2],[1,15,2],[1,6,1],[1,7,1],[1,9,8],[1,70,2],[1,5,1],[1,4,3],[1,24,4],[1,8,6],[1,18,43],[1,23,3],[1,10,1],[1,14,8],[1,6,4],[1,2,1],[1,2,1],[1,1,1],[1,2,1],[1,9,3],[1,6,4],[1,5,3],[1,43,2],[1,5,1],[1,11,1],[1,1,2],[1,5,3],[1,4,2],[1,16,2],[1,16,10],[1,5,1],[1,2,2],[1,2,1],[1,2,3],[1,4,6],[1,3,12],[1,6,1],[1,10,1],[1,1,2],[1,13,1],[1,3,1],[1,5,2],[1,6,1],[1,3,1],[1,2,1],[1,1,1],[1,13,1],[1,20,1],[1,20,2],[1,8,1],[1,5,2],[1,2,2],[1,10,5],[1,1,3],[1,7,2],[1,4,1],[1,15,18],[1,1,4],[1,5,2],[1,4,1],[1,1,11],[1,1,3],[1,4,1],[1,1,1],[1,2,1],[1,2,12],[1,5,1],[1,3,1],[1,25,2],[1,16,1],[1,10,1],[1,18,1],[1,28,3],[1,5,6],[1,4,2],[1,2,2],[1,51,124],[1,4,2],[1,5,1],[1,28,1],[1,4,5],[1,6,2],[1,20,1],[1,7,1],[1,5,3],[1,11,1],[1,4,3],[1,1,1],[1,6,3],[1,5,1],[1,3,1],[1,10,2],[1,64,5],[1,12,12],[1,5,2],[1,6,1],[1,8,2],[1,28,8],[1,19,1],[1,2,1],[1,1,1],[2,6,1],[2,2,2],[2,4,5],[2,11,1],[2,4,1],[2,4,1],[2,14,1],[2,19,2],[2,2,1],[2,6,4],[2,2,1],[2,6,2],[2,4,1],[2,12,2],[2,15,2],[2,5,1],[2,11,1],[2,11,1],[2,2,2],[2,3,3],[2,5,9],[2,2,1],[2,1,1],[2,1,4],[2,2,1],[2,4,1],[2,11,1],[2,6,1],[2,2,2],[2,8,1],[2,81,7],[2,8,1],[2,5,1],[2,6,3],[2,2,2],[2,39,1],[2,5,2],[2,5,2],[2,2,4],[2,10,2],[2,4,2],[2,2,1],[2,6,6],[2,8,2],[2,56,1],[2,9,1],[2,1,1],[2,16,3],[2,5,2],[2,3,2],[2,12,25],[2,4,4],[2,6,2],[2,7,1],[2,30,11],[2,4,1],[2,16,5],[2,8,2],[2,7,2],[2,11,1],[2,7,1],[2,2,1],[2,1,1],[2,2,9],[2,39,6],[2,2,1],[2,2,1],[2,7,1],[2,19,1],[2,11,2],[2,8,2],[2,4,7],[2,2,1],[2,7,1],[2,1,1],[2,4,1],[2,6,1],[2,6,1],[2,2,4],[2,26,37],[2,2,1],[2,13,2],[2,35,10],[2,13,1],[2,6,1],[2,10,2],[2,19,9],[2,7,1],[2,7,1],[2,2,2],[2,1,1],[2,5,2],[2,10,2],[2,6,1],[2,6,1],[2,6,1],[2,2,2],[2,1,1],[2,6,60],[2,8,1],[2,18,1],[2,4,2],[2,1,1],[2,1,1],[2,2,3],[2,21,2],[2,7,2],[2,11,3],[2,14,2],[2,3,2],[2,12,1],[2,1,2],[2,34,1],[2,1,1],[2,16,1],[2,1,1],[2,11,1],[2,14,1],[2,8,1],[2,9,1],[2,8,1],[2,3,1],[2,4,4],[2,4,1],[2,44,3],[2,4,1],[2,19,6],[2,19,2],[2,3,2],[2,17,2],[2,17,4],[2,1,6],[2,5,3],[2,27,6],[2,5,3],[2,6,3],[2,22,2],[2,22,3],[2,13,19],[2,8,1],[2,2,2],[2,7,1],[2,9,3],[2,2,1],[2,11,1],[2,8,1],[2,4,1],[2,8,2],[2,4,1],[2,1,1],[2,16,1],[2,2,1],[2,4,1],[2,9,11],[2,3,3],[2,3,1],[2,1,2],[2,3,1],[2,28,1],[2,8,5],[2,6,2],[2,8,1],[2,1,1],[2,10,1],[2,6,1],[2,55,1],[2,1,1],[2,4,2],[2,3,2],[2,16,4],[2,11,1],[2,2,3],[2,15,1],[2,1,10],[2,8,2],[2,15,1],[2,1,1],[2,7,114],[2,10,3],[2,1,1],[2,5,1],[2,3,3],[2,2,1],[2,1,1],[2,8,1],[2,96,1],[2,10,3],[2,3,2],[2,2,1],[2,1,1],[2,3,1],[2,25,2],[2,3,1],[2,12,4],[2,2,9],[2,3,1],[2,2,1],[2,9,1],[2,12,1],[2,18,1],[2,23,6],[2,9,85],[2,2,8],[2,1,2],[2,26,1],[2,8,2],[2,6,3],[2,1,4],[2,6,1],[2,8,3],[2,9,2],[2,1,1],[2,7,1],[2,1,3],[2,7,1],[2,3,2],[2,10,1],[2,2,2],[2,8,2],[2,4,4],[2,23,2],[2,8,5],[2,1,1],[2,3,3],[2,7,2],[2,1,1],[2,2,1],[2,1,7],[2,10,1],[2,18,1],[2,39,5],[2,13,2],[2,7,2],[2,6,2],[2,9,1],[2,5,1],[2,7,1],[2,35,2],[2,2,2],[2,5,2],[2,1,1],[2,9,2],[2,18,1],[2,2,3],[2,35,1],[2,6,5],[2,2,2],[2,2,1],[2,12,2],[2,1,1],[2,10,1],[2,6,1],[2,2,1],[2,15,2],[2,7,1],[2,5,4],[2,4,1],[2,2,14],[2,2,1],[2,5,3],[2,21,2],[2,10,1],[2,2,1],[2,8,1]
,[2,16,1],[2,9,2],[2,11,2],[2,1,6],[2,12,2],[2,18,2],[2,2,4],[2,4,3],[2,7,11],[2,3,1],[2,28,5],[2,1,4],[2,8,1],[2,2,5],[2,2,1],[2,3,1],[2,10,2],[2,3,3],[2,2,1],[2,17,1],[2,6,1],[2,16,1],[2,10,16],[2,17,1],[2,4,2],[2,1,1],[2,3,3],[2,7,3],[2,5,1],[2,11,1],[2,13,1],[2,3,1],[2,6,1],[2,5,2],[2,17,2],[2,33,13],[2,2,10],[2,3,5],[2,4,3],[2,5,1],[2,2,4],[2,8,2],[2,14,1],[2,16,1],[2,2,3],[2,19,6],[2,5,1],[2,8,2],[2,7,1],[2,1,1],[2,11,1],[2,2,2],[2,11,10],[2,10,1],[2,14,1],[2,1,7],[2,10,1],[2,34,1],[2,2,1],[2,2,4],[2,9,2],[2,16,1],[2,2,4],[2,8,3],[2,1,2],[2,3,5],[2,13,5],[2,20,1],[2,25,8],[2,9,1],[2,1,1],[2,15,3],[2,6,2],[2,394,278],[2,11,2],[2,1,1],[2,3,15],[2,4,2],[2,3,6],[2,6,3],[2,1,12],[2,2,1],[2,1,3],[2,11,2],[2,20,3],[2,31,9],[2,25,7],[2,15,2],[2,11,31],[2,17,2],[2,5,1],[2,2,2],[2,4,1],[2,6,2],[2,27,2],[2,10,2],[2,1,2],[2,26,5],[2,5,14],[2,12,2],[2,5,2],[2,2,1],[2,2,3],[2,6,1],[2,1,3],[2,9,3],[2,18,1],[2,5,5],[2,29,13],[2,14,1],[2,1,4],[2,3,1],[2,5,1],[2,19,4],[2,11,7],[2,8,3],[2,18,1],[2,3,5],[2,11,1],[2,4,1],[2,10,4],[2,19,2],[2,10,3],[2,12,2],[2,19,9],[2,73,3],[2,13,3],[2,12,1],[2,4,5],[2,55,1],[2,6,6],[2,27,2],[2,2,1],[2,20,1],[2,8,1],[2,1,1],[2,29,2],[2,10,8],[2,5,2],[2,10,2],[2,14,1],[2,10,1],[2,1,1],[2,4,2],[2,5,1],[2,1,4],[2,4,2],[2,9,1],[2,9,4],[2,2,1],[2,4,1],[2,6,2],[2,2,2],[2,10,15],[2,17,1],[2,9,1],[2,9,1],[2,8,2],[2,4,1],[2,4,1],[2,243,2],[2,9,3],[2,12,2],[2,4,3],[2,2,1],[2,1,2],[2,57,4],[2,7,2],[2,8,2],[2,14,2],[2,2,1],[2,6,1],[2,7,2],[2,8,1],[2,4,3],[2,36,5],[2,3,1],[2,1,1],[2,45,8],[2,1,1],[2,2,3],[2,9,1],[2,1,1],[2,13,2],[2,44,6],[2,2,1],[2,36,1],[2,4,1],[2,5,1],[2,3,2],[2,1,1],[2,28,2],[2,9,1],[2,3,3],[2,10,2],[2,16,1],[2,1,1],[2,1,1],[2,13,1],[2,14,3],[2,65,1],[2,7,1],[2,2,1],[2,11,8],[2,4,1],[2,17,1],[2,6,1],[2,15,5],[2,15,1],[2,17,2],[2,8,1],[2,8,1],[2,1,2],[2,5,7],[2,1,1],[2,3,2],[2,2,1],[2,4,1],[2,32,1],[2,3,1],[2,1,1],[2,1,1],[2,2,2],[2,2,1],[2,8,2],[2,11,3],[2,2,3],[2,42,3],[2,5,1],[2,6,2],[2,1,1],[2,9,1],[2,2,2],[2,5,1],[2,2,1],[2,7,1],[2,7,6],[2,6,2],[2,3,1],[2,1,3],[2,15,1],[2,23,1],[2,1,1],[2,3,1],[2,4,2],[2,8,1],[2,2,7],[2,3,4],[2,6,5],[2,4,1],[2,5,3],[2,16,5],[2,11,1],[2,13,1],[2,22,3],[2,10,5],[2,2,2],[2,2,2],[2,6,1],[2,7,1],[2,4,2],[2,4,3],[2,7,3],[2,7,4],[2,1,1],[2,71,9],[2,4,8],[2,33,4],[2,16,2],[2,1,18],[2,15,1],[2,3,1],[2,8,1],[2,6,3],[2,4,2],[2,1,1],[2,7,2],[2,2,8],[2,2,1],[2,8,1],[2,1,3],[2,5,1],[2,2,2],[2,11,1],[2,17,3],[2,118,1],[2,8,4],[2,14,1],[2,3,4],[2,14,1],[2,2,2],[2,4,3],[2,2,1],[2,11,1],[2,8,10],[2,1,2],[2,3,3],[2,2,2],[2,12,1],[2,2,2],[2,26,3],[2,3,2],[2,3,3],[2,19,1],[2,1,13],[2,23,2],[2,3,1],[2,7,4],[2,10,4],[2,2,3],[2,71,3],[2,3,3],[2,23,1],[2,1,1],[2,34,3],[2,62,1],[2,4,1],[2,7,2],[2,2,8],[2,6,1],[2,20,3],[2,26,2],[2,5,2],[2,2,1],[2,7,1],[2,1,1],[2,7,2],[2,28,7],[2,4,1],[2,2,2],[2,4,1],[2,7,1],[2,2,3],[2,3,1],[2,8,3],[2,43,1],[2,2,1],[2,1,4],[2,2,1],[2,13,3],[2,4,2],[2,6,1],[2,17,1],[2,2,8],[2,32,1],[2,11,2],[2,5,2],[2,45,3],[2,9,1],[2,14,2],[2,9,1],[2,2,1],[2,10,5],[2,2,1],[2,13,1],[2,2,2],[2,3,5],[2,2,1],[2,17,3],[2,11,1],[2,15,1],[2,13,4],[2,7,7],[2,10,2],[2,6,4],[2,2,3],[2,1,3],[2,27,2],[2,2,3],[2,2,1],[2,3,1],[2,3,9],[2,3,46],[2,11,1],[2,30,1],[2,5,1],[2,8,8],[2,2,1],[2,1,1],[2,2,1],[2,6,7],[2,1,1],[2,4,1],[2,4,2],[2,15,2],[2,6,7],[2,4,2],[2,5,1],[2,1,4],[2,2,3],[2,1,2],[2,2,2],[2,1,7],[2,15,2],[2,18,3],[2,2,1],[2,6,1],[2,8,1],[2,134,20],[2,26,1],[2,2,2],[2,8,4],[2,1,1],[2,3,1],[2,14,1],[2,3,1],[2,26,1],[2,19,1],[2,1,1],[2,1,1],[2,7,1],[2,5,2],[2,5,8],[2,3,4],[2,1,1],[2,2,2],[2,16,1],[2,7,2],[2,6,1],[2,1,6],[2,4,3],[2,2,2],[2,2,2],[2,2
,1],[2,2,1],[2,1,2],[2,8,3],[2,4,1],[2,9,1],[2,18,33],[2,14,1],[2,1,1],[2,3,2],[2,7,1],[2,14,4],[2,4,2],[2,31,7],[2,19,2],[2,11,4],[2,2,1],[2,7,2],[2,2,1],[2,2,3],[2,52,4],[2,4,1],[2,1,1],[2,4,3],[2,11,1],[2,3,2],[2,6,1],[2,10,3],[2,6,1],[2,12,1],[2,10,2],[2,4,2],[2,23,2],[2,3,3],[2,8,1],[2,21,6],[2,2,2],[2,1,1],[2,1,1],[2,16,3],[2,9,2],[2,5,1],[2,2,2],[2,1,4],[2,4,1],[2,1,25],[2,24,2],[2,6,1],[2,3,4],[2,10,4],[2,6,2],[2,35,2],[2,2,2],[2,1,1],[2,25,10],[2,8,1],[2,1,2],[2,1,1],[2,2,1],[2,3,8],[2,2,1],[2,2,1],[2,5,2],[2,4,3],[2,2,8],[2,1,1],[2,4,2],[2,3,3],[2,12,1],[2,3,2],[2,4,1],[2,2,4],[2,7,2],[2,1,1],[2,73,14],[2,90,1],[2,4,1],[2,2,1],[2,1,1],[2,6,3],[2,1,1],[2,4,1],[2,10,3],[2,2,3],[2,1,1],[2,6,1],[2,37,2],[2,10,1],[2,2,2],[2,60,2],[2,16,3],[2,6,1],[2,1,1],[2,3,4],[2,38,5],[2,6,2],[2,2,1],[2,2,1],[2,9,2],[2,11,1],[2,6,1],[2,9,1],[2,2,2],[2,4,3],[2,8,1],[2,3,2],[2,1,9],[2,14,2],[2,8,1],[2,30,4],[2,2,1],[2,31,2],[2,31,1],[2,21,23],[2,1,5],[2,4,1],[2,2,1],[2,5,3],[2,4,2],[2,10,2],[2,2,2],[2,18,1],[2,15,1],[2,2,1],[2,1,2],[2,5,1],[2,13,1],[2,14,4],[2,1,4],[2,5,1],[2,109,3],[2,18,2],[2,1,2],[2,164,114],[2,8,1],[2,2,3],[2,4,1],[2,1,1],[2,10,1],[2,9,2],[2,4,3],[2,1,75],[2,6,1],[2,17,2],[2,3,1],[2,9,1],[2,2,1],[2,21,1],[2,30,3],[2,7,2],[2,2,2],[2,63,5],[2,16,3],[2,6,1],[2,2,8],[2,25,2],[2,31,3],[2,126,21],[2,10,1],[2,2,2],[2,14,7],[2,6,10],[2,4,3],[2,7,1],[2,12,1],[2,2,1],[2,3,2],[2,2,15],[2,1,4],[2,4,1],[2,3,1],[2,4,1],[2,6,2],[2,7,3],[2,2,3],[2,9,2],[2,6,1],[2,2,1],[2,16,1],[2,22,2],[2,10,1],[2,10,4],[2,7,2],[2,13,1],[2,3,1],[2,7,2],[2,23,12],[2,3,1],[2,6,1],[2,4,2],[2,29,2],[2,5,3],[2,8,1],[2,1,1],[2,6,1],[2,3,1],[2,17,2],[2,15,1],[2,2,1],[2,6,1],[2,2,2],[2,30,1],[2,3,1],[2,2,2],[2,2,5],[2,2,1],[2,37,5],[2,6,2],[2,7,6],[2,2,3],[2,3,3],[2,2,5],[2,75,6],[2,2,3],[2,10,1],[2,2,3],[2,7,2],[2,30,1],[2,12,33],[2,1,1],[2,3,4],[2,14,1],[2,9,2],[2,8,1],[2,1,1],[2,9,1],[2,4,1],[2,2,1],[2,7,1],[2,4,1],[2,3,1],[2,4,3],[2,1,1],[2,5,2],[2,3,4],[2,4,2],[2,6,3],[2,13,5],[2,4,2],[2,6,1],[2,2,5],[2,2,3],[2,1,1],[2,14,1],[2,5,1],[2,4,2],[2,9,1],[2,7,6],[2,4,1],[2,19,2],[2,23,1],[2,20,7],[2,9,1],[2,4,1],[2,12,2],[2,9,4],[2,3,2],[2,3,7],[2,3,1],[2,10,2],[2,6,1],[2,7,1],[2,1,1],[2,9,1],[2,6,1],[2,1,1],[2,17,2],[2,9,1],[2,5,2],[2,1,1],[2,11,2],[2,9,1],[2,1,1],[2,3,6],[2,2,1],[2,5,9],[2,12,2],[2,2,1],[2,6,2],[2,17,4],[2,2,2],[2,7,1],[2,596,5],[2,6,1],[2,2,1],[2,58,125],[2,6,1],[2,8,1],[2,2,1],[2,3,1],[2,1,2],[2,11,4],[2,1,1],[2,9,6],[2,2,8],[2,1,1],[2,6,2],[2,1,1],[2,2,1],[2,7,2],[2,7,3],[2,14,2],[2,1,1],[2,18,9],[2,2,5],[2,2,12],[2,8,4],[2,6,4],[2,3,1],[2,19,2],[2,4,1],[2,2,1],[2,4,3],[2,3,1],[2,13,1],[2,1,1],[2,7,1],[2,1,1],[2,8,1],[2,13,14],[2,11,1],[2,31,1],[2,4,1],[2,6,1],[2,3,2],[2,26,1],[2,4,2],[2,1,1],[2,2,2],[2,1,2],[2,1,1],[2,7,1],[2,8,1],[2,6,2],[2,19,13],[2,2,3],[2,8,3],[2,1,6],[2,5,1],[2,1,1],[2,6,1],[2,9,1],[2,2,2],[2,35,1],[2,1,1],[2,27,2],[2,54,2],[2,6,2],[2,5,1],[2,2,1],[2,2,4],[2,2,1],[2,2,1],[2,14,1],[2,9,1],[2,53,17],[2,2,1],[2,10,1],[2,9,1],[2,23,1],[2,7,1],[2,12,4],[2,1,2],[2,8,1],[2,7,4],[2,2,1],[2,2,1],[2,3,1],[2,11,1],[2,2,2],[2,6,1],[2,2,1],[2,18,4],[2,3,4],[2,8,2],[2,13,1],[2,2,1],[2,1,2],[2,14,4],[2,8,11],[2,1,1],[2,8,3],[2,7,3],[2,90,1],[2,20,2],[2,16,1],[2,20,2],[2,3,1],[2,8,10],[2,10,1],[2,10,1],[2,1,1],[2,3,1],[2,5,1],[2,37,3],[2,24,3],[2,10,1],[2,3,1],[2,2,4],[2,4,1],[2,19,2],[2,1,1],[2,5,1],[2,8,1],[2,3,1],[2,1,1],[2,2,1],[2,2,32],[2,2,1],[2,4,1],[2,1,1],[2,2,2],[2,5,1],[2,2,3],[2,25,9],[2,2,1],[2,4,4],[2,2,1],[2,15,1],[2,59,1],[2,3,2],[2,4,1],[2,9,2],[2,3,10],[2,6,1],[2,5,5],[2,8,2]
,[2,2,2],[2,4,2],[2,10,1],[2,126,1],[2,3,1],[2,8,1],[2,9,2],[2,1,30],[2,25,1],[2,7,3],[2,2,2],[2,1,3],[2,21,1],[2,38,1],[2,48,1],[2,22,1],[2,4,2],[2,55,2],[2,5,1],[2,15,1],[2,14,44],[2,4,1],[2,1,2],[2,2,3],[2,2,1],[2,3,3],[2,6,1],[2,2,1],[2,26,7],[2,4,1],[2,1,2],[2,3,2],[2,6,2],[2,10,1],[2,18,3],[2,2,1],[2,38,2],[2,1,1],[2,8,1],[2,8,1],[2,3,1],[2,4,1],[2,1,1],[2,1,2],[2,4,1],[2,26,2],[2,3,3],[2,2,1],[2,6,1],[2,19,1],[2,3,4],[2,2,1],[2,4,1],[2,11,1],[2,9,1],[2,9,1],[2,9,1],[2,1,1],[2,1,1],[2,7,1],[2,2,1],[2,11,4],[2,10,2],[2,4,1],[2,6,1],[2,4,1],[2,8,1],[2,11,1],[2,1,1],[2,7,1],[2,8,2],[2,9,1],[2,8,1],[2,41,2],[2,2,4],[2,1,6],[2,2,1],[2,6,3],[2,128,5],[2,2,1],[2,13,13],[2,6,1],[2,1,3],[2,3,3],[2,7,2],[2,10,12],[2,2,1],[2,8,1],[2,1,1],[2,7,1],[2,2,1],[2,10,2],[2,11,10],[2,1,1],[2,8,3],[2,4,5],[2,2,1],[2,14,2],[2,4,1],[2,4,1],[2,7,1],[2,6,1],[2,7,3],[2,1,1],[2,2,1],[2,7,2],[2,2,1],[2,6,1],[2,8,1],[2,2,4],[2,6,1],[2,43,1],[2,108,3],[2,8,1],[2,13,1],[2,4,1],[2,10,3],[2,2,1],[2,24,2],[2,1,2],[2,4,2],[2,2,2],[2,40,6],[2,6,2],[2,6,2],[2,4,3],[2,28,5],[2,4,1],[2,15,1],[2,12,1],[2,1,1],[2,27,1],[3,1,1],[3,5,2],[3,16,2],[3,16,3],[3,1,2],[3,98,2],[3,91,7],[3,6,37],[3,4,1],[3,9,1],[3,97,2],[3,6,1],[3,23,3],[3,115,1],[3,2,1],[3,1,1],[3,1,1],[3,14,4],[3,1,1],[3,28,1],[3,1,1],[3,6,1],[3,15,5],[3,3,1],[3,52,1],[3,2,3],[3,3,1],[3,4,5],[3,13,1],[3,16,3],[3,13,1],[3,17,1],[3,4,4],[3,6,7],[3,14,1],[3,32,1],[3,3,3],[3,11,4],[3,1,1],[3,8,6],[3,9,7],[3,2,1],[3,9,2],[3,5,2],[3,26,12],[3,11,3],[3,12,2],[3,4,2],[3,6,2],[3,30,6],[3,1,2],[3,10,1],[3,1,1],[3,4,1],[3,7,1],[3,30,29],[3,2,3],[3,2,2],[3,2,1],[3,11,1],[3,2,3],[3,3,1],[3,9,1],[3,2,2],[3,5,1],[3,1,2],[3,1,13],[3,6,9],[3,1,1],[3,6,2],[3,1,3],[3,4,1],[3,6,1],[3,9,3],[3,1,1],[3,9,2],[3,19,45],[3,2,1],[3,7,8],[3,21,3],[3,6,2],[3,2,1],[3,6,1],[3,5,1],[3,2,1],[3,15,7],[3,2,1],[3,9,3],[3,11,1],[3,4,1],[3,7,1],[3,2,1],[3,19,1],[3,5,1],[3,2,1],[3,1,1],[3,22,3],[3,21,5],[3,13,1],[3,2,1],[3,4,1],[3,23,1],[3,8,1],[3,3,2],[3,2,2],[3,4,1],[3,12,2],[3,5,2],[3,16,8],[3,6,1],[3,1,2],[3,2,1],[3,7,1],[3,6,1],[3,6,3],[3,45,1],[3,4,5],[3,1,2],[3,3,1],[3,2,1],[3,1,1],[3,12,1],[3,8,1],[3,3,1],[3,6,1],[3,2,2],[3,9,2],[3,5,2],[3,2,1],[3,3,1],[3,15,1],[3,11,1],[3,4,1],[3,9,2],[3,3,1],[3,4,1],[3,1,3],[3,6,15],[3,6,3],[3,2,6],[3,1,3],[3,3,2],[3,15,1],[3,6,1],[3,7,1],[3,5,1],[3,9,1],[3,49,2],[3,5,2],[3,9,4],[3,39,1],[3,4,3],[3,1,5],[3,1,2],[3,2,1],[3,14,2],[3,4,3],[3,18,1],[3,5,4],[3,19,3],[3,3,1],[3,2,1],[3,3,2],[3,48,10],[3,1,1],[3,5,6],[3,12,3],[3,1,2],[3,5,4],[3,4,1],[3,4,1],[3,5,1],[3,1,1],[3,10,1],[3,10,2],[3,6,3],[3,2,7],[3,4,1],[3,9,2],[3,1,1],[3,2,1],[3,4,6],[3,1,1],[3,25,9],[3,11,1],[3,2,1],[3,8,2],[3,1,1],[3,9,3],[3,4,6],[3,1,7],[3,1,1],[3,4,1],[3,11,2],[3,14,1],[3,65,2],[3,6,1],[3,5,2],[3,2,2],[3,13,1],[3,2,5],[3,2,1],[3,4,2],[3,25,1],[3,2,1],[3,2,3],[3,9,1],[3,5,5],[3,46,1],[3,6,2],[3,12,9],[3,4,4],[3,2,3],[3,13,5],[3,39,16],[3,3,1],[3,1,2],[3,68,14],[3,5,1],[3,11,1],[3,7,1],[3,4,1],[3,53,11],[3,4,3],[3,4,1],[3,2,1],[3,4,1],[3,1,1],[3,1,2],[3,8,4],[3,5,1],[3,6,5],[3,6,13],[3,403,3],[3,23,1],[3,3,3],[3,14,1],[3,10,1],[3,3,2],[3,46,11],[3,4,3],[3,29,1],[3,41,2],[3,11,1],[3,15,3],[3,11,2],[3,6,1],[3,3,1],[3,17,2],[3,14,3],[3,5,4],[3,2,1],[3,2,1],[3,5,6],[3,6,1],[3,54,2],[3,2,1],[3,4,2],[3,1,1],[3,7,1],[3,8,34],[3,7,1],[3,1,2],[3,3,2],[3,2,5],[3,1,1],[3,15,12],[3,13,1],[3,5,1],[3,1,1],[3,5,1],[3,39,1],[3,26,9],[3,11,1],[3,6,1],[3,2,1],[3,19,4],[3,4,5],[3,10,1],[3,11,6],[3,4,1],[3,38,1],[3,1,1],[3,1,3],[3,2,1],[3,5,10],[3,4,1],[3,18,2],[3,4,1],[3,19,1],[3,1,1],[3,8,6],[3,1,1],[3,9
,1],[3,8,3],[3,15,4],[3,9,3],[3,13,1],[3,10,1],[3,1,2],[3,5,4],[3,4,2],[3,4,1],[3,28,1],[3,6,2],[3,9,1],[3,1,2],[3,2,2],[3,25,1],[3,5,8],[3,5,3],[3,8,2],[3,2,1],[3,14,5],[3,2,1],[3,11,3],[3,10,1],[3,2,2],[3,1,1],[3,3,1],[3,9,1],[3,39,9],[3,27,2],[3,1,1],[3,1,3],[3,12,3],[3,6,1],[3,14,2],[3,17,3],[3,198,1],[3,3,1],[3,5,1],[3,1,1],[3,2,4],[3,12,1],[3,31,1],[3,8,14],[3,25,2],[3,16,2],[3,18,2],[3,2,3],[3,2,3],[3,6,28],[3,22,3],[3,6,1],[3,8,2],[3,4,3],[3,3,3],[3,8,1],[3,1,1],[3,1,2],[3,1,1],[3,1,1],[3,1,2],[3,6,2],[3,2,3],[3,4,1],[3,3,1],[3,1,1],[3,3,2],[3,8,10],[3,6,1],[3,2,1],[3,2,1],[3,5,1],[3,29,6],[3,10,1],[3,3,8],[3,1,3],[3,2,2],[3,3,1],[3,3,4],[3,5,19],[3,15,1],[3,65,1],[3,2,2],[3,60,3],[3,52,1],[3,1,1],[3,4,2],[3,4,1],[3,6,1],[3,7,4],[3,1,1],[3,13,1],[3,8,3],[3,13,1],[3,6,1],[3,3,2],[3,14,1],[3,2,2],[3,4,1],[3,1,1],[3,11,29],[3,7,1],[3,21,6],[3,4,1],[3,1,1],[3,2,1],[3,9,1],[3,2,4],[3,3,1],[3,2,3],[3,1,2],[3,3,2],[3,3,4],[3,16,2],[3,9,2],[3,2,1],[3,17,8],[3,9,4],[3,7,1],[3,6,4],[3,1,2],[3,2,1],[3,4,4],[3,2,1],[3,3,1],[3,3,1],[3,11,1],[3,2,2],[3,2,1],[3,2,3],[3,2,2],[3,10,6],[3,10,4],[3,1,1],[3,8,3],[3,29,2],[3,7,1],[3,2,1],[3,4,1],[3,11,1],[3,2,1],[3,2,2],[3,13,3],[3,4,1],[3,3,1],[3,2,4],[3,18,1],[3,12,1],[3,6,3],[3,3,1],[3,5,1],[3,3,2],[3,9,2],[3,5,1],[3,5,1],[3,11,1],[3,1,1],[3,39,18],[3,3,2],[3,4,1],[3,17,2],[3,14,2],[3,10,6],[3,1,1],[3,4,5],[3,2,1],[3,4,6],[3,12,1],[3,106,80],[3,32,1],[3,7,1],[3,8,1],[3,2,1],[3,33,2],[3,33,7],[3,10,1],[3,3,2],[3,4,3],[3,16,3],[3,7,1],[3,8,1],[3,16,1],[3,8,1],[3,8,1],[3,30,1],[3,7,1],[3,2,1],[3,3,10],[3,27,1],[3,2,1],[3,1,3],[3,2,1],[3,23,1],[3,1,1],[3,5,2],[3,6,1],[3,2,1],[3,2,13],[3,1,3],[3,6,2],[3,5,1],[3,26,1],[3,4,5],[3,2,1],[3,9,1],[3,6,1],[3,2,1],[3,21,2],[3,15,1],[3,4,2],[3,2,1],[3,30,1],[3,4,2],[3,2,1],[3,2,58],[3,8,2],[3,13,1],[3,16,2],[3,10,6],[3,6,1],[3,6,1],[3,2,6],[3,1,1],[3,2,4],[3,11,9],[3,25,2],[3,4,2],[3,1,1],[3,9,9],[3,1,9],[3,3,3],[3,4,1],[3,2,3],[3,5,2],[3,2,7],[3,2,1],[3,2,1],[3,6,3],[3,3,4],[3,1,2],[3,4,3],[3,7,118],[3,7,1],[3,6,1],[3,3,1],[3,1,15],[3,1,2],[3,4,2],[3,2,1],[3,4,1],[3,6,1],[3,23,1],[3,1,1],[3,3,1],[3,4,1],[3,10,3],[3,2,2],[3,6,5],[3,8,1],[3,3,1],[3,4,1],[3,20,2],[3,14,2],[3,7,1],[3,21,29],[3,10,2],[3,10,2],[3,3,3],[3,2,1],[3,3,2],[3,24,3],[3,3,1],[3,9,1],[3,6,1],[3,22,1],[3,13,1],[3,5,2],[3,1,1],[3,9,1],[3,10,2],[3,4,1],[3,7,1],[3,2,1],[3,12,4],[3,48,2],[3,43,1],[3,6,1],[3,1,1],[3,4,1],[3,14,10],[3,2,1],[3,1,1],[3,1,1],[3,3,1],[3,11,5],[3,36,1],[3,4,49],[3,11,1],[3,8,1],[3,2,2],[3,3,1],[3,3,1],[3,8,3],[3,15,8],[3,30,9],[3,23,5],[3,10,1],[3,7,6],[3,1,1],[3,9,2],[3,6,1],[3,3,1],[3,3,1],[3,2,1],[3,21,1],[3,13,2],[3,4,2],[3,9,2],[3,8,1],[3,2,2],[3,4,2],[3,1,1],[3,9,2],[3,32,2],[3,2,2],[3,10,1],[3,1,4],[3,4,3],[3,14,3],[3,5,2],[3,2,1],[3,3,1],[3,5,3],[3,14,3],[3,2,3],[3,6,1],[3,4,1],[3,1,1],[3,16,1],[3,3,1],[3,2,1],[3,5,1],[3,33,1],[3,3,1],[3,14,4],[3,8,3],[3,12,2],[3,14,1],[3,2,1],[3,1,1],[3,13,2],[3,8,1],[3,9,1],[3,17,1],[3,14,2],[3,16,1],[3,12,4],[3,2,1],[3,2,2],[3,20,1],[3,2,2],[3,8,4],[3,7,3],[3,8,1],[3,1,2],[3,5,5],[3,29,1],[3,1,1],[3,2,1],[3,8,2],[3,2,1],[3,7,9],[3,3,2],[3,7,1],[3,6,1],[3,6,2],[3,1,26],[3,3,3],[3,7,1],[3,2,2],[3,8,2],[3,7,1],[3,3,1],[3,4,4],[3,11,1],[3,5,15],[3,28,1],[3,3,8],[3,3,3],[3,2,4],[3,6,4],[3,3,2],[3,2,2],[3,5,1],[3,12,2],[3,10,2],[3,1,1],[3,6,1],[3,2,1],[3,3,2],[4,8,1],[4,3,1],[4,23,1],[4,4,9],[4,6,2],[4,9,1],[4,9,6],[4,5,9],[4,8,1],[4,2,1],[4,2,3],[4,8,1],[4,1,1],[4,4,1],[4,8,1],[4,2,1],[4,16,1],[4,1,8],[4,4,1],[4,1,3],[4,18,1],[4,2,1],[4,4,9],[4,2,1],[4,3,1],[4,9,2],[4,2,1],[4,7,3],[4
,5,4],[4,27,2],[4,1,1],[4,8,2],[4,7,1],[4,8,1],[4,9,4],[4,3,2],[4,6,4],[4,2,2],[4,13,5],[4,8,1],[4,10,2],[4,1,1],[4,2,1],[4,1,2],[4,6,2],[4,5,2],[4,8,2],[4,16,2],[4,7,2],[4,102,5],[4,2,2],[4,1,1],[4,2,1],[4,1,2],[4,2,1],[4,29,4],[4,2,1],[4,1,1],[4,1,4],[4,3,2],[4,6,1],[4,19,2],[4,4,3],[4,1,12],[4,1,1],[4,62,3],[4,14,1],[4,1,1],[4,1,1],[4,7,4],[4,9,1],[4,15,1],[4,16,15],[4,2,2],[4,2,1],[4,41,3],[4,7,8],[4,7,3],[4,5,1],[4,9,1],[4,6,1],[4,1,3],[4,15,1],[4,5,4],[4,28,2],[4,11,3],[4,15,1],[4,1,1],[4,1,1],[4,12,1],[4,16,4],[4,12,5],[4,5,2],[4,8,4],[4,124,115],[4,11,3],[4,46,10],[4,4,1],[4,3,1],[4,2,1],[4,27,1],[4,1,1],[4,20,1],[4,2,1],[4,4,1],[4,53,1],[4,18,1],[4,1,1],[4,8,2],[4,3,1],[4,2,1],[4,5,1],[4,2,3],[4,2,5],[4,3,1],[4,8,1],[4,2,5],[4,8,2],[4,9,2],[4,48,1],[4,9,1],[4,20,2],[4,4,4],[4,3,2],[4,8,2],[4,6,2],[4,12,6],[4,9,1],[4,3,1],[4,4,1],[4,5,3],[4,5,1],[4,8,4],[4,3,1],[4,7,1],[4,6,2],[4,15,16],[4,6,1],[4,50,4],[4,23,4],[4,9,7],[4,8,2],[4,1,1],[4,2,1],[4,9,1],[4,12,1],[4,4,3],[4,2,2],[4,42,4],[4,1,1],[4,6,1],[4,11,10],[4,6,11],[4,7,1],[4,4,2],[4,4,2],[4,6,1],[4,59,4],[4,1,1],[4,2,7],[4,12,20],[4,11,3],[4,4,1],[4,12,3],[4,6,3],[4,7,2],[4,17,4],[4,106,8],[4,6,2],[4,7,1],[4,1,1],[4,8,1],[4,4,6],[4,3,1],[4,4,3],[4,14,3],[4,15,2],[4,4,1],[4,44,91],[4,7,2],[4,3,2],[4,2,1],[4,23,2],[4,30,1],[4,2,2],[4,10,1],[4,6,9],[4,6,2],[4,3,2],[4,3,2],[4,20,1],[4,4,1],[4,18,2],[4,12,1],[4,20,14],[4,10,1],[4,3,1],[4,2,1],[4,3,2],[4,3,3],[4,6,3],[4,2,4],[4,8,1],[4,8,5],[4,3,1],[4,10,2],[4,2,1],[4,1,1],[4,10,1],[4,25,2],[4,1,1],[4,4,1],[4,63,2],[4,1,1],[4,4,1],[4,6,7],[4,2,3],[4,8,1],[4,19,2],[4,11,1],[4,30,10],[4,4,4],[4,2,3],[4,2,1],[4,43,29],[4,2,1],[4,1,1],[4,17,1],[4,14,1],[4,13,1],[4,6,4],[4,2,2],[4,1,2],[4,3,1],[4,7,3],[4,4,1],[4,4,1],[4,1,1],[4,13,5],[4,2,1],[4,1,1],[4,5,1],[4,4,2],[4,13,2],[4,10,4],[4,8,1],[4,3,1],[4,2,2],[4,8,3],[4,4,2],[4,6,1],[4,7,1],[4,14,29],[4,19,1],[4,7,1],[4,19,1],[4,24,2],[4,2,1],[4,1,1],[4,28,1],[4,1,1],[4,2,1],[4,3,1],[4,2,1],[4,1,7],[4,2,4],[4,3,1],[4,29,1],[4,2,1],[4,14,1],[4,2,1],[4,28,3],[4,11,3],[4,1,2],[4,21,2],[4,1,1],[4,15,1],[4,17,1],[4,16,1],[4,13,1],[4,2,1],[4,15,5],[4,19,1],[4,17,1],[4,5,3],[4,12,2],[4,33,1],[4,8,1],[4,15,4],[4,2,11],[4,4,1],[4,1,10],[4,39,1],[4,28,1],[4,25,2],[4,1,1],[4,14,2],[4,8,32],[4,9,1],[4,7,1],[4,6,2],[4,1,2],[4,3,1],[4,6,2],[4,12,2],[4,2,2],[4,5,2],[4,18,1],[4,5,3],[4,6,2],[4,25,1],[4,3,16],[4,14,4],[4,2,6],[4,14,2],[4,3,1],[4,4,1],[4,9,3],[4,28,2],[4,9,1],[4,2,1],[4,7,1],[4,2,1],[4,1,4],[4,4,3],[4,1,1],[4,16,6],[4,3,1],[4,10,1],[4,12,3],[4,8,1],[4,4,1],[4,15,2],[4,4,1],[4,2,3],[4,2,9],[4,4,1],[4,7,2],[4,14,1],[4,31,3],[4,13,1],[4,19,2],[4,8,3],[4,2,1],[4,12,1],[4,5,1],[4,45,3],[4,6,1],[4,1,1],[4,12,6],[4,4,3],[4,3,1],[4,5,2],[4,4,4],[4,19,2],[4,8,1],[4,2,1],[4,27,2],[4,73,3],[4,22,2],[4,1,2],[4,7,46],[4,9,2],[4,2,1],[4,524,305],[4,7,1],[4,26,1],[4,2,1],[4,6,1],[4,30,2],[4,6,1],[4,25,92],[4,2,1],[4,13,1],[4,1,4],[4,1,7],[4,6,1],[4,8,2],[4,6,1],[4,4,2],[4,2,6],[4,12,2],[4,2,2],[4,5,2],[4,3,2],[4,13,1],[4,4,1],[4,6,3],[4,14,1],[4,15,1],[4,25,1],[4,3,1],[4,9,4],[4,94,3],[4,11,2],[4,12,4],[4,7,3],[4,3,1],[4,9,2],[4,3,1],[4,2,1],[4,8,3],[4,7,5],[4,2,45],[4,10,1],[4,10,4],[4,5,3],[4,6,6],[5,5,1],[5,2,1],[5,3,3],[5,11,2],[5,28,1],[5,8,1],[5,4,1],[5,4,1],[5,12,1],[5,7,1],[5,1,1],[5,38,7],[5,6,2],[5,4,2],[5,5,1],[5,2,2],[5,2,7],[5,1,4],[5,4,1],[5,4,1],[5,1,2],[5,3,1],[5,7,1],[5,2,1],[5,10,2],[5,4,1],[5,2,1],[5,2,2],[5,3,1],[5,15,78],[5,2,1],[5,1,5],[5,10,1],[5,6,4],[5,10,2],[5,5,1],[5,1,1],[5,1,1],[5,2,2],[5,6,1],[5,2,2],[5,6,2],[5,10,2],[5,3,1],[5
,6,2],[5,4,3],[5,16,5],[5,47,48],[5,2,5],[5,6,7],[5,4,2],[5,3,1],[5,2,1],[5,8,1],[5,7,1],[5,2,2],[5,2,1],[5,3,1],[5,7,4],[5,1,1],[5,1,1],[5,8,6],[5,1,4],[5,9,3],[5,11,4],[5,6,1],[5,6,1],[5,2,1],[5,5,1],[5,84,1],[5,2,33],[5,8,1],[5,6,3],[5,5,3],[5,2,1],[5,10,2],[5,3,1],[5,68,9],[5,6,2],[5,21,11],[5,3,4],[5,3,1],[5,16,3],[5,2,2],[5,2,1],[5,14,2],[5,24,2],[5,19,1],[5,1,4],[5,1,1],[5,3,1],[5,6,1],[5,2,1],[5,5,2],[5,4,3],[5,26,3],[5,2,1],[5,6,4],[5,2,1],[5,6,3],[5,5,1],[5,8,3],[5,1,3],[5,9,1],[5,1,2],[5,11,2],[5,23,1],[5,7,1],[5,2,2],[5,3,2],[5,2,1],[5,11,2],[5,8,2],[5,1,1],[5,4,1],[5,2,1],[5,7,1],[5,11,1],[5,1,1],[5,33,1],[5,4,1],[5,5,1],[5,17,3],[5,1,2],[5,18,2],[5,1,2],[5,1,1],[5,2,3],[5,4,2],[5,2,1],[5,13,7],[5,5,1],[5,19,4],[5,23,9],[5,11,6],[5,7,2],[5,10,1],[5,2,1],[5,26,1],[5,3,3],[5,3,2],[5,3,2],[5,15,3],[5,2,1],[5,3,1],[5,4,1],[5,8,1],[5,4,1],[5,23,1],[5,6,1],[5,1,3],[5,124,17],[5,1,1],[5,1,1],[5,15,1],[5,11,2],[5,2,1],[5,2,2],[5,3,2],[5,1,1],[5,6,4],[5,6,1],[5,3,3],[5,6,5],[5,17,1],[5,7,2],[5,5,1],[5,11,1],[5,3,2],[5,36,2],[5,17,7],[5,4,1],[5,7,2],[5,2,1],[5,2,1],[5,2,1],[5,7,10],[5,4,1],[5,1,3],[5,19,2],[5,2,2],[5,3,1],[5,8,3],[5,4,1],[5,15,1],[5,2,3],[5,13,2],[5,1,3],[5,7,1],[5,23,48],[5,9,1],[5,12,10],[5,16,1],[5,10,1],[5,7,5],[5,2,1],[5,3,1],[5,23,2],[5,4,1],[5,18,1],[5,13,2],[5,54,136],[5,6,2],[5,2,2],[5,5,1],[5,6,1],[5,15,8],[5,14,9],[5,4,1],[5,7,2],[5,3,3],[5,117,5],[5,25,8],[5,14,4],[5,25,3],[5,7,1],[5,7,1],[5,15,3],[5,3,2],[5,4,1],[5,6,4],[5,14,4],[5,7,1],[5,20,1],[5,6,5],[5,12,1],[5,9,3],[5,2,1],[5,4,20],[5,4,3],[5,1,1],[5,1,1],[5,8,1],[5,4,1],[5,1,1],[5,6,3],[5,19,1],[5,14,1],[5,22,2],[5,2,1],[5,11,2],[5,1,1],[5,10,1],[5,4,1],[5,23,3],[5,3,1],[5,15,1],[5,8,4],[5,11,4],[5,4,1],[5,2,1],[5,8,6],[5,2,4],[5,2,7],[5,3,2],[5,2,1],[5,1,1],[5,1,1],[5,11,2],[5,4,10],[5,11,4],[5,110,4],[5,6,1],[5,2,1],[5,96,34],[6,4,1],[6,7,3],[6,2,1],[6,6,2],[6,10,1],[6,2,1],[6,10,1],[6,59,2],[6,7,4],[6,4,2],[6,3,1],[6,6,1],[6,1,4],[6,7,3],[6,2,3],[6,1,1],[6,12,1],[6,1,39],[6,28,1],[6,3,4],[6,8,3],[6,4,4],[6,9,2],[6,15,1],[6,10,1],[6,1,1],[6,2,1],[6,7,1],[6,2,1],[6,93,1],[6,14,6],[6,2,2],[6,55,39],[6,15,2],[6,23,3],[6,3,3],[6,35,2],[6,5,15],[6,1,7],[6,8,19],[6,10,10],[6,3,2],[6,6,3],[6,1,2],[6,6,1],[6,2,1],[6,4,1],[6,127,20],[6,20,18],[6,3,1],[6,9,2],[6,2,3],[6,10,1],[6,27,1],[6,9,1],[6,9,1],[6,28,1],[6,1,1],[6,10,1],[6,11,1],[6,5,1],[6,4,1],[6,82,35],[6,2,1],[6,1,1],[6,3,1],[6,2,1],[6,2,11],[6,2,8],[6,3,2],[6,12,3],[6,5,6],[6,42,4],[6,8,1],[6,2,1],[6,2,2],[6,10,3],[6,6,2],[6,48,2],[6,2,3],[6,2,2],[6,2,1],[6,4,1],[6,10,1],[6,1,1],[6,7,1],[6,35,1],[6,17,1],[6,21,2],[6,1,1],[6,4,2],[6,25,1],[6,7,2],[6,12,4],[6,2,6],[6,24,4],[6,2,1],[6,5,1],[6,2,1],[6,2,1],[6,3,2],[6,4,2],[6,2,1],[6,2,1],[6,2,9],[6,2,2],[6,5,1],[6,8,10],[6,1,1],[6,12,2],[6,10,1],[6,4,2],[6,12,4],[6,1,3],[6,3,2],[6,8,1],[6,4,4],[6,12,5],[6,4,2],[6,10,1],[6,1,1],[6,12,1],[6,6,4],[6,2,1],[6,3,2],[6,1,1],[6,3,5],[6,6,1],[6,32,1],[6,10,1],[6,6,5],[6,27,2],[6,7,1],[6,2,1],[6,10,2],[6,5,1],[6,8,2],[6,3,2],[6,9,2],[6,22,1],[6,2,2],[6,10,1],[6,3,4],[6,1,1],[6,3,6],[6,8,2],[6,44,1],[6,1,1],[6,9,7],[6,9,5],[6,19,4],[6,7,1],[6,1,1],[6,10,1],[6,14,2],[6,4,3],[6,4,1],[6,6,1],[6,3,1],[6,4,1],[6,6,3],[6,6,2],[6,6,1],[6,1,3],[6,12,13],[6,3,2],[6,1,4],[6,15,1],[6,39,4],[6,5,1],[6,1,5],[6,11,3],[6,5,7],[6,9,2],[6,1,1],[6,12,1],[6,12,1],[6,1,4],[6,11,1],[6,3,1],[6,6,2],[6,5,2],[6,2,1],[6,1,2],[6,2,1],[6,41,23],[6,3,1],[6,15,1],[6,1,1],[6,1,1],[6,2,2],[6,3,1],[6,10,1],[6,17,6],[6,5,2],[6,30,1],[7,2,2],[7,10,2],[7,8,3],[7,9,4],[7,4,1],[7,8,1],[7,2,1],[7,7,134
],[7,16,1],[7,5,3],[7,3,1],[7,6,2],[7,1,1],[7,5,1],[7,5,1],[7,2,1],[7,24,1],[7,8,4],[7,9,2],[7,1,1],[7,6,2],[7,9,2],[7,1,1],[7,5,28],[7,1,1],[7,2,2],[7,7,2],[7,11,1],[7,2,1],[7,17,32],[7,5,1],[7,2,1],[7,3,2],[7,7,4],[7,15,3],[7,3,1],[7,6,2],[7,1,1],[7,2,1],[7,1,1],[7,1,11],[7,2,1],[7,8,1],[7,6,1],[7,2,1],[7,57,1],[7,20,46],[7,6,2],[7,6,1],[7,1,2],[7,28,7],[7,3,5],[7,4,1],[7,4,6],[7,2,2],[7,3,3],[7,2,3],[7,2,1],[7,1,1],[7,2,6],[7,4,1],[7,3,1],[7,23,1],[7,7,2],[7,7,1],[7,4,3],[7,2,1],[7,1,1],[7,4,2],[7,15,2],[7,6,1],[7,2,1],[7,14,1],[7,1,1],[7,1,1],[7,4,2],[7,2,1],[7,4,1],[7,2,1],[7,4,3],[7,22,1],[7,10,1],[7,2,1],[7,1,2],[7,7,2],[7,1,2],[7,12,1],[7,3,1],[7,2,4],[7,3,8],[7,2,1],[7,6,1],[7,5,3],[7,8,2],[7,5,1],[7,6,1],[7,6,1],[7,5,1],[7,9,5],[7,3,1],[7,3,2],[7,3,19],[7,28,3],[7,2,2],[7,3,1],[7,51,4],[7,2,1],[7,2,1],[7,22,2],[7,5,1],[7,2,1],[7,4,2],[7,2,1],[7,6,2],[7,6,1],[7,3,1],[7,37,1],[7,9,1],[7,8,2],[7,2,1],[7,4,1],[7,2,1],[7,18,1],[7,9,2],[7,1,1],[7,5,1],[7,2,1],[7,13,1],[7,45,1],[7,1,3],[7,7,5],[7,16,1],[7,7,1],[7,1,1],[7,3,1],[7,8,1],[7,1,1],[7,1,4],[7,2,2],[7,6,1],[7,6,1],[7,2,1],[7,16,1],[7,11,1],[7,1,1],[7,2,1],[7,3,2],[7,8,8],[7,33,1],[7,2,8],[7,4,1],[7,6,7],[7,12,3],[7,17,1],[7,9,5],[7,3,2],[7,3,2],[7,4,1],[7,1,1],[7,2,2],[7,6,1],[8,9,1],[8,79,3],[8,3,1],[8,14,4],[8,2,4],[8,10,5],[8,7,3],[8,8,1],[8,6,1],[8,7,1],[8,8,2],[8,9,1],[8,30,2],[8,1,1],[8,1,5],[8,15,2],[8,10,3],[8,5,3],[8,1,2],[8,3,1],[8,16,1],[8,3,1],[8,3,3],[8,3,4],[8,2,1],[8,6,2],[8,4,4],[8,5,3],[8,8,4],[8,8,3],[8,4,3],[8,13,7],[8,2,1],[8,2,1],[8,1,1],[8,4,1],[8,10,3],[8,16,9],[8,3,2],[8,1,2],[8,2,5],[8,5,2],[8,156,14],[8,1,1],[8,5,1],[8,252,690],[8,5,1],[8,25,21],[8,1,1],[8,39,12],[8,1,4],[8,6,1],[8,25,7],[8,1,1],[8,7,1],[8,46,11],[8,3,1],[8,1,1],[8,14,1],[8,24,1],[8,16,3],[8,6,3],[8,5,1],[8,1,2],[8,12,2],[8,2,1],[8,2,5],[8,6,1],[8,6,1],[8,14,1],[8,7,1],[8,6,1],[8,4,6],[8,1,2],[8,3,1],[8,2,14],[8,7,12],[8,2,2],[8,25,15],[8,8,3],[8,6,6],[8,5,1],[8,1,1],[8,2,3],[8,18,3],[8,2,2],[8,3,1],[8,4,1],[8,3,3],[8,4,2],[8,12,2],[8,1,1],[8,4,1],[8,18,1],[8,2,2],[8,11,3],[8,5,1],[8,6,1],[8,13,1],[8,6,1],[8,23,1],[8,18,3],[8,13,2],[8,4,1],[8,38,4],[8,1,1],[8,6,1],[8,10,2],[8,2,7],[8,10,7],[8,1,1],[8,4,7],[8,2,1],[8,2,2],[8,7,1],[8,17,1],[8,10,5],[8,4,4],[8,8,4],[8,3,2],[8,2,1],[8,33,1],[8,8,6],[8,15,1],[8,2,1],[8,7,4],[8,6,3],[8,2,1],[8,1,2],[8,3,1],[8,4,1],[8,4,2],[8,27,1],[8,10,1],[9,8,2],[9,2,2],[9,7,1],[9,11,1],[9,35,5],[9,3,1],[9,2,2],[9,6,7],[9,16,2],[9,7,15],[9,3,1],[9,9,1],[9,5,1],[9,3,1],[9,3,1],[9,4,1],[9,2,5],[9,1,1],[9,5,4],[9,1,1],[9,13,1],[9,14,4],[9,3,1],[9,35,3],[9,41,1],[9,8,3],[9,2,5],[9,8,2],[9,13,3],[9,10,1],[9,4,1],[9,35,12],[9,9,1],[9,12,1],[9,4,1],[9,2,4],[9,1,2],[9,6,4],[9,1,4],[9,20,3],[9,4,3],[9,3,3],[9,1,4],[9,2,11],[9,11,2],[9,19,1],[9,5,1],[9,6,2],[9,1,1],[9,3,1],[9,15,3],[9,2,1],[9,6,1],[9,13,1],[9,2,1],[9,11,2],[9,3,5],[9,6,1],[9,16,1],[9,4,1],[9,3,2],[9,3,1],[9,2,5],[9,13,1],[9,3,1],[9,2,2],[9,7,1],[9,2,3],[9,3,4],[9,5,1],[9,4,1],[9,10,2],[9,36,1],[9,7,2],[9,3,1],[9,4,2],[9,5,5],[9,12,1],[9,4,1],[9,2,2],[9,12,1],[9,13,1],[9,12,1],[9,2,4],[9,1,1],[9,1,2],[9,6,6],[9,1,2],[9,8,4],[9,7,2],[9,15,4],[10,3,25],[10,2,1],[10,4,2],[10,8,1],[10,2,1],[10,1,1],[10,21,1],[10,21,19],[10,4,4],[10,4,8],[10,2,1],[10,1,3],[10,3,5],[10,6,1],[10,8,5],[10,4,1],[10,24,5],[10,2,2],[10,24,1],[10,6,4],[10,1,2],[10,25,1],[10,14,1],[10,6,3],[10,2,3],[10,6,1],[10,15,2],[10,54,3],[10,12,1],[10,21,1],[10,7,1],[10,4,4],[10,5,1],[10,10,3],[10,37,1],[10,8,3],[10,11,1],[10,2,4],[10,6,1],[10,30,1],[10,35,1],[10,4,2],[10,2,1],[10,5,2],[10,6
,1],[10,4,4],[10,12,1],[10,12,1],[10,44,4],[10,16,3],[10,1,64],[10,27,1],[10,9,3],[10,17,2],[10,25,2],[10,2,2],[10,7,3],[10,89,1],[10,7,30],[10,2,4],[10,2,3],[10,2,1],[10,3,3],[10,11,1],[10,7,1],[10,2,1],[10,4,2],[10,1,1],[10,1,1],[10,6,2],[10,7,3],[10,4,1],[10,2,2],[10,18,1],[10,4,1],[10,19,1],[10,14,6],[10,5,1],[10,5,6],[10,12,1],[11,5,6],[11,15,8],[11,9,1],[11,3,2],[11,6,3],[11,24,4],[11,27,3],[11,2,2],[11,5,9],[11,13,1],[11,3,1],[11,2,25],[11,10,1],[11,4,11],[11,7,2],[11,49,1],[11,4,1],[11,12,1],[11,7,1],[11,1,2],[11,10,6],[11,2,1],[11,4,2],[11,1,2],[11,2,1],[11,5,1],[11,4,3],[11,1,1],[11,6,1],[11,4,3],[11,95,2],[11,8,1],[11,18,1],[11,5,1],[11,16,12],[11,13,2],[11,7,6],[11,56,1],[11,6,1],[11,8,1],[11,21,14],[11,2,7],[11,5,1],[11,1,1],[11,5,2],[11,2,1],[11,15,1],[11,3,3],[11,26,1],[11,6,6],[11,1,1],[11,10,7],[11,6,3],[11,6,1],[11,8,2],[11,1,2],[11,35,2],[11,19,2],[11,8,2],[11,4,1],[11,7,2],[11,4,5],[11,3,5],[11,17,1],[11,3,3],[11,2,1],[11,12,1],[11,2,8],[11,85,1],[11,4,1],[11,9,1],[11,2,2],[11,2,1],[11,6,2],[11,6,3],[11,18,3],[11,1,1],[11,8,1],[11,22,1],[11,7,1],[11,4,2],[11,4,1],[11,8,3],[11,10,4],[11,24,1],[11,10,19],[11,12,8],[12,5,1],[12,1,7],[12,4,1],[12,21,6],[12,12,2],[12,16,1],[12,1,1],[12,2,1],[12,3,1],[12,8,9],[12,1,1],[12,17,2],[12,16,6],[12,14,1],[12,3,3],[12,27,3],[12,2,1],[12,3,3],[12,14,4],[12,1,3],[12,10,1],[12,5,7],[12,7,3],[12,13,5],[12,4,1],[12,47,4],[12,18,1],[12,31,2],[12,8,1],[12,5,4],[12,1,1],[12,26,1],[12,13,2],[12,5,2],[12,4,3],[12,15,5],[12,2,1],[12,2,1],[12,3,1],[12,5,1],[12,11,1],[12,4,3],[12,1,1],[12,7,2],[12,6,1],[12,14,6],[12,32,4],[12,14,1],[12,31,1],[12,7,3],[12,9,7],[12,5,1],[12,6,1],[12,6,6],[12,7,8],[12,2,1],[12,3,1],[12,4,3],[12,1,1],[12,19,2],[12,11,1],[12,7,2],[12,8,1],[12,15,4],[12,5,1],[12,9,3],[12,2,1],[12,1,1],[12,8,9],[12,3,6],[12,15,1],[13,1,11],[13,7,2],[13,10,1],[13,13,4],[13,3,2],[13,1,2],[13,2,1],[13,3,4],[13,3,1],[13,4,3],[13,5,1],[13,10,13],[13,5,4],[13,2,3],[13,3,2],[13,72,2],[13,7,3],[13,19,2],[13,4,1],[13,5,6],[13,4,2],[13,2,1],[13,2,1],[13,34,11],[13,5,2],[13,9,5],[13,6,2],[13,5,5],[13,9,5],[13,9,1],[13,19,3],[13,4,1],[13,3,1],[13,7,2],[13,1,1],[13,11,7],[13,4,7],[13,6,1],[13,2,1],[13,1,1],[13,21,1],[13,6,15],[13,5,2],[13,1,1],[13,1,2],[14,2,1],[14,18,1],[14,8,2],[14,5,1],[14,2,2],[14,5,2],[14,2,1],[14,8,2],[14,4,1],[14,8,5],[14,14,1],[14,9,6],[14,18,2],[14,4,1],[14,6,1],[14,18,1],[14,6,6],[14,4,1],[14,6,2],[14,6,8],[14,3,1],[14,2,3],[14,1,1],[14,17,4],[14,4,3],[14,15,3],[14,4,8],[14,15,2],[14,6,1],[14,9,22],[14,7,3],[14,7,6],[14,2,2],[14,1,1],[14,7,4],[14,10,1],[14,1,1]])\n #data = 
np.array([[26,2],[18,3],[30,4],[19,2],[21,1],[40,1],[17,3],[20,3],[19,3],[15,4],[246,1],[57,2],[16,2],[44,101],[31,1],[19,2],[35,2],[25,1],[28,1],[82,1],[52,11],[19,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,6],[1,1],[1,4],[1,1],[1,7],[1,9],[1,1],[1,2],[1,4],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,9],[1,1],[1,1],[1,1],[1,2],[1,6],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,13],[1,1],[1,4],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,7],[1,2],[1,1],[1,5],[1,1],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,1],[1,4],[1,3],[1,2],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,1],[1,3],[1,37],[1,1],[1,2],[1,1],[1,1],[1,50],[1,1],[1,1],[1,1],[1,8],[1,1],[1,1],[1,1],[1,6],[1,2],[1,3],[1,3],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,2],[1,15],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,9],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,12],[2,3],[2,3],[2,1],[2,1],[2,1],[2,4],[2,1],[2,5],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,3],[2,2],[2,1],[2,13],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,8],[2,3],[2,1],[2,1],[2,13],[2,2],[2,1],[2,2],[2,3],[2,1],[2,1],[3,1],[3,2],[3,5],[3,1],[3,1],[3,11],[3,3],[3,1],[3,1],[3,6],[3,1],[3,3],[3,1],[3,2],[3,4],[3,2],[3,2],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[4,1],[4,2],[4,2],[4,9],[4,1],[4,1],[4,5],[4,1],[4,16],[4,1],[4,2],[4,1],[4,1],[4,1],[4,6],[4,2],[4,2],[5,2],[5,2],[5,2],[5,2],[5,3],[5,1],[6,3],[6,1],[6,4],[6,1],[7,1],[7,1],[7,2],[7,1],[7,1],[8,7],[8,1],[8,1],[9,1],[9,3],[9,2],[9,1],[10,1],[10,11],[11,1],[11,2],[12,4],[13,11],[13,2],[14,3],[22,1],[39,3],[107,1],[46,6],[22,1],[15,1],[29,45],[29,1],[35,1],[23,2],[21,1],[17,1],[57,1],[20,1],[19,4],[24,1],[18,2],[61,2],[51,12],[41,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,6],[1,2],[1,1],[1,4],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,4],[1,7],[1,3],[1,1],[1,15],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,4],[1,2],[1,2],[1,1],[1,4],[1,2],[1,5],[1,1],[1,1],[1,1],[1,1],[1,5],[1,8],[1,1],[1,1],[1,2],[1,2],[1,134],[1,45],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,4],[1,6],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,19],[1,4],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,19],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,5],[1,3],[1,6],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,2],[1,1],[1,26],[1,4],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,5],[1,4],[1,1],[1,27],[1,1],[1,1],[1,1],[1,11],[1,2],[1,4],[1,1],[1,1],[1,24],[1,2],[1,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,15],[2,1],[2,1],[2,1],[2,3],[2,1],[2,5],[2,1],[2,4],[2,1],[2,1],[2,5],[2,2],[2,1],[2,1],[2,2],[2,1],[2,3],[2,4],[2,1],[2,3],[2,1],[2,2],[2,17],[2,4],[2,2],[2,7],[2,2],[2,1],[3,1],[3,3],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,3],[3,1],[3,18],[3,1],[3,1],[3,1],[3,6],[3,8],[3,1],[3,1],[3,2],[3,2],[3,1],[4,1],[4,3],[4,1],[4,1],[4,1],[4,4],[4,1],[4,20],[4,2],[4,4],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,3],[4,4],[4,2],[4,2],[4,1],[4,1],[5,3],[5,1],[5,1],[6,1],[6,8],[7,1],[7,1],[7,5],[8,21],[8,1],[8,1],[8,2],[9,1],[10,30],[10,2],[10,3],[10,1],[11,1],
[11,2],[11,1],[11,1],[12,1],[12,3],[12,6],[13,1],[13,2],[13,1],[14,1],[14,2],[17,1],[52,1],[64,1],[190,2],[25,3],[19,3],[22,1],[15,2],[25,1],[25,2],[38,1],[69,1],[1,1],[1,4],[1,1],[1,21],[1,1],[1,3],[1,11],[1,31],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,2],[1,2],[1,212],[1,6],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,1],[1,2],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,6],[1,1],[1,3],[1,7],[1,2],[1,5],[1,3],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,9],[1,1],[1,2],[1,2],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,78],[1,3],[1,7],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,3],[1,2],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,8],[2,1],[2,1],[2,5],[2,2],[2,1],[2,6],[2,1],[2,4],[2,2],[2,2],[2,1],[2,2],[2,1],[2,1],[2,30],[2,3],[2,5],[2,4],[2,3],[2,1],[2,1],[3,1],[3,2],[3,1],[3,11],[3,1],[3,1],[3,8],[3,2],[3,1],[3,4],[3,3],[3,2],[3,3],[3,1],[3,3],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[4,8],[4,1],[4,2],[4,1],[4,2],[4,1],[4,3],[4,1],[4,2],[4,7],[4,1],[4,1],[4,1],[4,1],[4,7],[5,1],[5,1],[5,2],[5,2],[5,1],[5,11],[5,1],[5,1],[5,1],[5,1],[5,2],[5,1],[5,2],[5,8],[5,1],[6,2],[6,8],[6,1],[6,1],[6,1],[6,2],[6,1],[6,2],[6,1],[7,1],[7,3],[7,1],[7,2],[7,6],[7,2],[8,1],[8,6],[8,15],[9,2],[10,3],[10,1],[10,1],[10,2],[10,5],[10,2],[10,64],[11,1],[11,1],[11,1],[12,1],[12,6],[12,1],[12,2],[14,4],[14,1],[17,1],[21,1],[17,1],[32,1],[16,1],[18,5],[17,1],[16,1],[17,2],[262,1],[22,1],[227,5],[82,4],[28,3],[56,7],[42,2],[26,1],[137,1],[55,19],[29,1],[42,2],[1,5],[1,1],[1,2],[1,22],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,2],[1,3],[1,1],[1,4],[1,1],[1,2],[1,4],[1,1],[1,2],[1,2],[1,1],[1,2],[1,2],[1,5],[1,7],[1,2],[1,2],[1,1],[1,1],[1,7],[1,1],[1,1],[1,1],[1,2],[1,3],[1,16],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,5],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,4],[1,28],[1,6],[1,1],[1,2],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,16],[1,1],[1,2],[1,3],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,7],[1,1],[1,1],[1,2],[1,2],[1,4],[1,3],[1,4],[1,1],[1,1],[1,2],[1,5],[1,1],[1,1],[1,5],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[2,5],[2,5],[2,4],[2,2],[2,32],[2,1],[2,1],[2,4],[2,3],[2,1],[2,1],[2,1],[2,45],[2,3],[2,11],[2,1],[2,1],[2,2],[2,1],[2,4],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,8],[2,2],[2,2],[2,1],[2,2],[2,2],[2,1],[2,7],[2,4],[2,2],[2,4],[2,1],[2,8],[3,1],[3,1],[3,1],[3,3],[3,4],[3,1],[3,10],[3,6],[3,1],[3,1],[3,1],[3,2],[3,4],[3,4],[3,1],[3,1],[3,7],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,19],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,1],[4,2],[4,1],[4,9],[4,4],[4,5],[4,3],[4,2],[4,3],[5,1],[5,2],[5,20],[5,1],[5,2],[5,2],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,4],[5,1],[6,2],[6,2],[6,1],[6,1],[6,1],[6,1],[6,1],[6,6],[6,2],[7,1],[7,1],[7,1],[7,4],[8,1],[8,5],[8,14],[9,1],[9,4],[10,1],[10,1],[10,1],[10,1],[11,6],[11,4],
[12,1],[12,2],[13,2],[13,1],[13,6],[14,2],[42,4],[264,3],[22,3],[15,6],[19,1],[46,2],[193,1],[15,1],[127,5],[47,1],[16,2],[27,1],[25,1],[19,5],[73,1],[60,1],[27,1],[19,2],[1,2],[1,1],[1,2],[1,2],[1,4],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,16],[1,2],[1,3],[1,2],[1,1],[1,4],[1,20],[1,3],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,3],[1,4],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,1],[1,47],[1,2],[1,2],[1,5],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,16],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,5],[1,2],[1,7],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,14],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,3],[1,4],[1,5],[1,1],[1,1],[1,1],[1,17],[1,71],[1,1],[1,1],[1,1],[1,79],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,7],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[2,1],[2,1],[2,1],[2,4],[2,13],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,6],[2,3],[2,1],[2,1],[2,1],[2,2],[2,17],[2,2],[2,2],[2,8],[2,1],[2,3],[2,2],[2,11],[2,1],[2,2],[2,5],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,3],[2,4],[2,1],[2,6],[2,25],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,3],[3,8],[3,5],[3,3],[3,7],[3,1],[3,1],[3,9],[3,6],[3,3],[3,2],[3,8],[3,4],[3,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[4,1],[4,3],[4,2],[4,1],[4,3],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[5,1],[5,5],[5,3],[5,2],[5,3],[5,1],[5,3],[6,1],[6,1],[6,1],[6,1],[7,1],[7,1],[7,1],[7,1],[7,32],[7,2],[7,1],[7,4],[7,1],[7,1],[7,4],[8,2],[8,2],[8,1],[8,2],[8,1],[9,1],[9,3],[9,1],[9,1],[9,1],[10,3],[11,4],[11,1],[11,1],[11,3],[11,3],[11,1],[12,1],[12,1],[12,1],[13,2],[13,1],[13,2],[14,5],[26,2],[49,1],[26,1],[18,1],[27,1],[15,1],[23,1],[58,3],[36,2],[19,3],[62,2],[72,2],[90,1],[124,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,3],[1,1],[1,4],[1,2],[1,1],[1,1],[1,18],[1,1],[1,2],[1,4],[1,24],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,2],[1,1],[1,1],[1,1],[1,1],[1,8],[1,10],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,17],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,4],[1,2],[1,1],[1,2],[1,25],[1,2],[1,7],[1,1],[1,1],[1,6],[1,1],[1,3],[1,2],[1,4],[1,1],[1,1],[1,6],[1,1],[1,2],[1,3],[1,1],[1,4],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[2,1],[2,5],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,2],[2,6],[2,1],[2,2],[2,1],[2,3],[2,1],[2,2],[2,3],[2,13],[2,1],[2,2],[2,1],[2,3],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,3],[2,2],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,5],[3,2],[3,2],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,3],[3,2],[3,2],[3,1],[3,1],[3,1],[3,1],[3,5],[3,1],[3,4],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,3],[4,3],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[5,1],[5,2],[5,9],[5,2],[5,1],[5,7],[5,2],[5,1],[5,2],[5,2],[5,1],[6,3],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,29],[6,2],[7,3],[7,2],[7,1],[7,1],[7,2],[7,2],[7,2],[7,3]
,[7,2],[8,5],[8,1],[8,1],[8,3],[8,2],[8,1],[8,2],[9,1],[9,1],[10,1],[10,14],[10,3],[10,4],[10,3],[10,4],[11,1],[11,5],[11,2],[11,3],[11,1],[11,1],[11,2],[12,1],[12,1],[13,5],[13,1],[13,1],[14,1],[14,3],[14,1],[24,1],[15,1],[19,2],[15,5],[131,1],[28,13],[33,1],[24,1],[17,1],[15,1],[44,2],[16,2],[16,3],[29,7],[29,1],[82,8],[16,1],[17,2],[16,2],[45,1],[159,1],[100,2],[23,1],[15,1],[15,1],[22,1],[48,1],[25,5],[15,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,4],[1,44],[1,1],[1,2],[1,40],[1,1],[1,9],[1,1],[1,17],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,25],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,12],[1,1],[1,2],[1,12],[1,2],[1,2],[1,5],[1,2],[1,3],[1,7],[1,5],[1,72],[1,2],[1,8],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,3],[1,1],[1,2],[1,2],[1,5],[1,3],[1,2],[1,3],[1,382],[1,1],[1,3],[1,1],[1,1],[1,6],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,2],[1,6],[1,1],[1,3],[1,3],[1,1],[1,6],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,1],[1,2],[2,1],[2,1],[2,1],[2,1],[2,12],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,52],[2,2],[2,1],[2,1],[2,2],[2,1],[2,2],[2,9],[2,1],[2,1],[2,18],[2,3],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,1],[2,1],[2,1],[2,1],[3,6],[3,3],[3,4],[3,1],[3,1],[3,1],[3,1],[3,1],[3,4],[3,1],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[3,80],[3,1],[3,2],[3,1],[3,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,2],[4,1],[4,4],[4,4],[4,1],[4,2],[4,2],[4,1],[4,2],[4,1],[4,1],[5,1],[5,1],[5,3],[5,3],[5,1],[5,1],[5,1],[5,2],[5,1],[6,4],[6,3],[6,1],[6,6],[6,1],[6,1],[7,2],[7,1],[7,1],[7,2],[7,1],[7,2],[7,1],[7,1],[8,1],[8,4],[8,1],[8,2],[8,3],[9,2],[9,3],[9,3],[9,6],[10,1],[10,1],[10,1],[10,1],[11,8],[11,1],[11,1],[12,2],[13,5],[15,1],[35,7],[16,1],[24,2],[16,1],[25,1],[65,4],[36,1],[16,5],[21,10],[18,1],[16,12],[29,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,5],[1,3],[1,3],[1,3],[1,1],[1,4],[1,3],[1,3],[1,3],[1,1],[1,1],[1,1],[1,2],[1,5],[1,3],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,8],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,2],[1,4],[1,2],[1,7],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,5],[1,1],[1,4],[1,8],[1,6],[1,1],[1,4],[1,1],[1,1],[1,3],[1,1],[1,3],[1,2],[1,7],[1,2],[1,5],[1,2],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,5],[1,1],[1,13],[1,3],[1,2],[1,1],[1,1],[1,10],[1,1],[1,2],[1,1],[1,3],[1,12],[1,2],[1,2],[1,4],[1,1],[1,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,4],[2,3],[2,1],[2,1],[2,1],[2,6],[2,1],[2,6],[2,1],[2,2],[2,6],[2,1],[2,10],[2,1],[2,1],[2,4],[2,1],[2,3],[2,3],[2,1],[2,1],[2,3],[2,5],[2,3],[2,10],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,4],[2,1],[2,1],[2,2],[2,1],[2,3],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[3,2],[3,1],[3,1],[3,1],[3,5],[3,34],[3,2],[3,3],[3,1],[3,1],[3,2],[3,1],[3,5],[3,1],[3,1],[3,2],[3,4],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,25],[3,1],[3,1],[4,1],[4,6],[4,3],[4,1],[4,6],[4,1],[4,1],[4,4],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,1],[4,3],[4,4],[5,1],[5,2],[5,3],[5,1],[5,1],[5,1],[5,4],[5,1],[5,2],[5,4],[5,1],[5,1],[6,1],[6,4],[6,2],[6,1],[6,1],[6,2],[6,3],[7,11],[7,1],[7,5],[8,2],[8,1],[8,1],[9,2],[9,5
],[9,4],[9,3],[9,1],[9,2],[9,2],[10,1],[10,2],[11,1],[12,3],[12,1],[13,11],[13,1],[17,1],[201,2],[16,2],[104,4],[123,2],[15,1],[26,5],[74,1],[15,3],[15,7],[16,1],[39,2],[27,1],[32,1],[53,4],[28,1],[25,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,16],[1,3],[1,2],[1,2],[1,3],[1,1],[1,1],[1,3],[1,11],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,4],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,32],[1,2],[1,1],[1,1],[1,6],[1,1],[1,7],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,55],[1,2],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,5],[1,4],[1,7],[1,1],[1,1],[1,6],[1,2],[1,2],[1,6],[1,3],[1,2],[1,1],[1,6],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,4],[1,9],[1,2],[1,3],[1,1],[2,1],[2,1],[2,11],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,4],[2,1],[2,2],[2,2],[2,2],[2,3],[2,4],[2,2],[2,5],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,6],[3,2],[3,1],[3,1],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,5],[3,1],[3,1],[3,2],[3,2],[3,2],[3,1],[3,1],[3,2],[3,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,2],[4,3],[4,3],[4,1],[4,4],[4,1],[4,2],[4,1],[4,3],[4,1],[5,1],[5,2],[5,1],[5,3],[5,3],[5,1],[5,2],[5,9],[5,1],[5,1],[5,2],[5,1],[5,2],[6,2],[6,3],[6,1],[6,1],[6,2],[6,1],[6,2],[6,2],[6,1],[6,4],[6,2],[7,7],[7,2],[7,4],[7,1],[7,2],[7,19],[7,1],[7,1],[7,1],[8,1],[8,12],[8,1],[8,3],[8,1],[9,1],[9,1],[9,1],[9,1],[9,1],[10,1],[10,1],[10,4],[10,2],[12,3],[12,1],[12,1],[13,1],[13,1],[14,1],[14,1],[14,3],[30,7],[32,1],[40,2],[16,1],[91,6],[122,1],[15,1],[17,1],[20,3],[19,2],[19,1],[98,2],[81,14],[47,4],[38,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,83],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,4],[1,2],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,7],[1,1],[1,2],[1,4],[1,1],[1,1],[1,88],[1,2],[1,2],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,57],[1,2],[1,6],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,5],[1,5],[1,1],[1,1],[1,9],[1,1],[1,1],[1,3],[1,4],[1,1],[1,2],[1,5],[1,2],[1,3],[1,1],[1,2],[1,4],[1,4],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,6],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,2],[2,2],[2,15],[2,4],[2,1],[2,1],[2,2],[2,1],[2,2],[2,3],[2,3],[2,3],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,2],[2,1],[2,7],[2,1],[2,4],[2,3],[2,2],[2,3],[2,1],[2,1],[2,2],[3,4],[3,1],[3,1],[3,2],[3,3],[3,6],[3,2],[3,9],[3,9],[3,2],[3,2],[3,1],[3,15],[3,1],[3,1],[3,1],[3,3],[4,1],[4,1],[4,2],[4,3],[4,1],[4,2],[4,1],[4,6],[4,2],[4,8],[4,9],[4,1],[4,1],[4,1],[5,1],[5,1],[5,78],[5,1],[5,1],[5,1],[5,17],[5,1],[5,3],[5,2],[5,1],[6,1],[6,1],[6,5],[6,19],[6,1],[6,6],[6,1],[6,1],[6,2],[6,1],[6,1],[6,1],[6,2],[6,1],[7,2],[7,1],[7,1],[7,4],[7,1],[7,28],[7,1],[8,1],[8,1],[8,1],[9,3],[9,1],[9,11],[9,4],[10,1],[10,2],[11,1],[11,1],[11,1],[11,1],[12,1],[14,2],[14
,2],[14,2],[18,2],[31,1],[29,2],[16,1],[17,20],[25,1],[20,3],[59,1],[25,1],[27,2],[26,1],[44,1],[17,4],[16,4],[20,6],[67,2],[15,1],[65,1],[17,1],[33,1],[61,2],[1,2],[1,2],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,4],[1,5],[1,2],[1,1],[1,1],[1,18],[1,1],[1,3],[1,1],[1,2],[1,1],[1,2],[1,2],[1,5],[1,4],[1,1],[1,4],[1,1],[1,1],[1,1],[1,56],[1,1],[1,4],[1,1],[1,9],[1,6],[1,9],[1,1],[1,2],[1,1],[1,1],[1,1],[1,18],[1,10],[1,1],[1,5],[1,1],[1,1],[1,2],[1,5],[1,1],[1,3],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,8],[1,3],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,5],[1,2],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,5],[1,2],[1,27],[1,3],[1,1],[1,2],[1,9],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,15],[1,1],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,17],[1,1],[1,4],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,18],[1,1],[1,2],[1,46],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,7],[1,8],[1,1],[1,3],[1,6],[2,1],[2,1],[2,1],[2,1],[2,5],[2,4],[2,1],[2,2],[2,2],[2,4],[2,2],[2,1],[2,2],[2,1],[2,3],[2,5],[2,1],[2,2],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,12],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,2],[2,3],[2,1],[2,2],[2,1],[2,10],[2,2],[2,8],[2,2],[2,2],[2,1],[2,5],[2,5],[2,4],[2,1],[2,1],[2,1],[2,1],[3,2],[3,6],[3,2],[3,1],[3,58],[3,1],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[3,2],[3,1],[3,1],[3,6],[3,10],[3,1],[3,4],[3,1],[3,1],[3,6],[3,1],[3,29],[3,2],[3,2],[3,6],[3,1],[4,1],[4,4],[4,2],[4,1],[4,46],[4,2],[4,1],[4,2],[4,2],[4,3],[4,11],[4,3],[4,1],[4,2],[4,1],[4,15],[4,2],[5,5],[5,9],[5,1],[5,2],[5,136],[5,48],[5,5],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,10],[6,1],[6,2],[6,1],[7,2],[7,1],[7,3],[7,2],[7,11],[7,6],[7,1],[8,1],[8,3],[8,2],[8,1],[8,12],[8,2],[8,2],[9,1],[9,1],[9,1],[9,4],[10,1],[10,2],[11,2],[12,9],[13,1],[14,2],[21,1],[26,1],[16,2],[29,1],[16,5],[401,3],[33,1],[19,31],[15,4],[28,2],[23,1],[42,4],[40,1],[70,1],[15,3],[15,2],[22,1],[103,1],[256,27],[41,1],[86,1],[17,1],[31,1],[26,1],[105,2],[28,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,6],[1,4],[1,1],[1,4],[1,7],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,2],[1,2],[1,8],[1,1],[1,2],[1,1],[1,5],[1,2],[1,1],[1,1],[1,2],[1,2],[1,2],[1,2],[1,1],[1,9],[1,1],[1,2],[1,2],[1,3],[1,2],[1,1],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,5],[1,1],[1,29],[1,1],[1,4],[1,2],[1,3],[1,3],[1,17],[1,6],[1,2],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,9],[1,3],[1,1],[1,1],[1,1],[1,2],[1,3],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,1],[1,7],[1,1],[1,5],[1,1],[1,1],[1,4],[1,1],[1,2],[1,6],[1,2],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,16],[1,5],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,8],[2,3],[2,1],[2,2],[2,4],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,9],[2,1],[2,23],[2,1],[2,1],[2,1],[2,2],[2,3],[2,1],[2,1],[2,3],[2,1],[2,1],[2,2],[2,1],[2,25],[2,2],[2,3],[2,2],[2,1],[2,1],[2,3],[2,1],[2,3],[2,1],[2,3],[2,1],[2,2],[2,1],[2,1],[2,1],[3,1],[3,2],[3,2],[3,3],[3,2],[3,1],[3,1],[3,5],[3,9],[3,1],[3,3],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,9],[3,1],[3,2],[3,7],[3,3],[3,4],[3,2],[3,1],[3,37],[3,1],[3,1],[3,1],[3,1],[4,1],[4,2],[4,305],[4,4],[4,1],[4,1],[4,1],[4,4],[4,3],[4,1],[4,6],[4,7],[4,1],[4,1],[4,1],[4,1],[4,29],[4,1],[5,10],[
5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,2],[6,1],[6,1],[6,2],[7,1],[7,1],[7,2],[7,1],[7,1],[7,1],[7,2],[8,1],[8,3],[8,2],[9,1],[9,1],[10,1],[10,3],[10,1],[11,6],[11,2],[11,1],[11,1],[12,5],[12,4],[12,1],[14,1],[14,1],[23,1],[26,2],[15,2],[16,16],[31,7],[18,3],[22,3],[87,1],[17,2],[17,9],[30,1],[58,4],[24,2],[28,5],[53,1],[23,1],[28,2],[44,1],[60,3],[17,2],[17,1],[1,1],[1,2],[1,1],[1,11],[1,1],[1,1],[1,2],[1,2],[1,3],[1,2],[1,6],[1,3],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,3],[1,2],[1,4],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,7],[1,2],[1,1],[1,1],[1,4],[1,2],[1,1],[1,3],[1,1],[1,5],[1,3],[1,3],[1,3],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,3],[1,5],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,8],[1,15],[1,1],[1,8],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,3],[1,15],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,5],[1,3],[1,1],[1,1],[1,14],[1,1],[1,2],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,1],[1,5],[1,2],[1,3],[1,1],[1,2],[1,9],[1,1],[1,4],[1,1],[1,2],[1,8],[1,1],[1,3],[1,1],[1,1],[1,4],[1,4],[1,3],[1,1],[1,1],[1,9],[1,2],[1,4],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,2],[1,3],[1,2],[1,6],[1,1],[1,18],[2,1],[2,3],[2,3],[2,1],[2,6],[2,1],[2,2],[2,2],[2,5],[2,1],[2,1],[2,1],[2,3],[2,2],[2,6],[2,1],[2,3],[2,3],[2,1],[2,3],[2,2],[2,2],[2,1],[2,1],[2,9],[2,5],[2,1],[2,1],[2,1],[2,2],[2,85],[2,60],[2,2],[2,1],[2,12],[2,1],[2,1],[2,1],[2,8],[2,1],[2,21],[2,1],[2,3],[2,1],[2,1],[2,8],[2,1],[2,1],[3,3],[3,3],[3,1],[3,3],[3,3],[3,1],[3,2],[3,2],[3,1],[3,1],[3,14],[3,1],[3,6],[3,1],[3,2],[3,1],[3,3],[3,2],[3,1],[3,1],[3,1],[3,1],[3,2],[3,3],[3,2],[4,3],[4,2],[4,1],[4,3],[4,1],[4,1],[4,2],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,4],[5,1],[5,1],[5,1],[5,3],[5,2],[5,1],[5,4],[6,6],[6,1],[6,18],[6,1],[6,1],[6,1],[6,5],[6,2],[6,3],[6,2],[7,3],[7,5],[7,2],[7,1],[7,3],[7,5],[7,1],[7,1],[7,1],[7,1],[8,1],[8,1],[8,3],[8,1],[8,1],[8,4],[9,1],[9,2],[9,4],[10,2],[10,1],[11,2],[11,1],[11,1],[12,3],[13,1],[14,2],[32,7],[26,2],[22,2],[15,1],[26,46],[15,2],[16,1],[19,1],[36,1],[16,2],[24,1],[20,5],[1,1],[1,1],[1,1],[1,7],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,1],[1,10],[1,5],[1,13],[1,2],[1,3],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,8],[1,1],[1,3],[1,5],[1,1],[1,2],[1,2],[1,2],[1,4],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,8],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,2],[1,4],[1,3],[1,2],[1,9],[1,19],[1,1],[1,1],[1,1],[1,1],[1,14],[1,3],[1,2],[1,4],[1,2],[1,1],[1,4],[1,1],[1,1],[1,5],[1,2],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,11],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,3],[1,9],[1,2],[1,6],[1,9],[1,3],[1,1],[1,1],[1,5],[1,1],[1,3],[1,2],[1,9],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,4],[1,2],[1,1],[1,3],[1,2],[1,1],[1,12],[1,1],[1,1],[1,1],[1,1],[2,5],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,3],[2,3],[2,114],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,9],[2,1],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,2],[2,1],[2,3],[2,19],[2,1],[2,8],[2,2],[2,2],[2,7],[2,1],[2,1],[3,2],[3,1],[3,5],[3,3],[3,1],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,30],[3,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,2],[3,2],[3,1],[3,2],[3,2],[3,1],[3,2],[3,1],[3,2],[4,1],[4,3],[4,1],[4,1],[4,7],[4,2],[4,2],[4,3],[4,3],[4,2],[4,2],[4,1],[4,1],
[4,2],[4,1],[4,2],[4,1],[4,1],[4,6],[5,2],[5,1],[5,2],[5,1],[5,7],[5,7],[5,1],[5,2],[5,1],[6,1],[6,1],[6,1],[6,2],[6,1],[6,1],[6,4],[6,1],[7,1],[7,1],[7,1],[7,3],[7,1],[7,1],[7,1],[8,1],[8,2],[8,3],[8,1],[8,1],[8,9],[8,6],[9,1],[9,3],[9,4],[10,4],[10,1],[10,3],[10,1],[10,19],[11,3],[11,2],[11,5],[11,5],[11,1],[12,7],[13,3],[13,4],[13,2],[13,4],[14,2],[16,1],[93,1],[22,2],[42,6],[15,1],[16,3],[36,8],[34,1],[30,3],[43,7],[46,8],[40,1],[22,1],[1,3],[1,1],[1,13],[1,2],[1,3],[1,2],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,5],[1,13],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,6],[1,4],[1,1],[1,4],[1,1],[1,2],[1,3],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,3],[1,2],[1,3],[1,3],[1,2],[1,1],[1,3],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,2],[1,2],[1,3],[1,7],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,4],[1,5],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,7],[1,6],[1,1],[1,2],[1,3],[1,3],[1,1],[1,4],[1,2],[1,7],[1,2],[1,5],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,3],[1,6],[1,2],[1,2],[1,1],[1,1],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,3],[2,1],[2,2],[2,12],[2,1],[2,1],[2,3],[2,3],[2,1],[2,2],[2,3],[2,3],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,8],[2,2],[2,1],[2,2],[2,1],[2,1],[2,7],[2,1],[2,1],[2,1],[2,7],[2,2],[2,1],[2,18],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,5],[2,1],[2,1],[2,6],[2,3],[2,1],[3,3],[3,1],[3,1],[3,3],[3,1],[3,1],[3,3],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[4,6],[4,1],[4,1],[4,3],[4,1],[4,1],[4,1],[4,2],[4,2],[4,5],[4,2],[4,2],[4,2],[4,2],[4,1],[4,3],[4,2],[4,1],[5,1],[5,3],[5,2],[5,2],[5,1],[5,1],[5,3],[5,1],[5,1],[5,2],[5,4],[5,4],[5,1],[6,2],[6,2],[6,2],[6,1],[6,1],[6,1],[6,1],[6,4],[6,1],[7,2],[7,1],[7,2],[7,1],[7,1],[7,1],[8,2],[8,2],[8,3],[8,14],[9,5],[9,2],[9,1],[9,1],[10,8],[10,2],[11,1],[11,1],[12,1],[12,1],[12,1],[12,7],[12,3],[48,1],[73,3],[22,2],[19,1],[20,1],[40,2],[15,2],[34,1],[22,5],[31,2],[47,28],[51,1],[19,2],[231,1],[15,3],[18,2],[18,3],[101,5],[65,2],[30,11],[18,3],[1,1],[1,2],[1,2],[1,1],[1,3],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,64],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,4],[1,5],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,6],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,14],[1,1],[1,1],[1,1],[1,1],[1,2],[1,12],[1,2],[1,2],[1,1],[1,1],[1,3],[1,2],[1,3],[1,2],[1,1],[1,5],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,2],[1,3],[1,1],[2,2],[2,1],[2,3],[2,2],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,10],[2,2],[2,1],[2,2],[2,3],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,6],[2,2],[2,4],[2,9],[2,2],[2,1],[2,3],[2,2],[2,10],[2,3],[2,1],[2,37],[2,2],[2,2],[2,2],[3,9],[3,4],[3,3],[3,2],[3,2],[3,1],[3,19],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,2],[3,10],[3,1],[3,1
],[3,1],[3,1],[3,3],[3,6],[4,2],[4,5],[4,1],[4,3],[4,10],[4,1],[4,1],[4,1],[4,1],[4,4],[4,5],[4,1],[4,1],[4,2],[5,2],[5,2],[5,1],[5,2],[5,1],[5,3],[5,2],[5,1],[5,1],[6,3],[6,1],[6,1],[6,6],[6,1],[6,3],[7,2],[7,1],[7,1],[7,1],[7,1],[7,1],[8,1],[8,2],[8,1],[8,3],[8,1],[9,1],[9,1],[9,2],[10,3],[10,4],[10,1],[11,1],[12,1],[12,1],[13,1],[13,3],[13,1],[14,1],[35,2],[15,7],[32,1],[80,1],[22,2],[16,1],[25,1],[156,1],[175,2],[460,1],[63,1],[74,3],[121,2],[16,3],[49,5],[29,1],[16,1],[1,5],[1,4],[1,3],[1,5],[1,1],[1,1],[1,2],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,3],[1,4],[1,12],[1,1],[1,3],[1,1],[1,2],[1,3],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,12],[1,1],[1,1],[1,3],[1,1],[1,2],[1,38],[1,1],[1,1],[1,1],[1,2],[1,5],[1,1],[1,1],[1,10],[1,3],[1,3],[1,4],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,6],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,9],[1,1],[1,1],[1,4],[1,4],[1,3],[1,3],[1,2],[1,1],[1,6],[1,2],[1,3],[1,1],[1,5],[1,2],[1,2],[1,1],[1,1],[1,5],[1,2],[1,1],[1,3],[1,1],[1,6],[1,1],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,2],[1,2],[1,8],[1,1],[1,3],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,4],[1,3],[1,1],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[2,1],[2,1],[2,4],[2,7],[2,1],[2,3],[2,2],[2,3],[2,2],[2,10],[2,2],[2,6],[2,4],[2,2],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,4],[2,1],[2,1],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,10],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,3],[2,2],[2,2],[3,5],[3,3],[3,26],[3,1],[3,4],[3,2],[3,5],[3,1],[3,3],[3,2],[3,1],[3,1],[3,2],[3,1],[3,2],[3,2],[3,1],[3,4],[3,2],[4,8],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,2],[4,1],[4,5],[4,1],[4,2],[4,2],[4,2],[4,3],[4,2],[5,2],[5,1],[5,2],[5,3],[5,1],[5,1],[5,3],[5,1],[5,1],[5,1],[6,4],[6,2],[6,1],[6,1],[6,7],[6,2],[7,1],[7,1],[7,1],[7,3],[7,3],[7,3],[8,2],[8,1],[8,3],[9,3],[9,2],[9,1],[9,3],[9,2],[10,1],[10,1],[10,4],[11,2],[11,1],[11,1],[12,1],[12,55],[12,1],[13,1],[35,4],[21,9],[26,1],[165,7],[21,1],[55,5],[19,10],[18,5],[17,1],[67,1],[68,4],[19,1],[24,6],[89,3],[21,1],[40,1],[52,2],[16,1],[1,3],[1,4],[1,1],[1,4],[1,2],[1,3],[1,1],[1,3],[1,1],[1,4],[1,1],[1,1],[1,14],[1,5],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,22],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,4],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,2],[1,5],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,4],[1,1],[1,2],[1,37],[1,1],[1,2],[1,1],[1,2],[1,2],[1,5],[1,1],[1,1],[1,11],[1,2],[1,1],[1,1],[1,1],[1,7],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,6],[1,2],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,2],[1,3],[1,1],[1,4],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,11],[1,2],[1,1],[1,6],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,8],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,5],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,1],[2,1],[2,3],[2,1],[2,2],[2,1],[2,1],[2,1],[2,19],[2,6],[2,3],[2,1],[2,2],[2,3],[2,2],[2,6],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,7],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,7],[2,1],[2,3],[2,3],[2,1],[3,6],[3,2],[3,2],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,3],[3,1],[3,1],[3,29],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[3,15],[3,2],[3,1],[3,1],[3,2],[3,1],[3,2],[3,2],[3,7],[3,3],[3,4],[3,1],[4,2],[4,10],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[5,3],[5,2],[5,1],[5,4],
[5,1],[5,2],[5,1],[6,13],[6,2],[6,2],[6,2],[6,1],[6,1],[6,1],[7,1],[7,1],[7,2],[8,1],[8,1],[8,1],[9,2],[9,1],[9,1],[9,1],[9,1],[9,1],[10,1],[10,1],[10,112],[10,1],[11,1],[11,3],[11,11],[12,1],[13,2],[13,1],[13,2],[14,1],[78,1],[43,1],[20,1],[15,1],[26,5],[17,2],[32,2],[93,2],[57,2],[25,1],[112,4],[18,1],[73,1],[30,55],[24,1],[699,1],[17,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,3],[1,3],[1,1],[1,2],[1,2],[1,3],[1,1],[1,4],[1,5],[1,3],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,2],[1,1],[1,2],[1,4],[1,1],[1,2],[1,1],[1,1],[1,6],[1,3],[1,4],[1,1],[1,2],[1,1],[1,1],[1,2],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,4],[1,4],[1,1],[1,3],[1,1],[1,1],[1,1],[1,9],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,13],[1,2],[1,1],[1,1],[1,1],[1,7],[1,3],[1,3],[1,1],[1,1],[1,1],[1,2],[1,15],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,7],[1,3],[1,1],[1,1],[1,1],[1,5],[1,1],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,6],[1,2],[1,4],[1,15],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,2],[1,1],[2,1],[2,10],[2,3],[2,1],[2,1],[2,1],[2,3],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,2],[2,1],[2,24],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,5],[2,3],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,4],[2,1],[3,2],[3,2],[3,1],[3,2],[3,1],[3,3],[3,1],[3,1],[3,1],[3,3],[3,13],[3,10],[3,7],[3,1],[3,1],[3,1],[3,9],[3,9],[3,1],[3,2],[3,11],[3,1],[3,4],[3,1],[3,1],[4,2],[4,1],[4,2],[4,1],[4,115],[4,1],[4,1],[4,1],[4,1],[4,2],[4,2],[4,1],[4,2],[4,4],[4,9],[4,1],[4,1],[5,1],[5,2],[5,3],[5,2],[5,1],[5,4],[5,1],[5,2],[5,1],[5,1],[5,1],[5,7],[5,1],[5,1],[6,39],[6,2],[6,3],[6,1],[7,1],[7,2],[7,3],[7,1],[7,2],[7,8],[7,1],[8,3],[8,1],[8,1],[8,1],[8,1],[9,3],[9,2],[9,1],[10,3],[10,25],[10,1],[10,1],[11,6],[11,1],[11,1],[11,1],[11,7],[12,1],[12,1],[12,1],[13,1],[13,1],[14,8],[14,1],[14,1],[74,2],[26,11],[69,1],[108,1],[20,5],[21,1],[16,1],[16,3],[32,2],[62,2],[50,1],[16,1],[15,1],[22,5],[1,2],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,6],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,5],[1,10],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,7],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,4],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,9],[1,7],[1,9],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,15],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,42],[1,12],[1,3],[1,3],[1,5],[1,2],[1,1],[1,5],[1,4],[1,3],[1,3],[1,4],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,3],[1,1],[1,12],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,3],[1,1],[1,5],[1,1],[1,16],[1,1],[1,7],[1,1],[1,1],[1,3],[1,1],[1,7],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,7],[1,1],[2,1],[2,3],[2,1],[2,1],[2,9],[2,2],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,3],[2,2],[2,3],[2,1],[2,1],[2,1],[2,2],[2,1],[2,4],[2,2],[2,1],[2,10],[2,2],[2,1],[2,4],[2,1],[2,4],[2,3],[2,1],[2,1],[2,1],[2,1],[2,5],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[3,1],[3,3],[3,135],[3,1],[3,10],[3,1],[3,1],[3,3],[3,2],[3,2],[3,2],[3,5],[3,1],[3,2],[3,7],[3,2],[3,1],[3,1],[3,3],[3,3],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[4,91],[4,2],[4,2],[4,3],[4,10],[4,3],[4,2],[4,3],[4,1],[4,1],[4,32],[4,2],[4,2],[5,1],[5,1],[5,3],[5,1],[5,3],[5,2],[5,1],[5,34],[5,2],[5,7],[5,2],[5,1],[6,2],[6,1],[6,5],[6,2],[6,1],[6,1],[7,2],[7,2],[7,1],[7,1],[7,6],[7,1],[8,1],[8,2],[8,1],[8,5],[8,4],[8,1],[8,3],[8,1],[9,4],[9,7],[9,1],[11,2],[11,2],[11,1],[11,1],[11,2],[11,1
9],[11,6],[12,6],[13,2],[13,1],[13,1],[14,1],[76,1],[65,1],[15,2],[19,1],[15,1],[32,1],[33,1],[19,4],[27,3],[62,7],[36,2],[39,3],[44,3],[17,1],[940,4],[20,1],[16,5],[17,4],[21,1],[46,1],[55,1],[251,12],[27,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,12],[1,8],[1,1],[1,1],[1,5],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,9],[1,2],[1,5],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,3],[1,2],[1,1],[1,3],[1,2],[1,3],[1,1],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,32],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,5],[1,1],[1,11],[1,4],[1,15],[1,3],[1,2],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,11],[1,9],[1,1],[1,2],[1,6],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,128],[1,3],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,3],[1,2],[1,3],[1,1],[1,1],[1,1],[1,3],[1,2],[1,2],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,17],[1,1],[1,1],[1,1],[1,3],[1,8],[2,1],[2,1],[2,3],[2,1],[2,3],[2,2],[2,4],[2,2],[2,1],[2,3],[2,1],[2,2],[2,1],[2,2],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,10],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,2],[2,1],[2,1],[2,4],[2,1],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,1],[3,1],[3,2],[3,1],[3,8],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,3],[3,2],[3,3],[3,1],[3,1],[3,2],[3,1],[3,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,1],[4,3],[4,1],[4,2],[4,2],[4,1],[4,1],[5,33],[5,5],[5,2],[5,1],[5,5],[5,48],[6,2],[6,3],[6,2],[6,1],[6,1],[6,2],[6,3],[6,1],[6,3],[7,8],[7,1],[7,1],[7,2],[8,1],[8,1],[8,1],[8,1],[8,2],[8,1],[9,1],[9,1],[9,1],[10,1],[10,1],[10,1],[11,2],[11,5],[12,1],[12,2],[12,2],[17,4],[17,1],[15,2],[29,5],[38,1],[20,1],[16,2],[24,1],[42,1],[29,1],[60,2],[20,1],[168,4],[17,33],[83,2],[71,1],[16,1],[18,3],[54,1],[15,8],[22,1],[36,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,7],[1,5],[1,1],[1,9],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,7],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,15],[1,1],[1,3],[1,2],[1,2],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,3],[1,2],[1,1],[1,143],[1,1],[1,1],[1,2],[1,4],[1,4],[1,2],[1,2],[1,96],[1,1],[1,4],[1,16],[1,2],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,8],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,4],[1,2],[1,1],[1,5],[1,2],[1,1],[1,1],[1,6],[1,1],[1,15],[1,1],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,7],[1,1],[1,2],[1,4],[1,1],[1,6],[1,5],[1,6],[1,1],[1,1],[1,2],[1,2],[1,1],[1,5],[1,2],[1,2],[1,12],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,4],[1,1],[1,3],[1,8],[2,1],[2,1],[2,2],[2,3],[2,1],[2,3],[2,1],[2,1],[2,1],[2,5],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,14],[2,1],[2,1],[2,1],[2,5],[2,1],[2,7],[2,3],[2,1],[2,3],[2,2],[2,3],[2,1],[2,1],[2,33],[2,1],[2,1],[2,1],[2,2],[2,3],[2,5],[2,1],[2,2],[2,8],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[3,1],[3,2],[3,1],[3,1],[3,1],[3,3],[3,16],[3,1],[3,4],[3,1],[3,1],[3,8],[3,2],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[3,2],[3,1],[3,1],[3,2],[3,5],[3,6],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,4],[3,1],[4,1],[4,2],[4,1],[4,1],[4,2],[4,1],[4,4],[4,2],[4,3],[4,1],[4,2],[4,2],[4,3],[4,1],[4,1],[4,1],[4,1],[4,45],[5,2],[5,1],[5,4],[5,2],[5,1],[5,1],[5,1],[5,1],[5,3],[5,1],[5,3],[6,5],[6,13],[6,4],[6,1],[6,2],[6,1],[6,2],[7,3],[7,1],[7,2],[7,1],[7,1],[8,1],[8,1],[8,1],[8,11],[8,4],[8,1],[8,1],[9,2],[9,1],[10,1],[10,1],[10,2],[11,25],[11,1],[11,1],[11,7],[11,1],[12,3],[12,1],[12,1],[26,3],[
29,11],[18,1],[20,1],[15,1],[16,1],[35,4],[15,1],[63,2],[39,1],[64,4],[15,1],[15,1],[26,1],[64,1],[40,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,12],[1,1],[1,1],[1,2],[1,2],[1,3],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,10],[1,1],[1,1],[1,16],[1,1],[1,2],[1,47],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,170],[1,2],[1,2],[1,1],[1,1],[1,3],[1,3],[1,1],[1,5],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,2],[1,1],[1,3],[1,1],[1,14],[1,35],[1,1],[1,3],[1,4],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,3],[1,2],[1,1],[1,1],[1,2],[1,1],[1,15],[1,13],[1,2],[1,1],[1,1],[1,8],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,3],[1,1],[1,53],[1,1],[1,4],[1,3],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,14],[2,3],[2,1],[2,2],[2,3],[2,9],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,4],[2,8],[2,3],[2,1],[2,1],[2,3],[2,2],[2,1],[2,1],[2,1],[2,2],[2,4],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,3],[2,1],[2,1],[2,4],[2,2],[2,161],[2,1],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,51],[3,1],[3,1],[3,3],[3,1],[3,3],[3,2],[3,1],[3,1],[3,2],[3,3],[3,4],[3,2],[3,2],[3,1],[3,1],[3,10],[3,1],[4,1],[4,1],[4,1],[4,4],[4,1],[4,1],[4,4],[4,1],[4,5],[4,9],[4,1],[4,3],[4,1],[5,4],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[5,2],[5,1],[5,1],[5,1],[6,7],[6,1],[6,1],[6,1],[6,1],[6,1],[6,3],[6,2],[7,1],[7,2],[7,1],[7,1],[8,1],[8,2],[8,2],[9,1],[9,1],[10,3],[10,1],[10,1],[10,3],[11,9],[11,1],[11,1],[11,1],[11,1],[11,2],[11,2],[12,1],[12,4],[13,2],[13,2],[13,15],[14,1],[14,1],[17,3],[185,1],[51,1],[21,3],[19,3],[17,1],[29,1],[38,4],[169,24],[41,4],[15,1],[59,5],[87,3],[169,1],[29,5],[28,1],[25,4],[48,1],[15,3],[18,1],[22,2],[36,4],[134,1],[19,1],[15,1],[17,3],[56,1],[24,1],[17,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,3],[1,6],[1,4],[1,6],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,9],[1,79],[1,1],[1,4],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,7],[1,1],[1,3],[1,3],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,3],[1,5],[1,4],[1,1],[1,2],[1,5],[1,2],[1,1],[1,10],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,24],[1,2],[1,1],[1,11],[1,2],[1,8],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,4],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,31],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,1],[1,7],[1,1],[1,5],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,1],[1,13],[1,5],[1,3],[1,2],[1,4],[1,2],[1,1],[1,2],[1,1],[1,1],[1,4],[1,3],[1,3],[1,1],[1,2],[1,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,5],[2,2],[2,8],[2,1],[2,1],[2,1],[2,3],[2,13],[2,6],[2,1],[2,4],[2,1],[2,2],[2,2],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,6],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,4],[2,6],[2,1],[2,1],[2,1],[2,1],[2,6],[2,1],[2,1],[2,1],[2,2],[2,2],[2,4],[3,1],[3,1],[3,2],[3,1],[3,5],[3,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,6],[3,1],[3,8],[3,1],[3,1],[3,1],[3,1],[3,13],[3,3],[3,1],[3,2],[3,2],[3,1],[4,4],[4,1],[4,1],[4,3],[4,1],[4,1],[4,1],[4,2],[5,4],[5,1],[5,2],[5,3],[5,1],[5,1],[5,1],[5,1],[5,2],[6,8],[7,1],[7,1],[7,2],[8,2],[8,2],[8,2],[8,3],[8,3],[8,1],[8,1],[9,1],[9,1],[10,1],[10,3],[10,1],[12,3],[12,2],[12,2],[12,1],[12,1],[12,1],[13,3],[13,1],[13,1],[14,1],[17,1],[25,7],[15,6],[111,8],[92,1],[26,21
],[328,1],[16,1],[752,1],[16,1],[22,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,2],[1,3],[1,6],[1,1],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,7],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,5],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,6],[1,1],[1,1],[1,4],[1,2],[1,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,8],[1,2],[1,2],[1,3],[1,2],[1,2],[1,3],[1,1],[1,3],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,6],[1,1],[1,1],[1,2],[1,2],[1,6],[1,1],[1,1],[1,8],[1,5],[1,1],[1,2],[1,4],[1,21],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,4],[1,2],[2,5],[2,1],[2,1],[2,4],[2,2],[2,1],[2,3],[2,1],[2,2],[2,8],[2,1],[2,2],[2,12],[2,2],[2,2],[2,1],[2,5],[2,2],[2,2],[2,1],[2,2],[2,1],[2,3],[2,4],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,4],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,4],[2,5],[2,1],[2,2],[2,2],[2,9],[2,1],[2,1],[3,3],[3,1],[3,1],[3,5],[3,1],[3,2],[3,3],[3,1],[3,12],[3,2],[3,1],[3,1],[3,3],[3,3],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,1],[3,1],[3,7],[4,2],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,3],[4,1],[4,3],[5,1],[5,2],[5,1],[5,1],[5,1],[5,1],[6,1],[6,5],[6,11],[6,1],[6,1],[6,2],[6,1],[6,4],[6,1],[6,1],[7,5],[7,1],[7,1],[8,1],[8,3],[9,2],[9,1],[10,1],[11,1],[11,1],[11,2],[11,1],[12,4],[12,2],[13,1],[13,1],[13,2],[14,6],[14,1],[68,4],[113,4],[22,1],[48,79],[28,2],[88,1],[232,2],[23,1],[32,1],[72,2],[26,1],[20,1],[53,1],[16,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,8],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,6],[1,1],[1,3],[1,1],[1,3],[1,4],[1,3],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,5],[1,2],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,5],[1,4],[1,1],[1,1],[1,9],[1,6],[1,5],[1,1],[1,1],[1,3],[1,2],[1,9],[1,2],[1,3],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,2],[1,1],[1,16],[1,3],[1,1],[1,86],[1,1],[1,2],[1,4],[1,2],[1,16],[1,9],[1,4],[1,2],[1,9],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,7],[1,10],[1,5],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,12],[1,2],[1,4],[1,1],[1,1],[1,2],[1,2],[1,4],[2,6],[2,3],[2,2],[2,1],[2,3],[2,2],[2,2],[2,2],[2,6],[2,1],[2,4],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,3],[2,1],[2,1],[2,1],[2,3],[2,1],[2,2],[2,2],[2,1],[2,2],[2,9],[2,10],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,3],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,8],[2,2],[2,1],[2,3],[2,1],[3,1],[3,1],[3,1],[3,2],[3,7],[3,5],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,2],[3,1],[3,1],[3,2],[3,1],[3,2],[3,5],[3,2],[4,1],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,6],[4,2],[5,5],[5,2],[5,9],[5,5],[5,1],[5,2],[5,1],[5,2],[6,7],[6,7],[7,3],[7,8],[7,1],[7,1],[7,2],[7,7],[8,1],[8,1],[8,1],[9,6],[9,4],[10,2],[10,1],[10,1],[10,3],[10,2],[11,1],[12,5],[12,3],[12,1],[13,1],[14,2],[14,3],[14,4],[30,1],[19,1],[27,1],[24,12],[20,24],[20,1],[80,1],[26,1],[25,1],[35,1],[150,1],[22,1],[28,1],[187,2],[15,2],[21,1],[22,1],[17,8],[27,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,4],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,4],[1,1],[1,3],[1,5],[1,1],[1,10],[1,8],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,5],[1,1],[1
,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,3],[1,7],[1,3],[1,1],[1,10],[1,1],[1,4],[1,1],[1,1],[1,2],[1,7],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,4],[1,1],[1,2],[1,3],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,1],[1,1],[1,5],[1,2],[1,1],[1,5],[1,1],[1,1],[1,5],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,4],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,17],[1,4],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,1],[1,6],[1,2],[1,1],[1,28],[1,3],[1,1],[1,3],[1,1],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[2,1],[2,3],[2,1],[2,4],[2,1],[2,3],[2,2],[2,1],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,9],[2,1],[2,1],[2,7],[2,3],[2,1],[2,1],[2,3],[2,4],[2,2],[2,2],[2,2],[2,1],[2,3],[2,2],[2,3],[2,2],[2,1],[2,1],[2,2],[3,10],[3,1],[3,3],[3,4],[3,4],[3,398],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,3],[3,1],[3,1],[3,4],[3,3],[3,2],[3,1],[4,2],[4,16],[4,3],[4,2],[4,1],[4,4],[4,1],[4,1],[4,4],[4,1],[4,1],[4,1],[4,21],[4,5],[4,1],[4,3],[4,2],[4,2],[4,1],[4,2],[4,1],[4,2],[5,3],[5,1],[5,3],[5,1],[5,5],[5,7],[5,1],[5,1],[5,1],[5,7],[5,4],[5,6],[5,1],[6,1],[6,2],[6,3],[6,2],[6,1],[6,3],[7,8],[7,6],[7,1],[7,2],[7,1],[7,1],[8,4],[8,1],[8,4],[8,1],[8,1],[8,8],[8,3],[9,1],[9,1],[9,2],[10,6],[11,1],[11,1],[11,1],[12,1],[12,4],[12,6],[13,3],[13,1],[520,3],[292,13],[16,1],[20,1],[44,3],[22,1],[17,2],[18,1],[46,5],[19,1],[15,3],[28,1],[23,1],[19,13],[25,2],[23,134],[68,1],[79,13],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,5],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,5],[1,1],[1,1],[1,3],[1,1],[1,2],[1,6],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,5],[1,12],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,2],[1,6],[1,1],[1,1],[1,36],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,5],[1,1],[1,5],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,3],[1,2],[1,2],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,22],[1,1],[1,1],[1,1],[1,187],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,2],[1,5],[1,4],[1,1],[1,2],[1,1],[1,20],[1,4],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,1],[2,1],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,5],[2,1],[2,2],[2,1],[2,1],[2,6],[2,6],[2,9],[2,1],[2,2],[2,1],[2,2],[2,2],[2,3],[2,6],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,44],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[3,9],[3,4],[3,1],[3,2],[3,1],[3,1],[3,1],[3,4],[3,2],[3,1],[3,1],[3,21],[3,6],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,3],[3,1],[3,3],[3,5],[3,1],[3,1],[3,5],[3,1],[3,2],[3,2],[3,1],[3,1],[3,1],[4,92],[4,1],[4,1],[4,1],[4,13],[4,4],[4,1],[4,1],[4,2],[4,1],[4,1],[5,1],[5,1],[5,1],[5,2],[5,1],[5,3],[5,3],[5,1],[5,1],[5,1],[5,4],[5,1],[6,1],[6,3],[6,2],[6,23],[6,2],[6,3],[6,35],[7,1],[7,1],[7,1],[8,690],[8,1],[8,3],[9,2],[9,5],[9,1],[10,4],[11,6],[12,4],[12,1],[14,15],[14,1],[18,1],[46,1],[16,1],[24,4],[27,2],[21,1],[98,1],[107,3],[44,16],[16,1],[28,1],[1,1],[1,2],[1,7],[1,3],[1,1],[1,1],[1,2],[1,2],[1,14],[1,1],[1,1],[1,1],[1,36],[1,1],[1,3],[1,4],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,13],[1,51],[1,1],[1,1],[1,3],[1,1],[1,3],[1,1],[1,6],[1,2],[1,2],[1,1],[1,3],[1,1],[1,5],[1,3],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,94],[1,6],[1,1],[1,1],[1,1],[1,2],[1,4],[1,5],[1,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,5],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,5],[1,2],[1,1],[1,2],[1
,2],[1,5],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,4],[1,1],[1,28],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,10],[1,4],[1,4],[1,2],[1,1],[1,3],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,3],[1,5],[1,7],[2,1],[2,5],[2,1],[2,3],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,7],[2,7],[2,2],[2,4],[2,3],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[3,1],[3,1],[3,2],[3,2],[3,1],[3,1],[3,5],[3,5],[3,1],[3,1],[3,10],[3,30],[3,1],[3,1],[3,1],[3,3],[3,1],[3,4],[3,3],[3,3],[3,1],[3,1],[3,2],[3,1],[3,92],[3,1],[4,4],[4,1],[4,2],[4,5],[4,1],[4,2],[4,2],[4,1],[4,4],[4,1],[4,1],[4,1],[5,1],[5,2],[5,1],[5,1],[5,1],[5,4],[5,2],[5,1],[5,10],[6,2],[6,1],[6,1],[6,1],[6,4],[6,2],[6,1],[6,1],[6,2],[7,1],[7,1],[7,1],[7,1],[7,2],[7,1],[7,1],[8,5],[8,1],[8,1],[8,5],[8,5],[8,1],[9,2],[9,1],[9,4],[9,4],[10,1],[10,1],[10,5],[10,5],[10,1],[10,1],[11,1],[11,1],[11,1],[11,2],[12,1],[12,2],[12,2],[12,1],[13,1],[13,1],[13,3],[14,1],[14,22],[14,1],[14,1],[14,2],[20,4],[27,1],[18,2],[49,1],[16,3],[15,1],[18,1],[15,1],[18,1],[15,1],[27,2],[21,1],[23,1],[54,1],[22,1],[46,1],[17,1],[37,7],[17,1],[19,1],[33,2],[62,1],[18,4],[18,1],[24,1],[18,1],[36,1],[20,1],[125,1],[18,13],[36,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,10],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,3],[1,8],[1,2],[1,4],[1,10],[1,1],[1,71],[1,1],[1,2],[1,18],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,34],[1,9],[1,2],[1,7],[1,3],[1,3],[1,3],[1,3],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,8],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,6],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,9],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,6],[1,1],[1,10],[1,1],[1,10],[1,1],[1,2],[1,2],[1,2],[1,3],[1,1],[1,2],[1,3],[1,2],[1,2],[1,20],[1,2],[1,3],[1,2],[1,1],[1,1],[1,5],[1,1],[1,5],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,10],[2,1],[2,1],[2,6],[2,3],[2,5],[2,3],[2,1],[2,1],[2,11],[2,2],[2,3],[2,2],[2,1],[2,7],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,3],[2,1],[2,3],[2,2],[2,1],[2,6],[2,3],[2,1],[2,1],[2,1],[3,4],[3,2],[3,1],[3,8],[3,1],[3,49],[3,2],[3,2],[3,3],[3,1],[3,2],[3,5],[3,3],[3,2],[3,1],[3,3],[3,1],[3,2],[3,13],[3,7],[3,2],[3,1],[4,2],[4,4],[4,1],[4,2],[4,1],[4,1],[4,1],[4,2],[5,1],[5,4],[5,1],[5,1],[5,1],[5,1],[5,1],[5,4],[5,1],[5,2],[6,1],[6,7],[6,1],[6,1],[6,4],[6,2],[6,3],[6,1],[6,9],[7,1],[7,1],[8,3],[8,7],[8,1],[8,2],[8,2],[8,2],[8,8],[8,1],[9,1],[9,1],[9,1],[9,2],[10,1],[11,3],[12,1],[12,1],[12,2],[12,1],[12,3],[13,1],[14,1],[58,1],[21,1],[36,15],[218,1],[34,1],[20,2],[16,2],[28,1],[38,1],[38,3],[16,1],[165,2],[132,1],[19,2],[260,1],[39,2],[64,1],[18,1],[1,1],[1,1],[1,1],[1,12],[1,1],[1,2],[1,1],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,13],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,2],[1,5],[1,1],[1,3],[1,2],[1,1],[1,2],[1,6],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,3],[1,6],[1,1],[1,1],[1,1],[1,6],[1,3],[1,2],[1,6],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,2],[1,3],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1]
    #data = np.array([...])  # very large literal of [int, int] pairs omitted (garbled data dump, not reproduced)
,1],[1,1],[1,5],[1,2],[1,1],[1,2],[1,2],[1,5],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,4],[1,4],[1,1],[1,28],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,10],[1,4],[1,4],[1,2],[1,1],[1,3],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,3],[1,5],[1,7],[2,1],[2,5],[2,1],[2,3],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,7],[2,7],[2,2],[2,4],[2,3],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[3,1],[3,1],[3,2],[3,2],[3,1],[3,1],[3,5],[3,5],[3,1],[3,1],[3,10],[3,30],[3,1],[3,1],[3,1],[3,3],[3,1],[3,4],[3,3],[3,3],[3,1],[3,1],[3,2],[3,1],[3,92],[3,1],[4,4],[4,1],[4,2],[4,5],[4,1],[4,2],[4,2],[4,1],[4,4],[4,1],[4,1],[4,1],[5,1],[5,2],[5,1],[5,1],[5,1],[5,4],[5,2],[5,1],[5,10],[6,2],[6,1],[6,1],[6,1],[6,4],[6,2],[6,1],[6,1],[6,2],[7,1],[7,1],[7,1],[7,1],[7,2],[7,1],[7,1],[8,5],[8,1],[8,1],[8,5],[8,5],[8,1],[9,2],[9,1],[9,4],[9,4],[10,1],[10,1],[10,5],[10,5],[10,1],[10,1],[11,1],[11,1],[11,1],[11,2],[12,1],[12,2],[12,2],[12,1],[13,1],[13,1],[13,3],[14,1],[14,22],[14,1],[14,1],[14,2],[20,4],[27,1],[18,2],[49,1],[16,3],[15,1],[18,1],[15,1],[18,1],[15,1],[27,2],[21,1],[23,1],[54,1],[22,1],[46,1],[17,1],[37,7],[17,1],[19,1],[33,2],[62,1],[18,4],[18,1],[24,1],[18,1],[36,1],[20,1],[125,1],[18,13],[36,1],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,4],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,10],[1,6],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,4],[1,1],[1,3],[1,8],[1,2],[1,4],[1,10],[1,1],[1,71],[1,1],[1,2],[1,18],[1,1],[1,3],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,34],[1,9],[1,2],[1,7],[1,3],[1,3],[1,3],[1,3],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,8],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,6],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,9],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,6],[1,1],[1,10],[1,1],[1,10],[1,1],[1,2],[1,2],[1,2],[1,3],[1,1],[1,2],[1,3],[1,2],[1,2],[1,20],[1,2],[1,3],[1,2],[1,1],[1,1],[1,5],[1,1],[1,5],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,2],[2,1],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,10],[2,1],[2,1],[2,6],[2,3],[2,5],[2,3],[2,1],[2,1],[2,11],[2,2],[2,3],[2,2],[2,1],[2,7],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,3],[2,1],[2,3],[2,2],[2,1],[2,6],[2,3],[2,1],[2,1],[2,1],[3,4],[3,2],[3,1],[3,8],[3,1],[3,49],[3,2],[3,2],[3,3],[3,1],[3,2],[3,5],[3,3],[3,2],[3,1],[3,3],[3,1],[3,2],[3,13],[3,7],[3,2],[3,1],[4,2],[4,4],[4,1],[4,2],[4,1],[4,1],[4,1],[4,2],[5,1],[5,4],[5,1],[5,1],[5,1],[5,1],[5,1],[5,4],[5,1],[5,2],[6,1],[6,7],[6,1],[6,1],[6,4],[6,2],[6,3],[6,1],[6,9],[7,1],[7,1],[8,3],[8,7],[8,1],[8,2],[8,2],[8,2],[8,8],[8,1],[9,1],[9,1],[9,1],[9,2],[10,1],[11,3],[12,1],[12,1],[12,2],[12,1],[12,3],[13,1],[14,1],[58,1],[21,1],[36,15],[218,1],[34,1],[20,2],[16,2],[28,1],[38,1],[38,3],[16,1],[165,2],[132,1],[19,2],[260,1],[39,2],[64,1],[18,1],[1,1],[1,1],[1,1],[1,12],[1,1],[1,2],[1,1],[1,5],[1,2],[1,2],[1,1],[1,2],[1,1],[1,13],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,2],[1,5],[1,1],[1,3],[1,2],[1,1],[1,2],[1,6],[1,1],[1,2],[1,2],[1,7],[1,1],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,1],[1,1],[1,3],[1,6],[1,1],[1,1],[1,1],[1,6],[1,3],[1,2],[1,6],[1,2],[1,1],[1,3],[1,1],[1,2],[1,1],[1,1],[1,2],[1,3]
,[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,6],[1,1],[1,2],[1,63],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,2],[1,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,2],[1,3],[1,9],[1,2],[1,1],[1,2],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,10],[1,1],[1,2],[1,1],[1,2],[1,2],[1,7],[1,1],[1,8],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,1],[1,15],[1,6],[1,1],[1,1],[1,422],[1,2],[1,2],[1,4],[1,2],[1,2],[1,3],[1,2],[1,3],[1,1],[1,5],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[2,4],[2,3],[2,1],[2,2],[2,2],[2,3],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,2],[2,2],[2,2],[2,13],[2,11],[2,4],[2,1],[2,2],[2,10],[2,5],[2,2],[2,75],[2,3],[2,1],[2,8],[2,4],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,14],[2,2],[2,15],[2,1],[2,2],[2,4],[2,1],[2,1],[2,2],[2,33],[2,2],[2,1],[2,1],[2,3],[2,2],[2,2],[2,1],[3,1],[3,13],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,6],[3,7],[3,2],[3,1],[3,3],[3,1],[3,1],[3,1],[3,1],[3,1],[3,2],[3,3],[3,3],[3,2],[3,1],[3,6],[3,2],[3,4],[3,2],[4,4],[4,4],[4,4],[4,4],[4,6],[4,1],[4,1],[4,1],[4,3],[4,1],[4,2],[4,5],[4,1],[5,4],[5,1],[5,2],[5,8],[5,3],[5,1],[5,1],[5,1],[5,1],[5,3],[6,1],[6,3],[6,2],[6,4],[6,1],[6,3],[6,1],[6,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,3],[8,1],[8,1],[8,1],[8,7],[9,2],[10,2],[10,1],[10,6],[11,1],[11,3],[11,2],[12,1],[12,1],[14,2],[14,6],[17,2],[19,1],[15,1],[112,1],[16,1],[30,6],[19,3],[15,4],[19,2],[25,1],[17,4],[49,1],[48,1],[26,1],[17,9],[43,3],[51,6],[17,1],[21,3],[26,4],[31,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,9],[1,1],[1,753],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,1],[1,1],[1,7],[1,2],[1,6],[1,3],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,3],[1,4],[1,3],[1,4],[1,1],[1,2],[1,1],[1,6],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,3],[1,3],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,26],[1,3],[1,1],[1,1],[1,4],[1,1],[1,1],[1,5],[1,2],[1,3],[1,1],[1,5],[1,2],[1,2],[1,2],[1,2],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,3],[1,1],[1,4],[1,8],[1,10],[1,1],[1,2],[1,6],[1,1],[1,2],[1,2],[1,2],[1,6],[1,1],[1,1],[1,15],[1,2],[2,1],[2,12],[2,1],[2,8],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,20],[2,2],[2,2],[2,1],[2,1],[2,2],[2,2],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,14],[2,2],[2,1],[2,5],[2,5],[2,1],[2,2],[2,2],[2,6],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[3,2],[3,3],[3,3],[3,1],[3,1],[3,1],[3,3],[3,1],[3,1],[3,6],[3,8],[3,1],[3,1],[3,1],[3,3],[3,12],[3,1],[3,1],[3,1],[3,1],[3,6],[3,1],[3,2],[3,1],[3,1],[4,5],[4,1],[4,5],[4,5],[4,29],[4,11],[4,1],[4,1],[4,2],[4,1],[4,1],[5,2],[5,4],[5,1],[5,6],[5,1],[5,1],[5,1],[5,1],[6,1],[6,4],[6,1],[6,4],[6,2],[6,2],[6,1],[6,1],[6,2],[6,1],[7,1],[7,2],[7,1],[7,1],[7,2],[8,3],[8,4],[8,5],[8,7],[8,5],[9,5],[9,1],[9,1],[10,2],[10,2],[10,4],[11,1],[11,1],[12,8],[12,1],[12,1],[13,1],[13,1],[13,2],[14,2],[20,4],[18,3],[65,1],[23,1],[20,3],[237,1],[70,5],[80,2],[71,1],[15,4],[18,8],[54,1],[30,1],[15,2],[26,2],[20,1],[17,1],[26,4],[20,13],[1,2],[1,1],[1,3],[1,1],[1,3],[1,5],[1,3],[1,1],[1,5],[1,1],[1,3],[1,7],[1,2],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,2],[1,11],[1,1],[1,6],[1,4],[1,3],[1,3],[1,2],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,3],[1,1],[1,2],[1,7],[1
,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,5],[1,2],[1,1],[1,1],[1,4],[1,1],[1,10],[1,4],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,2],[1,1],[1,4],[1,1],[1,1],[1,1],[1,3],[1,2],[1,1],[1,2],[1,3],[1,1],[1,2],[1,1],[1,4],[1,1],[1,8],[1,1],[1,1],[1,2],[1,4],[1,1],[1,34],[1,2],[1,2],[1,1],[1,1],[1,4],[1,1],[1,3],[1,7],[1,4],[1,7],[1,7],[1,1],[1,3],[1,1],[1,1],[1,3],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,14],[1,6],[1,6],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[2,2],[2,1],[2,1],[2,4],[2,2],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,2],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,2],[2,1],[2,2],[2,6],[2,1],[2,1],[2,1],[2,2],[2,2],[3,3],[3,7],[3,4],[3,2],[3,3],[3,1],[3,1],[3,4],[3,1],[3,14],[3,2],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,9],[3,25],[3,1],[3,1],[4,1],[4,9],[4,1],[4,3],[4,1],[4,1],[4,12],[4,1],[4,3],[4,7],[4,2],[4,1],[4,1],[4,1],[4,1],[4,1],[5,5],[5,2],[5,1],[5,1],[5,2],[5,5],[5,1],[5,1],[5,1],[5,1],[5,1],[6,5],[6,1],[6,3],[6,1],[6,4],[6,1],[6,1],[6,3],[6,2],[6,1],[7,1],[7,1],[7,1],[7,1],[7,1],[8,2],[8,1],[8,1],[8,1],[8,1],[9,2],[10,374],[10,3],[11,1],[11,1],[11,3],[11,8],[11,4],[12,1],[13,3],[13,2],[13,4],[58,1],[43,1],[38,1],[196,1],[55,3],[15,1],[79,1],[16,5],[20,1],[32,1],[111,1],[68,1],[50,17],[327,47],[46,3],[24,3],[41,2],[65,1],[1,2],[1,14],[1,4],[1,1],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,7],[1,4],[1,5],[1,8],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,6],[1,2],[1,1],[1,5],[1,1],[1,3],[1,29],[1,4],[1,2],[1,1],[1,1],[1,4],[1,2],[1,9],[1,5],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,4],[1,2],[1,1],[1,8],[1,2],[1,13],[1,1],[1,1],[1,1],[1,2],[1,2],[1,2],[1,4],[1,6],[1,1],[1,1],[1,3],[1,2],[1,4],[1,2],[1,10],[1,2],[1,2],[1,2],[1,1],[1,4],[1,2],[1,1],[1,5],[1,93],[1,1],[1,1],[1,3],[1,22],[1,1],[1,1],[1,4],[1,2],[1,2],[1,1],[1,1],[1,4],[1,1],[1,6],[1,1],[1,3],[1,4],[1,1],[1,1],[1,2],[1,2],[1,8],[1,3],[1,1],[1,5],[1,6],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,3],[1,2],[1,1],[1,2],[1,2],[1,2],[1,28],[1,1],[1,6],[1,6],[1,2],[2,1],[2,2],[2,1],[2,2],[2,1],[2,2],[2,6],[2,1],[2,1],[2,2],[2,6],[2,2],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,2],[2,2],[2,6],[2,3],[2,3],[2,1],[2,2],[2,2],[2,1],[2,1],[2,14],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,9],[2,2],[2,1],[2,5],[2,1],[2,1],[2,3],[2,2],[2,2],[2,7],[2,16],[2,6],[2,2],[2,2],[2,1],[2,2],[3,1],[3,26],[3,1],[3,2],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,4],[3,1],[3,3],[3,3],[3,1],[3,1],[3,1],[3,1],[3,1],[3,12],[3,2],[3,2],[3,4],[3,1],[3,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[4,1],[4,1],[4,2],[4,1],[4,1],[4,2],[4,1],[4,1],[4,1],[4,2],[4,1],[4,8],[4,3],[4,1],[4,4],[5,2],[5,2],[5,1],[5,1],[5,1],[5,9],[6,1],[6,2],[6,2],[6,1],[6,1],[6,1],[6,10],[6,1],[7,1],[7,11],[7,4],[7,1],[7,2],[8,2],[8,1],[8,1],[8,1],[8,1],[8,4],[8,7],[9,1],[9,1],[10,2],[10,4],[10,1],[10,1],[11,6],[12,1],[12,1],[12,6],[13,1],[13,5],[13,2],[13,11],[14,8],[14,3],[16,1],[55,1],[17,1],[91,1],[27,1],[16,1],[17,1],[37,1],[54,3],[73,2],[50,1],[19,3],[20,2],[26,1],[55,3],[54,1],[31,1],[68,2],[75,8],[412,1],[21,2],[1,6],[1,1],[1,2],[1,2],[1,4],[1,4],[1,2],[1,6],[1,5],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,9],[1,4],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,6],[1,3],[1,1],[1,2],[1,3],[1,12],[1,16],[1,3],[1,1],[1,1],[1,3],[1,3],[1,502],[1,3],[1,1],[1,1],[1,5],[1,2],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,6],[1,3],[1,2],[1,1],[1,5],[1,1],[1,6],[1,4],[1,1]
,[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,1],[1,17],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,4],[1,6],[1,1],[1,1],[1,11],[1,1],[1,4],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,3],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,5],[1,2],[2,1],[2,1],[2,3],[2,3],[2,2],[2,2],[2,9],[2,2],[2,1],[2,9],[2,1],[2,2],[2,2],[2,2],[2,5],[2,5],[2,2],[2,1],[2,2],[2,1],[2,1],[2,13],[2,5],[2,2],[2,1],[2,4],[2,1],[2,1],[2,2],[2,1],[2,2],[2,3],[2,3],[2,5],[2,3],[2,3],[2,10],[2,2],[2,2],[2,2],[2,4],[2,1],[2,2],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,3],[3,2],[3,2],[3,1],[3,7],[3,2],[3,2],[3,1],[3,5],[3,2],[3,3],[3,1],[3,8],[3,1],[3,1],[3,2],[3,14],[3,2],[4,2],[4,1],[4,2],[4,3],[4,2],[4,7],[4,1],[4,5],[4,1],[4,3],[4,10],[4,1],[4,2],[4,4],[4,4],[4,1],[5,1],[5,4],[5,2],[5,1],[5,1],[5,2],[5,8],[5,3],[5,1],[5,1],[6,2],[6,2],[6,1],[6,1],[6,1],[6,2],[6,15],[6,39],[6,3],[7,2],[7,1],[7,3],[7,1],[7,1],[8,1],[8,1],[9,2],[9,2],[9,1],[9,1],[10,1],[10,1],[10,1],[11,14],[11,1],[11,3],[11,1],[12,1],[12,1],[13,2],[13,2],[14,8],[16,1],[27,1],[21,5],[18,2],[36,1],[36,3],[28,15],[17,13],[18,7],[17,9],[28,2],[19,2],[27,1],[33,11],[40,2],[17,3],[120,2],[136,4],[21,1],[64,1],[23,3],[81,4],[27,1],[126,15],[17,1],[37,2],[21,1],[22,1],[58,1],[1,85],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,2],[1,1],[1,2],[1,3],[1,9],[1,2],[1,3],[1,7],[1,3],[1,2],[1,5],[1,2],[1,1],[1,3],[1,1],[1,1],[1,4],[1,13],[1,74],[1,14],[1,1],[1,1],[1,2],[1,1],[1,2],[1,4],[1,2],[1,5],[1,1],[1,4],[1,1],[1,4],[1,1],[1,1],[1,3],[1,2],[1,79],[1,1],[1,1],[1,6],[1,1],[1,2],[1,7],[1,2],[1,1],[1,2],[1,1],[1,7],[1,1],[1,2],[1,1],[1,4],[1,4],[1,3],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,2],[1,6],[1,1],[1,8],[1,2],[1,2],[1,1],[1,9],[1,1],[1,2],[1,1],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,11],[1,1],[1,5],[1,1],[1,4],[1,3],[1,8],[1,4],[1,1],[1,9],[1,1],[1,3],[1,1],[1,4],[1,1],[1,2],[1,3],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,2],[1,3],[1,8],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,11],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[2,6],[2,1],[2,3],[2,1],[2,3],[2,7],[2,6],[2,1],[2,2],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,2],[2,2],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,2],[2,2],[2,1],[2,4],[2,3],[2,2],[2,1],[2,6],[2,1],[2,3],[2,2],[2,2],[2,1],[2,3],[2,1],[2,2],[2,1],[2,1],[2,1],[2,3],[2,2],[2,1],[2,4],[2,5],[2,1],[2,1],[3,1],[3,57],[3,2],[3,1],[3,1],[3,2],[3,3],[3,15],[3,4],[3,1],[3,1],[3,9],[3,10],[3,5],[3,1],[3,4],[3,4],[3,1],[3,1],[3,6],[3,1],[4,2],[4,1],[4,1],[4,2],[4,1],[4,14],[4,3],[4,1],[4,1],[4,3],[4,10],[4,1],[4,2],[5,10],[5,1],[5,1],[5,3],[5,1],[5,5],[5,1],[6,5],[6,4],[6,2],[6,2],[6,3],[6,1],[7,1],[7,1],[7,4],[7,1],[7,2],[7,2],[7,2],[7,2],[8,2],[8,1],[8,4],[8,2],[8,4],[8,1],[9,1],[9,1],[10,3],[10,1],[11,1],[11,1],[12,9],[12,4],[12,2],[13,7],[13,4],[13,2],[13,7],[13,1],[14,1],[14,1],[23,1],[19,2],[16,1],[36,4],[15,4],[22,3],[17,1],[17,2],[38,2],[15,1],[34,1],[29,2],[20,7],[23,4],[44,5],[22,2],[18,1],[1,2],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,9],[1,1],[1,4],[1,2],[1,2],[1,1],[1,5],[1,1],[1,2],[1,1],[1,4],[1,2],[1,2],[1,1],[1,3],[1,3],[1,3],[1,2],[1,3],[1,1],[1,2],[1,5],[1,3],[1,1],[1,4],[1,1],[1,6],[1,4],[1,3],[1,1],[1,2],[1,1],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,8],[1,1],[1,
2],[1,5],[1,1],[1,6],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,3],[1,10],[1,3],[1,7],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,1],[1,1],[1,2],[1,1],[1,43],[1,23],[1,2],[1,4],[1,33],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,7],[1,2],[1,4],[1,6],[1,1],[1,1],[1,1],[1,2],[1,7],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,136],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,3],[1,2],[1,1],[1,1],[1,1],[1,20],[2,1],[2,1],[2,16],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,3],[2,2],[2,1],[2,1],[2,2],[2,7],[2,2],[2,1],[2,2],[2,114],[2,1],[2,3],[2,4],[2,1],[2,4],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[2,6],[2,2],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,4],[2,2],[2,4],[2,3],[2,2],[2,1],[3,2],[3,1],[3,1],[3,5],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,8],[3,2],[3,1],[3,2],[3,28],[3,1],[3,118],[3,1],[3,1],[3,2],[3,2],[3,3],[3,8],[3,3],[4,1],[4,2],[4,4],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,2],[4,1],[4,1],[4,3],[4,1],[4,3],[4,1],[4,1],[4,1],[5,2],[5,1],[5,6],[5,1],[5,4],[5,2],[5,4],[5,1],[5,4],[6,4],[6,1],[6,3],[6,1],[6,2],[6,1],[7,1],[7,3],[7,1],[7,46],[7,2],[7,1],[8,3],[8,6],[8,1],[8,5],[9,12],[9,1],[9,5],[10,3],[10,3],[11,3],[11,7],[12,3],[12,1],[12,1],[13,1],[13,1],[13,2],[13,13],[13,1],[14,1],[14,1],[58,2],[112,1],[18,3],[19,1],[20,1],[18,1],[15,2],[92,1],[50,1],[40,1],[57,5],[19,2],[19,1],[15,4],[16,5],[54,1],[15,1],[1,2],[1,6],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,6],[1,7],[1,1],[1,2],[1,2],[1,3],[1,1],[1,1],[1,1],[1,1],[1,11],[1,3],[1,6],[1,1],[1,1],[1,6],[1,4],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,12],[1,1],[1,1],[1,1],[1,4],[1,1],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,2],[1,5],[1,2],[1,1],[1,1],[1,2],[1,8],[1,2],[1,1],[1,1],[1,2],[1,1],[1,19],[1,1],[1,1],[1,4],[1,1],[1,4],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,4],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,3],[1,5],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,2],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,2],[1,3],[1,9],[1,26],[1,3],[1,17],[1,1],[1,2],[1,1],[1,5],[1,4],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,8],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,4],[1,30],[2,1],[2,4],[2,1],[2,2],[2,1],[2,1],[2,2],[2,3],[2,4],[2,2],[2,1],[2,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,2],[2,7],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,10],[2,4],[2,1],[2,1],[2,1],[2,3],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,3],[2,3],[2,7],[2,1],[2,1],[2,2],[2,5],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,4],[2,2],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,1],[3,1],[3,2],[3,29],[3,2],[4,2],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,3],[4,1],[5,2],[5,1],[5,1],[5,4],[5,1],[5,1],[5,2],[5,1],[5,1],[5,3],[6,4],[6,1],[6,1],[6,3],[6,2],[6,2],[6,1],[6,1],[6,1],[6,2],[7,2],[7,3],[7,2],[7,1],[7,2],[8,1],[8,1],[8,4],[8,1],[8,3],[9,1],[9,5],[9,1],[9,1],[9,1],[11,1],[11,2],[11,2],[11,3],[12,7],[12,1],[13,1],[14,2],[16,1],[78,3],[17,3],[27,3],[19,2],[67,3],[16,3],[58,3],[17,1],[29,2],[29,1],[23,1],[390,2],[75,2],[26,8],[20,3],[19,2],[16,4],[33,1],[66,2],[20,1],[17,5],[1,1],[1,2],[1,1],[1,1],[1,9],[1,4],[1,2],[1,3],[1,2],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,1],[1,1],[1,2],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,4],[1,5],[1,11],[1,1],[1,4],[1,2],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,3],[1,4],[1,1],[1,2],[1
,3],[1,1],[1,1],[1,3],[1,1],[1,7],[1,1],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,8],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,6],[1,1],[1,1],[1,6],[1,2],[1,1],[1,11],[1,3],[1,1],[1,2],[1,4],[1,4],[1,1],[1,11],[1,7],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,2],[1,2],[1,1],[1,1],[1,14],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,3],[1,6],[1,1],[1,1],[1,3],[1,3],[1,2],[1,2],[1,7],[1,5],[1,2],[1,7],[1,7],[1,1],[1,3],[1,2],[1,4],[1,4],[1,3],[1,1],[1,1],[1,4],[1,2],[1,1],[1,1],[1,5],[1,3],[1,1],[1,124],[1,2],[1,6],[1,1],[1,1],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,2],[2,5],[2,21],[2,2],[2,1],[2,2],[2,1],[2,2],[2,1],[2,1],[2,7],[2,31],[2,1],[2,2],[2,4],[2,1],[2,3],[2,125],[2,1],[2,8],[2,1],[2,4],[2,2],[2,2],[2,1],[2,1],[2,1],[2,4],[2,5],[2,1],[2,2],[2,2],[2,1],[2,1],[2,1],[2,8],[2,1],[2,12],[2,278],[2,1],[2,1],[2,1],[2,1],[2,2],[2,1],[2,1],[3,1],[3,2],[3,1],[3,1],[3,1],[3,2],[3,3],[3,1],[3,1],[3,1],[3,1],[3,3],[3,2],[3,1],[3,1],[3,3],[3,1],[3,3],[3,1],[3,3],[3,1],[3,2],[3,3],[3,1],[4,2],[4,8],[4,1],[4,3],[4,3],[4,1],[4,3],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,2],[4,1],[4,3],[5,1],[5,1],[5,1],[5,2],[5,2],[5,2],[5,1],[6,2],[6,2],[6,24],[6,2],[6,2],[6,20],[6,1],[6,1],[6,3],[6,1],[6,4],[6,5],[6,3],[7,2],[7,1],[7,4],[7,1],[7,1],[7,1],[7,1],[7,1],[7,1],[7,134],[8,1],[8,1],[8,5],[8,1],[8,6],[9,3],[9,15],[10,4],[10,3],[10,1],[11,12],[11,2],[12,2],[12,2],[14,1],[14,6],[15,3],[30,2],[35,1],[28,1],[111,1],[22,1],[25,1],[18,1],[40,4],[58,1],[295,4],[18,3],[35,1],[16,1],[1,1],[1,1],[1,2],[1,1],[1,6],[1,6],[1,2],[1,1],[1,301],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,5],[1,1],[1,2],[1,1],[1,2],[1,2],[1,1],[1,1],[1,1],[1,3],[1,5],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,3],[1,2],[1,1],[1,7],[1,1],[1,2],[1,1],[1,2],[1,1],[1,2],[1,5],[1,1],[1,2],[1,1],[1,3],[1,1],[1,1],[1,17],[1,1],[1,1],[1,2],[1,2],[1,4],[1,3],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,4],[1,1],[1,1],[1,1],[1,1],[1,3],[1,3],[1,2],[1,1],[1,23],[1,1],[1,1],[1,1],[1,1],[1,3],[1,4],[1,1],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,1],[1,2],[1,1],[1,1],[1,4],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[1,1],[1,4],[1,1],[1,2],[1,1],[1,1],[1,1],[1,1],[1,2],[1,3],[1,4],[1,1],[1,1],[1,1],[1,2],[1,1],[1,3],[1,2],[1,2],[1,1],[1,1],[1,3],[1,15],[1,4],[1,1],[1,1],[1,3],[1,3],[1,1],[1,2],[1,2],[1,6],[1,1],[1,2],[1,1],[1,2],[1,2],[1,2],[1,1],[1,1],[1,3],[1,1],[1,1],[1,1],[2,2],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,10],[2,3],[2,1],[2,1],[2,2],[2,7],[2,1],[2,1],[2,4],[2,1],[2,2],[2,1],[2,2],[2,2],[2,1],[2,1],[2,3],[2,6],[2,1],[2,1],[2,46],[2,1],[2,3],[2,1],[2,4],[2,1],[2,1],[2,1],[2,1],[2,2],[2,4],[2,4],[2,3],[3,11],[3,1],[3,1],[3,1],[3,1],[3,2],[3,1],[3,2],[3,4],[3,1],[3,1],[3,1],[3,3],[3,2],[3,1],[3,2],[3,2],[3,2],[3,1],[3,3],[3,1],[3,2],[3,2],[3,4],[3,1],[3,45],[3,2],[4,11],[4,2],[4,1],[4,2],[4,4],[4,14],[4,4],[4,2],[4,2],[4,1],[5,3],[5,1],[5,1],[5,2],[5,1],[5,2],[5,3],[5,2],[5,1],[5,2],[5,2],[6,1],[6,1],[6,3],[6,2],[6,1],[6,3],[6,1],[6,6],[7,1],[7,2],[7,1],[8,1],[8,2],[8,1],[8,1],[8,1],[8,2],[8,2],[8,2],[9,5],[9,2],[10,1],[10,1],[10,3],[11,8],[11,1],[12,5],[12,1],[14,1]])\n \n ida.scatter_plot(data, '{0}/faithful_ida_scatter.png'.format(output_dir))\n ida.histogram(data, '{0}/faithful_ida_hist.png'.format(output_dir))\n ida.linear_regression(data, '{0}/faithful_ida_regression.png'.format(output_dir))\n\n #clustering\n km2 = __run_clustering(data, output_dir)\n\n #expectation-maximization\n __run_em(data, output_dir, km2)\n\n #build bayes fmm model\n __run_bayesfmm(data, iterations, 
save_diagnostics, output_dir, burnin, km2)", "def start_simulation(self):\n\n\t\tif self.objects==10:#Adding colors for planet\n\t\t\tself.col_planet()\n\t\t\t\n\t\tfor step in range(self.steps-1):#iterator=all simulation steps\n\t\t\tvis.rate(600)#frames per sec\n\t\t\ti=0\n\t\t\tprint self.dt\n\t\t\tos.system('clear')\n\t\t\tprint \"==========================\\n\", \"Date: \",datetime(1930,12,24)+timedelta(seconds=step*self.dt) ,\"\\n==========================\"\n\t\t\tprint \"Steps: \",self.steps,\"Objects: \", self.objects, \"\\ndt: \",round(float(self.times[self.objects+1]/86400),5),\"days\\n==========================\"\n\t\t\tfor planet in self.ob:# iterator = all planets and sun\n\t\t\t\tx,y,z = (self.positions[i][step+1][0], self.positions[i][step+1][1], self.positions[i][step+1][2])\n\t\t\t\tplanet.pos = (x,y,z)#updating positions\n\t\t\t\tr = ((self.positions[0][step+1][0]-x)**2 + (self.positions[0][step+1][1]-y)**2 + (self.positions[0][step+1][2]-z)**2)**0.5#lenght from sun\n\t\t\t\tprint self.names[i], \"=\", r,\"AU\"\n\t\t\t\ti += 1\n\t\t\t\tself.p2.pos = (self.positions[0][step+1][0], self.positions[0][step+1][1], self.positions[0][step+1][2])#moving sun center\n\t\t\t\tself.p.pos = (self.center[0][step+1][0], self.center[0][step+1][1], self.center[0][step+1][2])#moving solar system mass center\n\t\t\t\tself.sun.pos = (self.positions[0][step+1][0], self.positions[0][step+1][1], self.positions[0][step+1][2])#moving sun \n\t\t\tprint \"==========================\\nBarycenter =\", round(((self.positions[0][step+1][0]-self.center[0][step+1][0])**2 + (self.positions[0][step+1][1]-self.center[0][step+1][1])**2 + (self.positions[0][step+1][2]-self.center[0][step+1][2])**2)**0.5,8),\"AU\"", "def run_std(self):\n print \"Initialising grid\"\n self.initialise_grid(50, 100, 3)\n \n self.initialise_shadow_map()\n \n self.num_iterations = 500\n self.jump_length = 1\n \n self.pd_s = 0.6\n self.pd_ns = 0.4\n \n self.avcount = np.zeros(self.num_iterations + 1)\n \n \n before = time.time()\n self.main_loop()\n after = time.time()\n \n time_taken = after - before\n \n print \"Took %f seconds\", time_taken", "def getDimensions(self):\n\t\tprint \"Returning\",self.x,self.y,self.slicesPerTimepoint\n\t\treturn (self.x, self.y, self.slicesPerTimepoint)", "def getDimensions(unique_name=None):", "def main():\n dims = params['dims']\n\n for d in dims:\n print('**** Running test for d={0:d} ****'.format(d))\n run_test(d)", "def main(datafilepath):\n #create midline\n sectionsize = 10000\n TrackData = TrackMaker(sectionsize) # 10000\n moving_window = sectionsize*2\n midline = TrackData[0] \n sections = TrackData[2]\n #midline = midline[sections[0]:sections[5],:] #only work with the midline of the trial \n #steergaze_df = pd.read_feather(datafilepath)\n steergaze_df = pd.read_csv(datafilepath, sep=',',header=0)\n #steergaze_df.reset_index()\n master_steergaze = pd.DataFrame()\n datafolder = os.path.split(datafilepath)[0] \n\n #TODO: due to grouping the future path cuts - off at end of slalom, use the continuous trajectory across roadsections for fp mapping\n\n #modes taken from gaze_through_midline_densities.py\n entry = find_closest_index(midline, [-23, 69])\n firstobject = find_closest_index(midline, [25, 52])\n gazemodes = [entry, firstobject]\n\n mid_diff = np.linalg.norm(np.diff(midline, axis=0, prepend = np.array([[0,0]])), axis = 1)\n midline_dist_array = np.cumsum(mid_diff)\n\n tree = spatial.cKDTree(midline)\n\n #for trial in picked_trials:\t\n for block, blockdata in 
steergaze_df.groupby(['ID','block']):\n\n print(block)\n begin = timer()\n\n\n blockdata = blockdata.copy()\n blockdata.sort_values('currtime', inplace=True)\n # blockdata.reset_index()\n\n ####pick target\n \"\"\"\n condition = blockdata.condition.values[0]\n target_centres = targets.loc[targets['condition']==int(condition),:]\n #pprint(target_centres)\n\n target_centres = target_centres.reset_index(drop=True)\n #pick starting position.\n start_x = np.sign(blockdata['posx']).values[0]\n #select targets with opposite sign for xcentre, these will be the ones encountered in that block\n target_centres = target_centres.loc[np.sign(target_centres['xcentre'])!=start_x,:] \n target_circles = dp.target_position_circles(target_centres)\n\n \"\"\"\n\n traj_x = blockdata['posx'].values\n traj_z = blockdata['posz'].values\n trajectory = np.transpose(np.array([traj_x, traj_z]))\n\n yaw = blockdata['yaw'].values\n \n #gaze_on_screen = blockdata['hangle'].values, blockdata['vangle'].values\n gaze_on_screen = np.transpose(np.array([blockdata['hangle'].values, blockdata['vangle'].values]))\n\n #print(yaw[0])\n #index = i\n #\tviewpoint = blockdata['posx'].values, blockdata['posz'].values\n roadsection = blockdata['roadsection'].values\n\n #find time headway along MIDLINE \n \"\"\"\n start = timer()\n #idx, *_ = find_closest_index(midline, trajectory[0,:])\n idx = [find_closest_index(midline, viewpoint) for viewpoint in trajectory] \n print(idx[:10])\n print(timer()-start)\n \"\"\"\n\n #closest_indexes = [closest_node(midline, viewpoint) for viewpoint in trajectory] \n #closest indexes\n #print(np.take(midline, 5, axis = 0, mode = 'wrap'))\n #print(np.take(midline, len(midline), axis = 0, mode = 'wrap'))\n #print(np.take(midline, 0, axis = 0, mode = 'wrap'))\n _, closest_indexes = tree.query(trajectory) \n\n end_of_view = closest_indexes + moving_window\n\n #futuremid = np.take(midline, range(closest_indexes[0], end_of_view[0]), axis = 0, mode = 'wrap')\n def takemid(c,e):\n return (np.take(midline, range(c, e), axis = 0, mode = 'wrap'))\n\n start = timer()\n ml_idx, ml_screen_refs, ml_world_refs, ml_th = zip(*[\n closest_on_screen_point(takemid(c,e), t, y, g) \n for c, e, t, y, g in zip(closest_indexes, end_of_view, trajectory, yaw, gaze_on_screen)\n ])\n print(timer() - start) \n \n print(ml_screen_refs.shape)\n print(type(ml_screen_refs))\n ml_screen_refs = ml_screen_refs.reshape(-1, 2)\n ml_world_refs = ml_world_refs.reshape(-1, 2)\n print(ml_th)\n\n blockdata['midline_ref_onscreen_x'] = ml_screen_refs[:, 0]\n blockdata['midline_ref_onscreen_z'] = ml_screen_refs[:, 1]\n blockdata['midline_ref_world_x'] = ml_world_refs[:, 0]\n blockdata['midline_ref_world_z'] = ml_world_refs[:, 1]\n blockdata['th_along_midline'] = ml_th\n\n #find closest point on FUTURE PATH, with th calc along the path \n \n traj_index = range(len(trajectory))\n fp_idx, fp_screen_refs, fp_world_refs, fp_th = zip(*[\n closest_on_screen_point(trajectory[i:(i+1000),:], t, y, g) \n for i, t, y, g in zip(traj_index, trajectory, yaw, gaze_on_screen)\n ])\n #future_traj = trajectory[index:(index+window_fp), :]\n #fp_world_ref, fp_idx, dists, fp_angles = closest_on_screen_point(future_traj, viewpoint, yaw, gaze_on_screen)\n print(fp_screen_refs.shape)\n print(type(fp_screen_refs))\n fp_screen_refs = fp_screen_refs.reshape(-1, 2)\n fp_world_refs = fp_world_refs.reshape(-1, 2)\n print(ml_th)\n\n blockdata['futurepath_ref_onscreen_x'] = fp_screen_refs[:, 0]\n blockdata['futurepath_ref_onscreen_z'] = fp_screen_refs[:, 1]\n 
blockdata['futurepath_ref_world_x'] = fp_world_refs[:, 0]\n blockdata['futurepath_ref_world_z'] = fp_world_refs[:, 1]\n blockdata['th_along_futurepath'] = fp_th\n \n \n\n #TODO: current method runs into problems if the viewpoint is just before the midline resets (i.e. very large midline_dist_array value).\n #but not a problem for current analysis because trial starts from beginning of midline.\n #th_to_entry\n mid_dist_viewpoint = midline_dist_array[idx]\n\n mid_dist_entry = midline_dist_array[gazemodes[0]]\n th_to_entry = (mid_dist_entry - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata.loc[index,'veh_th_to_entry'] = th_to_entry\n\n #th_to_object\n mid_dist_object = midline_dist_array[gazemodes[1]]\n th_to_object = (mid_dist_object - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata.loc[index,'veh_th_to_object'] = th_to_object\t\t\n \n \"\"\"\n trialcode = row['trialcode']\n #plot\t\t\t \n #print(\"th_along_midline\", ml_timeheadway)\n #print('ml_ref', ml_world_ref)\n #print(\"th_along_futurepath\", fp_timeheadway)\n #print(\"fp_ref\", fp_world_ref)\n\n world_gaze = dp.angles_to_world(gaze_on_screen, viewpoint, yaw)\n #print(\"world_gaze\", world_gaze)\n\n plt.ylim(angles_limits_bottom[1],angles_limits_top[1])\n plt.xlim(angles_limits_bottom[0],angles_limits_top[0])\n\n plt.plot(ml_angles[:,0],ml_angles[:,1], 'C3o', markersize = .5, )\n plt.plot(fp_angles[:,0],fp_angles[:,1], 'C2o', markersize = .5)\n plt.plot(ml_screen_ref[0],ml_screen_ref[1], 'C1o', markersize = 5, markeredgecolor = 'k')\n plt.plot(fp_screen_ref[0],fp_screen_ref[1], 'C0o', markersize = 5, markeredgecolor = 'k')\n\n plt.plot(gaze_on_screen[0],gaze_on_screen[1], 'mo', markersize = 5, markeredgecolor = 'k')\n plt.title(str(trialcode))\n\n\n plt.pause(.016) \n plt.cla()\n\n plt.show()\n \"\"\"\n\t\t\n #master_steergaze = pd.concat([master_steergaze, blockdata])\n\n\n compute_time = timer()-begin\n print(\"Processing block took %f seconds\" % compute_time)\n\n\n print(\"APPENDING DATA FRAME\")\n outfilepath = datafolder + '/trout_gazeandsteering_addthfrompath2.csv'\n\n with open(outfilepath, 'a', newline = '') as sgfile:\n blockdata.to_csv(sgfile, mode='a', header=sgfile.tell()==0)\n\n #master_steergaze.to_csv(datafolder + '/trout_gazeandsteering_addthfrompath.csv')\n\n #master_steergaze.to_feather(datafilepath)", "def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type, visualize):\n #initialization of variables\n list_of_results = []\n \n #trial loop\n for i in range(num_trials):\n list_of_results.append(singleSimulation(num_robots, speed, width, height, min_coverage, robot_type, visualize))\n return list_of_results", "def getCube(unique_name):", "def test_imsim():\n import yaml\n import astropy.units as u\n import matplotlib.pyplot as plt\n from tqdm import tqdm\n # Need these for `eval` below\n from numpy import array\n import coord\n\n with open(DATA_DIR / \"wcs_466749.yaml\", 'r') as f:\n wcss = yaml.safe_load(f)\n\n cmds = {}\n with open(DATA_DIR / \"phosim_cat_466749.txt\", 'r') as f:\n for line in f:\n k, v = line.split()\n try:\n v = int(v)\n except ValueError:\n try:\n v = float(v)\n except ValueError:\n pass\n cmds[k] = v\n\n # Values below (and others) from phosim_cat_466749.txt\n rc = cmds['rightascension']\n dc = cmds['declination']\n boresight = galsim.CelestialCoord(\n rc*galsim.degrees,\n dc*galsim.degrees\n )\n obstime = Time(cmds['mjd'], format='mjd', scale='tai')\n obstime -= 15*u.s\n band = 
\"ugrizy\"[cmds['filter']]\n wavelength_dict = dict(\n u=365.49,\n g=480.03,\n r=622.20,\n i=754.06,\n z=868.21,\n y=991.66\n )\n wavelength = wavelength_dict[band]\n camera = imsim.get_camera()\n\n rotTelPos = cmds['rottelpos'] * galsim.degrees\n telescope = imsim.load_telescope(f\"LSST_{band}.yaml\", rotTelPos=rotTelPos)\n # Ambient conditions\n # These are a guess.\n temperature = 293.\n pressure = 69.0\n H2O_pressure = 1.0\n\n # Start by constructing a refractionless factory, which we can use to\n # cross-check some of the other values in the phosim cmd file.\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera,\n temperature=temperature,\n pressure=0.0,\n H2O_pressure=H2O_pressure\n )\n\n aob, zob, hob, dob, rob, eo = factory._ICRF_to_observed(\n boresight.ra.rad, boresight.dec.rad, all=True\n )\n np.testing.assert_allclose(\n np.rad2deg(aob)*3600, cmds['azimuth']*3600,\n rtol=0, atol=2.0\n )\n np.testing.assert_allclose(\n (90-np.rad2deg(zob))*3600, cmds['altitude']*3600,\n rtol=0, atol=6.0,\n )\n q = factory.q * galsim.radians\n rotSkyPos = rotTelPos - q\n # Hmmm.. Seems like we ought to be able to do better than 30 arcsec on the\n # rotator? Maybe this is defined at a different point in time? Doesn't seem\n # to affect the final WCS much though.\n np.testing.assert_allclose(\n rotSkyPos.deg*3600, cmds['rotskypos']*3600,\n rtol=0, atol=30.0,\n )\n\n # We accidentally simulated DC2 with the camera rotated 180 degrees too far.\n # That includes the regression test data here. So to fix the WCS code, but\n # still use the same regression data, we need to add 180 degrees here. Just\n # rotate the camera by another 180 degrees\n telescope = telescope.withLocallyRotatedOptic(\n \"LSSTCamera\", batoid.RotZ(np.deg2rad(180))\n )\n\n # For actual WCS check, we use a factory that _does_ know about refraction.\n factory = imsim.BatoidWCSFactory(\n boresight, obstime, telescope, wavelength,\n camera,\n temperature=temperature,\n pressure=pressure,\n H2O_pressure=H2O_pressure\n )\n\n do_plot = False\n my_centers = []\n imsim_centers = []\n if do_plot:\n _, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 12))\n i = 0\n r1 = []\n d1 = []\n r2 = []\n d2 = []\n rng = np.random.default_rng(1234)\n for k, v in tqdm(wcss.items()):\n name = k[18:25].replace('-', '_')\n det = camera[name]\n cpix = det.getCenter(cameraGeom.PIXELS)\n\n wcs = factory.getWCS(det, order=2)\n wcs1 = eval(v)\n # Need to adjust ab parameters to new GalSim convention\n wcs1.ab[0,1,0] = 1.0\n wcs1.ab[1,0,1] = 1.0\n\n my_centers.append(wcs.posToWorld(galsim.PositionD(cpix.x, cpix.y)))\n imsim_centers.append(wcs1.posToWorld(galsim.PositionD(cpix.x, cpix.y)))\n\n corners = det.getCorners(cameraGeom.PIXELS)\n xs = np.array([corner.x for corner in corners])\n ys = np.array([corner.y for corner in corners])\n ra1, dec1 = wcs.xyToradec(xs, ys, units='radians')\n ra2, dec2 = wcs1.xyToradec(xs, ys, units='radians')\n if i == 0:\n labels = ['batoid', 'PhoSim']\n else:\n labels = [None]*2\n if do_plot:\n ax.plot(ra1, dec1, c='r', label=labels[0])\n ax.plot(ra2, dec2, c='b', label=labels[1])\n\n # add corners to ra/dec check lists\n r1.extend(ra1)\n d1.extend(dec1)\n r2.extend(ra2)\n d2.extend(dec2)\n # Add some random points as well\n xs = rng.uniform(0, 4000, 100)\n ys = rng.uniform(0, 4000, 100)\n ra1, dec1 = wcs.xyToradec(xs, ys, units='radians')\n ra2, dec2 = wcs1.xyToradec(xs, ys, units='radians')\n r1.extend(ra1)\n d1.extend(dec1)\n r2.extend(ra2)\n d2.extend(dec2)\n i += 1\n\n if do_plot:\n 
ax.legend()\n xlim = ax.get_xlim()\n ax.set_xlim(xlim[1], xlim[0])\n plt.show()\n\n dist = sphere_dist(r1, d1, r2, d2)\n print(\"sphere dist mean, max, std\")\n print(\n np.rad2deg(np.mean(dist))*3600,\n np.rad2deg(np.max(dist))*3600,\n np.rad2deg(np.std(dist))*3600,\n )\n np.testing.assert_array_less(\n np.rad2deg(np.mean(dist))*3600,\n 5.0\n )\n if do_plot:\n plt.hist(np.rad2deg(dist)*3600, bins=100)\n plt.show()\n\n if do_plot:\n r1 = np.array([c.ra.rad for c in my_centers])\n d1 = np.array([c.dec.rad for c in my_centers])\n r2 = np.array([c.ra.rad for c in imsim_centers])\n d2 = np.array([c.dec.rad for c in imsim_centers])\n cd = np.cos(np.deg2rad(cmds['declination']))\n q = plt.quiver(r1, d1, np.rad2deg(r1-r2)*3600*cd, np.rad2deg(d1-d2)*3600)\n plt.quiverkey(q, 0.5, 1.1, 5.0, \"5 arcsec\", labelpos='E')\n plt.show()", "def sim_info(path, file, args):\n try:\n f = h5py.File(file, 'r')\n for key in list(f.keys()):\n if args.field in key:\n field = key\n dim = np.shape(f[field])[0]\n if args.zoom_times > 0:\n zoom_cell = int((dim - dim/float(args.zoom_times))/2.)\n else:\n zoom_cell = 0\n xmin = f['minmax_xyz'][0][0]/yt.units.au.in_units('cm').value\n xmax = f['minmax_xyz'][0][1]/yt.units.au.in_units('cm').value\n xmin_full = xmin\n cl = (xmax-xmin)/dim\n cell_positions = np.arange(xmin, xmax-1, cl)\n xmin = f['minmax_xyz'][0][0]/yt.units.au.in_units('cm').value + zoom_cell*cl\n xmax = f['minmax_xyz'][0][1]/yt.units.au.in_units('cm').value - zoom_cell*cl\n if args.axis == \"xy\":\n ymin = f['minmax_xyz'][1][0]/yt.units.au.in_units('cm').value + zoom_cell*cl\n ymax = f['minmax_xyz'][1][1]/yt.units.au.in_units('cm').value - zoom_cell*cl\n else:\n ymin = f['minmax_xyz'][2][0]/yt.units.au.in_units('cm').value + zoom_cell*cl\n ymax = f['minmax_xyz'][2][1]/yt.units.au.in_units('cm').value - zoom_cell*cl\n f.close()\n annotate_freq = ((xmax/cl) - (xmin/cl))/args.velocity_annotation_frequency\n except:\n f = h5py.File(file, 'r')\n f.close()\n if args.field == 'dens':\n field = ('flash', 'dens')\n else:\n part_file = file[:-12] + 'part' + file[-5:]\n f = yt.load(file, particle_filename=part_file)\n field = f.derived_field_list[[x[1] for x in f.derived_field_list].index(args.field)]\n f.close()\n dim = 800\n zoom_cell = 0.0\n if args.ax_lim == None:\n xmin = -1000\n xmax = 1000\n ymin = -1000\n ymax = 1000\n else:\n xmin = -1*args.ax_lim\n xmax = args.ax_lim\n ymin = -1*args.ax_lim\n ymax = args.ax_lim\n cl = (xmax-xmin)/dim\n xmin_full = xmin\n annotate_freq = dim/args.velocity_annotation_frequency\n smoothing = annotate_freq/2\n if args.axis == \"xz\":\n type = \"proj\"\n else:\n type = \"slice\"\n sim_info = {'field': field,\n 'dimension': dim,\n 'zoom_cell': zoom_cell,\n 'movie_file_type': type,\n 'xmin': xmin,\n 'xmax': xmax,\n 'ymin': ymin,\n 'ymax': ymax,\n 'cell_length': cl,\n 'annotate_freq': annotate_freq,\n 'smoothing': smoothing,\n 'xmin_full': xmin_full\n }\n f.close()\n return sim_info", "def getGameState(self):\n row1 = [0, 0, 0]\n row2 = [0, 0, 0]\n row3 = [0, 0, 0]\n tilePosStatement = Statement()\n posTerm1 = Term('?x')\n posTerm2 = Term('?y')\n posTerm3 = Term('?tile')\n tilePosStatement.terms = (posTerm1, posTerm2, posTerm3)\n tilePosStatement.predicate = 'tilePos'\n for fact in self.kb.facts:\n if match(fact.statement, tilePosStatement):\n if fact.statement.terms[2] == Term(Constant('tile1')):\n term = 1\n if fact.statement.terms[2] == Term(Constant('tile2')):\n term = 2\n if fact.statement.terms[2] == Term(Constant('tile3')):\n term = 3\n if fact.statement.terms[2] == 
Term(Constant('tile4')):\n term = 4\n if fact.statement.terms[2] == Term(Constant('tile5')):\n term = 5\n if fact.statement.terms[2] == Term(Constant('tile6')):\n term = 6\n if fact.statement.terms[2] == Term(Constant('tile7')):\n term = 7\n if fact.statement.terms[2] == Term(Constant('tile8')):\n term = 8\n if fact.statement.terms[2] == Term(Constant('empty')):\n term = -1\n if fact.statement.terms[0] == Term(Constant('pos1')):\n col = 0\n elif fact.statement.terms[0] == Term(Constant('pos2')):\n col = 1\n elif fact.statement.terms[0] == Term(Constant('pos3')):\n col = 2\n if fact.statement.terms[1] == Term(Constant('pos1')):\n row1[col] = term\n\n elif fact.statement.terms[1] == Term(Constant('pos2')):\n row2[col] = term\n\n elif fact.statement.terms[1] == Term(Constant('pos3')):\n row3[col] = term\n\n row1 = tuple(row1)\n row2 = tuple(row2)\n row3 = tuple(row3)\n result = (row1, row2, row3)\n return result\n\n ### Student code goes here", "def run_sequence(seq: Sequence, tracker: Tracker, debug=False, num_gpu=8):\n '''2021.1.2 Add multiple gpu support'''\n try:\n worker_name = multiprocessing.current_process().name\n worker_id = int(worker_name[worker_name.find('-') + 1:]) - 1\n gpu_id = worker_id % num_gpu\n torch.cuda.set_device(gpu_id)\n except:\n pass\n\n def _results_exist():\n if seq.object_ids is None:\n if seq.dataset in ['trackingnet', 'got10k']:\n base_results_path = os.path.join(tracker.results_dir, seq.dataset, seq.name)\n bbox_file = '{}.txt'.format(base_results_path)\n else:\n bbox_file = '{}/{}.txt'.format(tracker.results_dir, seq.name)\n return os.path.isfile(bbox_file)\n else:\n bbox_files = ['{}/{}_{}.txt'.format(tracker.results_dir, seq.name, obj_id) for obj_id in seq.object_ids]\n missing = [not os.path.isfile(f) for f in bbox_files]\n return sum(missing) == 0\n\n if _results_exist() and not debug:\n print('FPS: {}'.format(-1))\n return\n\n print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name))\n\n if debug:\n output = tracker.run_sequence(seq, debug=debug)\n else:\n try:\n output = tracker.run_sequence(seq, debug=debug)\n except Exception as e:\n print(e)\n return\n\n sys.stdout.flush()\n\n if isinstance(output['time'][0], (dict, OrderedDict)):\n exec_time = sum([sum(times.values()) for times in output['time']])\n num_frames = len(output['time'])\n else:\n exec_time = sum(output['time'])\n num_frames = len(output['time'])\n\n print('FPS: {}'.format(num_frames / exec_time))\n\n if not debug:\n _save_tracker_output(seq, tracker, output)", "def main():\n\n # initialize a random 3x3 TileGame problem\n tg = TileGame(3)\n # print(TileGame.board_to_pretty_string(tg.get_start_state()))\n # compute path using dfs\n path1 = id_astar(tg, tilegame_heuristic)\n path = ids(tg)\n print(tg.get_start_state())\n # display path\n print('ids')\n # TileGame.print_pretty_path(path)\n print('astar')\n TileGame.print_pretty_path(path1)\n print((time.time() - start_time))\n\n # initialize a small DGraph\n small_dgraph = DGraph([[None, 1], [1, None]], {1})\n # print the path using ids\n # print(ids(small_dgraph))", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def setup_steps(self):\n step1 = ground_step.Ground(5745, 495, 40, 44)\n step2 = ground_step.Ground(5788, 452, 40, 44)\n step3 = ground_step.Ground(5831, 409, 40, 44)\n step4 = ground_step.Ground(5874, 366, 40, 176)\n\n step5 = ground_step.Ground(6001, 366, 40, 176)\n step6 = ground_step.Ground(6044, 408, 40, 40)\n step7 = 
ground_step.Ground(6087, 452, 40, 40)\n step8 = ground_step.Ground(6130, 495, 40, 40)\n\n step9 = ground_step.Ground(6345, 495, 40, 40)\n step10 = ground_step.Ground(6388, 452, 40, 40)\n step11 = ground_step.Ground(6431, 409, 40, 40)\n step12 = ground_step.Ground(6474, 366, 40, 40)\n step13 = ground_step.Ground(6517, 366, 40, 176)\n\n step14 = ground_step.Ground(6644, 366, 40, 176)\n step15 = ground_step.Ground(6687, 408, 40, 40)\n step16 = ground_step.Ground(6728, 452, 40, 40)\n step17 = ground_step.Ground(6771, 495, 40, 40)\n\n step18 = ground_step.Ground(7760, 495, 40, 40)\n step19 = ground_step.Ground(7803, 452, 40, 40)\n step20 = ground_step.Ground(7845, 409, 40, 40)\n step21 = ground_step.Ground(7888, 366, 40, 40)\n step22 = ground_step.Ground(7931, 323, 40, 40)\n step23 = ground_step.Ground(7974, 280, 40, 40)\n step24 = ground_step.Ground(8017, 237, 40, 40)\n step25 = ground_step.Ground(8060, 194, 40, 40)\n step26 = ground_step.Ground(8103, 194, 40, 360)\n\n step27 = ground_step.Ground(8488, 495, 40, 40)\n\n self.step_group = pygame.sprite.Group(step1, step2,\n step3, step4,\n step5, step6,\n step7, step8,\n step9, step10,\n step11, step12,\n step13, step14,\n step15, step16,\n step17, step18,\n step19, step20,\n step21, step22,\n step23, step24,\n step25, step26,\n step27)", "def _getCoordsFromRun(self, run_dict, bold=False):\n run_dict = copy.deepcopy(run_dict)\n run_dict = self._filterDictionaryBold(run_dict, bold=bold)\n timeDictKey = \"\"\n if \"t\" in run_dict:\n timeDictKey = \"t\"\n else:\n for k in run_dict:\n if k.startswith(\"t\"):\n timeDictKey = k\n logging.info(f\"Assuming {k} to be the time axis.\")\n break\n assert len(timeDictKey) > 0, \"No time array found (starting with t) in model output.\"\n t = run_dict[timeDictKey].copy()\n del run_dict[timeDictKey]\n return timeDictKey, {\n \"output\": list(run_dict.keys()),\n \"space\": list(range(next(iter(run_dict.values())).shape[0])),\n \"time\": t,\n }", "def single_epoch(g,rows,cols,midpoint):\n\n num_top = 10 \n #3 for 8x8\n one_to_select = 0 \n top_nodes = g.top_n_nodes(num_top)\n '''\n for k in range(num_top):\n node_num = top_nodes[k]\n trip_list = g.node2trip_ids[node_num]\n print \"Next Midpoint: %d\" % k\n print node_num\n print g.node_to_coords(node_num)\n print \"Num trips: %d\" % len(trip_list)\n for i in range(len(trip_list)):\n trip_id = trip_list[i]\n line_num = g.trip_id2line_num[trip_id]\n p = Path(trip_id,g,line_num)\n \"\"\"\n print i\n print trip_id\n p.print_path()\n for i in range(p.graph.num_edges):\n if p.edges[i]:\n sys.stdout.write(\"%d, \" % (i + 1))\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"1s: \")\n for key in p.partials.keys():\n if p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n0s: \")\n for key in p.partials.keys():\n if not p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n\")\n #\"\"\"\n '''\n\n #trip_list = g.node2trip_ids[g.best_node]\n #midpoint = top_nodes[one_to_select]\n trip_list = g.node2trip_ids[midpoint]\n print \"Selected midpoint: %d\" % midpoint \n print g.node_to_coords(midpoint)\n out_file = open(\"datasets/full_data_%d_%d_%d.txt\" % (rows,cols,midpoint),'w')\n partial_file = open(\"datasets/partials_%d_%d_%d.txt\" % (rows,cols,midpoint), 'w')\n for i in range(len(trip_list)):\n trip_id = trip_list[i]\n line_num = g.trip_id2line_num[trip_id]\n p = Path(trip_id,g,line_num=line_num,midpoint=midpoint)\n \"\"\"\n print i\n print trip_id\n p.print_path()\n for i in range(p.graph.num_edges):\n if 
p.edges[i]:\n sys.stdout.write(\"%d, \" % (i + 1))\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"1s: \")\n for key in p.partials.keys():\n if p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n0s: \")\n for key in p.partials.keys():\n if not p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n\")\n \"\"\"\n out_string = str(p.edges)[1:-1]\n out_file.write(\"%s\\n\" % out_string)\n for i in range(p.graph.num_edges):\n if i in p.partials.keys():\n partial_file.write(\"%d\" % p.partials[i])\n else:\n partial_file.write(\"-1\")\n if i < p.graph.num_edges-1:\n partial_file.write(\",\")\n partial_file.write(\"\\n\")\n\n out_file.close()", "def main():\n # https://github.com/caelan/pddlstream/blob/master/examples/motion/run.py\n # TODO: 3D work and CSpace\n # TODO: visualize just the tool frame of an end effector\n\n np.set_printoptions(precision=3)\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--algorithm', default='rrt_connect',\n help='The algorithm seed to use.')\n parser.add_argument('-d', '--draw', action='store_true',\n help='When enabled, draws the roadmap')\n parser.add_argument('-r', '--restarts', default=0, type=int,\n help='The number of restarts.')\n parser.add_argument('-s', '--smooth', action='store_true',\n help='When enabled, smooths paths.')\n parser.add_argument('-t', '--time', default=1., type=float,\n help='The maximum runtime.')\n args = parser.parse_args()\n\n #########################\n\n obstacles = [\n create_box(center=(.35, .75), extents=(.25, .25)),\n create_box(center=(.75, .35), extents=(.225, .225)),\n create_box(center=(.5, .5), extents=(.225, .225)),\n ]\n\n # TODO: alternate sampling from a mix of regions\n regions = {\n 'env': create_box(center=(.5, .5), extents=(1., 1.)),\n 'green': create_box(center=(.8, .8), extents=(.1, .1)),\n }\n\n start = np.array([0., 0.])\n goal = 'green'\n if isinstance(goal, str) and (goal in regions):\n goal = get_box_center(regions[goal])\n else:\n goal = np.array([1., 1.])\n\n title = args.algorithm\n if args.smooth:\n title += '+shortcut'\n viewer = draw_environment(obstacles, regions, title=title)\n\n #########################\n\n #connected_test, roadmap = get_connected_test(obstacles)\n distance_fn = get_distance_fn(weights=[1, 1]) # distance_fn\n\n # samples = list(islice(region_gen('env'), 100))\n with profiler(field='cumtime'): # cumtime | tottime\n # TODO: cost bound & best cost\n for _ in range(args.restarts+1):\n start_time = time.time()\n collision_fn, cfree = get_collision_fn(obstacles)\n sample_fn, samples = get_sample_fn(regions['env'], obstacles=[]) # obstacles\n extend_fn, roadmap = get_extend_fn(obstacles=obstacles) # obstacles | []\n\n if args.algorithm == 'prm':\n path = prm(start, goal, distance_fn, sample_fn, extend_fn, collision_fn,\n num_samples=200)\n elif args.algorithm == 'lazy_prm':\n path = lazy_prm(start, goal, sample_fn, extend_fn, collision_fn,\n num_samples=200, max_time=args.time)[0]\n elif args.algorithm == 'rrt':\n path = rrt(start, goal, distance_fn, sample_fn, extend_fn, collision_fn,\n iterations=INF, max_time=args.time)\n elif args.algorithm == 'rrt_connect':\n path = rrt_connect(start, goal, distance_fn, sample_fn, extend_fn, collision_fn,\n max_time=args.time)\n elif args.algorithm == 'birrt':\n path = birrt(start, goal, distance_fn=distance_fn, sample_fn=sample_fn,\n extend_fn=extend_fn, collision_fn=collision_fn,\n max_time=args.time, smooth=100)\n elif args.algorithm == 'rrt_star':\n path = 
rrt_star(start, goal, distance_fn, sample_fn, extend_fn, collision_fn,\n radius=1, max_iterations=INF, max_time=args.time)\n elif args.algorithm == 'lattice':\n path = lattice(start, goal, extend_fn, collision_fn, distance_fn=distance_fn)\n else:\n raise NotImplementedError(args.algorithm)\n paths = [] if path is None else [path]\n\n #paths = random_restarts(rrt_connect, start, goal, distance_fn=distance_fn, sample_fn=sample_fn,\n # extend_fn=extend_fn, collision_fn=collision_fn, restarts=INF,\n # max_time=args.time, max_solutions=INF, smooth=100) #, smooth=1000, **kwargs)\n\n # paths = exhaustively_select_portfolio(paths, k=2)\n # print(score_portfolio(paths))\n\n #########################\n\n if args.draw:\n # roadmap = samples = cfree = []\n add_roadmap(viewer, roadmap, color='black')\n add_points(viewer, samples, color='red', radius=2)\n #add_points(viewer, cfree, color='blue', radius=2)\n\n print('Solutions ({}): {} | Time: {:.3f}'.format(len(paths), [(len(path), round(compute_path_cost(\n path, distance_fn), 3)) for path in paths], elapsed_time(start_time)))\n for path in paths:\n add_path(viewer, path, color='green')\n\n if args.smooth:\n for path in paths:\n extend_fn, roadmap = get_extend_fn(obstacles=obstacles) # obstacles | []\n smoothed = smooth_path(path, extend_fn, collision_fn, iterations=INF, max_time=args.time)\n print('Smoothed distance_fn: {:.3f}'.format(compute_path_cost(smoothed, distance_fn)))\n add_path(viewer, smoothed, color='red')\n user_input('Finish?')", "def test_run_time(self):\n\n wrapper = Wrapper()\n name = 'simplemesh'\n corner_points = ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0))\n extrude_length = 1\n nex = 3\n ney = 3\n nez = 1\n create_quad_mesh(name, wrapper, corner_points,\n extrude_length, nex, ney, nez)\n\n wrapper.CM[CUBA.NAME] = name\n wrapper.CM_extensions[CUBAExt.GE] = (CUBAExt.INCOMPRESSIBLE,\n CUBAExt.LAMINAR_MODEL)\n wrapper.SP[CUBA.TIME_STEP] = 1\n wrapper.SP[CUBA.NUMBER_OF_TIME_STEPS] = 1\n wrapper.SP[CUBA.DENSITY] = 1.0\n wrapper.SP[CUBA.DYNAMIC_VISCOSITY] = 1.0\n wrapper.BC[CUBA.VELOCITY] = {'inflow': ('fixedValue', (0.1, 0, 0)),\n 'outflow': 'zeroGradient',\n 'walls': ('fixedValue', (0, 0, 0)),\n 'frontAndBack': 'empty'}\n wrapper.BC[CUBA.PRESSURE] = {'inflow': 'zeroGradient',\n 'outflow': ('fixedValue', 0),\n 'walls': 'zeroGradient',\n 'frontAndBack': 'empty'}\n\n mesh_inside_wrapper = wrapper.get_dataset(name)\n\n wrapper.run()\n\n # sum data pointwise\n old_vel = 0.0\n old_pres = 0.0\n for point in mesh_inside_wrapper.iter(item_type=CUBA.POINT):\n velo = point.data[CUBA.VELOCITY]\n old_vel += math.sqrt(velo[0]*velo[0] + velo[1]*velo[1] +\n velo[2]*velo[2])\n old_pres += point.data[CUBA.PRESSURE]\n\n wrapper.SP[CUBA.DENSITY] = 5.0\n\n wrapper.run()\n\n # sum data pointwise\n new_vel = 0.0\n new_pres = 0.0\n for point in mesh_inside_wrapper.iter(item_type=CUBA.POINT):\n velo = point.data[CUBA.VELOCITY]\n new_vel += math.sqrt(velo[0]*velo[0] + velo[1]*velo[1] +\n velo[2]*velo[2])\n new_pres += point.data[CUBA.PRESSURE]\n\n self.assertNotAlmostEqual(old_vel, new_vel, 5)\n self.assertNotAlmostEqual(old_pres, new_pres, 5)", "def get_space(): \n space = {\n 'timesteps_per_batch': hp.choice('timesteps_per_batch', [512, 1024, 2048, 4096, 8192]),\n 'vf_stepsize': hp.loguniform('vf_stepsize', -5, -2),\n 'max_kl' : hp.loguniform('max_kl', -2.5, -0.5),\n 'gamma': hp.uniform('gamma', (1-(1/((10**(-1))*4))), (1-(1/((10**(1.5))*4)))), #4:T. Remember to change this if code is altered. -1:T/tau. 
tau=0.04=dt\n 'lam': hp.uniform('lam', (1-(1/((10**(-1))*4))), (1-(1/((10**(1.5))*4)))) #4:T. Remember to change this if code is altered. -1:T/tau. tau=0.04=dt\n }\n return space", "def height_profile(grofile,trajfile,**kwargs):\n\n\t#---unpack\n\tsn = kwargs['sn']\n\twork = kwargs['workspace']\n\tparallel = kwargs.get('parallel',False)\n\t#---prepare universe\t\n\tuni = MDAnalysis.Universe(grofile,trajfile)\n\tnframes = len(uni.trajectory)\n\t#---MDAnalysis uses Angstroms not nm\n\tlenscale = 10.\n\t#---select residues of interest\n\tselector = kwargs['calc']['specs']['selector']\n\tnojumps = kwargs['calc']['specs'].get('nojumps','')\n\n\n\t#---pack\n\tattrs,result = {},{}\n\tattrs['selector'] = selector\n\tattrs['nojumps'] = nojumps\n\t#result['resnames'] = np.array(sel.residues.resnames)\n\t#result['monolayer_indices'] = np.array(monolayer_indices)\n\t#result['vecs'] = vecs\n\t#result['nframes'] = np.array(nframes)\n\t#result['points'] = coms_out\n\t#result['resids'] = np.array(np.unique(resids))\n\t#result['resids_exact'] = resids\n\tattrs['separator'] = kwargs['calc']['specs']['separator']\n\treturn result,attrs", "def single_run(steps_number):\n values = list()\n numerator = 0\n for i in trange(1, steps_number):\n\n numerator += generate_episode()\n\n values.append(numerator / i)\n\n return np.array(values)", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def Render(shelf, **options):\n #low, high = options.pop('low', None), options.pop('high', None)\n steps = options.pop('steps')\n low = steps.min()\n high = steps.max()\n n = len(steps)\n\n print(n)\n\n xs = numpy.linspace(low, high, 1001)\n \n ds = shelf.Density(xs)\n return xs, ds", "def add_building_output_locations(self,dictionary, start,end,step): \n \"\"\"\n Given a dictionary of building footprints and associated nodes,element and sides, add the values \n to the netcdf grid file.\n \n The nodes, elements and sides associated with each footprint correspond to the there index in the RiCOM grid file\n \n Dictionary format:\n {id1: {'nodes': [n1, n2,...nn] }, {'elements': [e1,e2,...,en] },{'sides': [s1,s2,...,sn]}, id2: {}, id3 {}, ...., idn {} } \n \n idn = the id of the building footprint that the node, elements and sides belong to\n \n \"\"\"\n \n if (dictionary != {}):\n maxNodes = 0\n maxElements = 0\n maxSides = 0\n nodesAll = []\n elementsAll = []\n sidesAll = []\n id = []\n perimeter = []\n type = []\n for row in dictionary.iteritems(): \n id.append(row[0]) \n n = row[1]['nodes'] \n e = row[1]['elements']\n s = row[1]['sides']\n perimeter.append(row[1]['perimeter'])\n \n if row[1]['type'] == \"BUILDINGS_AS_HOLES\":\n typeNUM = 1\n elif row[1]['type'] == \"BUILDINGS_GRIDDED\":\n typeNUM = 2\n\n elif row[1]['type'] == \"BUILDINGS_AS_POINTS\":\n typeNUM = 3\n else:\n typeNUM = 0\n type.append(typeNUM)\n \n nodesAll.extend(n)\n elementsAll.extend(e)\n sidesAll.extend(s)\n if maxNodes < len(n): maxNodes = len(n)\n if maxElements < len(e): maxElements = len(e)\n if maxSides < len(s): maxSides = len(s)\n \n \n #remove repeated elements, sides and nodes\n nodesAll = list(set(nodesAll))\n elementsAll = list(set(elementsAll))\n sidesAll = list(set(sidesAll))\n \n print \"# elements = %s\" % len(elementsAll)\n print \"# sides = %s\" % len(sidesAll)\n print \"# nodes = %s\" % len(nodesAll)\n\n \n #initialise arrays for entry into netcdf file\n nodes = zeros((len(dictionary),maxNodes))\n elements = zeros((len(dictionary),maxElements))\n sides = zeros((len(dictionary),maxSides)) \n \n i = 0\n 
for row in dictionary.iteritems(): \n nodes[i,0:(len(row[1]['nodes']))] = row[1]['nodes']\n elements[i,0:(len(row[1]['elements']))] = row[1]['elements']\n sides[i,0:(len(row[1]['sides']))] = row[1]['sides']\n i+=1 \n \n #create dimensions\n try: self.buildings.createDimension('max_number_nodes',maxNodes)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.buildings.createDimension('max_number_elements',maxElements)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.buildings.createDimension('max_number_sides',maxSides)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.buildings.createDimension('number_of_buildings',len(dictionary))\n except Exception, e: print \"WARNING: %s\" % e \n try: self.building_nodes.createDimension('number_of_nodes',len(nodesAll))\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_elements.createDimension('number_of_elements',len(elementsAll))\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_sides.createDimension('number_of_sides',len(sidesAll))\n except Exception, e: print \"WARNING: %s\" % e\n \n \n #create variables\n try: building_id = self.buildings.createVariable(varname = 'building_id',datatype = 'i', dimensions=('number_of_buildings',)) \n except Exception, e:\n building_id = self.buildings.variables['building_id']\n print \"WARNING: %s\" % e\n \n try: building_wkt = self.buildings.createVariable(varname = 'building_wkt',datatype = str, dimensions=('number_of_buildings',)) \n except Exception, e:\n building_wkt = self.buildings.variables['building_wkt'] \n print \"WARNING: %s\" % e\n\n try: building_perimeter = self.buildings.createVariable(varname = 'building_perimeter',datatype = 'd', dimensions=('number_of_buildings',)) \n except Exception, e:\n building_perimeter = self.buildings.variables['building_perimeter'] \n print \"WARNING: %s\" % e\n\n\n try: building_type = self.buildings.createVariable(varname = 'building_type',datatype = 'i', dimensions=('number_of_buildings',)) \n except Exception, e:\n building_type = self.buildings.variables['building_type'] \n print \"WARNING: %s\" % e\n\n try: building_nodes = self.buildings.createVariable(varname = 'building_nodes',datatype = 'i', dimensions=('number_of_buildings','max_number_nodes',)) \n except Exception, e:\n building_nodes = self.buildings.variables['building_nodes'] \n print \"WARNING: %s\" % e\n \n try: building_elements = self.buildings.createVariable(varname = 'building_elements',datatype = 'i', dimensions=('number_of_buildings','max_number_elements',)) \n except Exception, e:\n building_elements = self.buildings.variables['building_elements']\n print \"WARNING: %s\" % e\n \n try: building_sides = self.buildings.createVariable(varname = 'building_sides',datatype = 'i', dimensions=('number_of_buildings','max_number_sides',)) \n except Exception, e:\n building_sides = self.buildings.variables['building_sides']\n print \"WARNING: %s\" % e\n \n building_nodes[:] = nodes\n building_elements[:] = elements\n building_sides[:] = sides\n building_id[:] = array(id) \n building_perimeter[:] = array(perimeter)\n building_type[:] = array(type)\n #Set the attributes\n self.building_nodes.start = start\n self.building_nodes.finish = end\n self.building_nodes.step = step\n self.building_elements.start = start\n self.building_elements.finish = end\n self.building_elements.step = step\n self.building_sides.start = start\n self.building_sides.finish = end\n self.building_sides.step = step\n \n #assign the data\n output_ids = {'nodes': [], 
'elements': [], 'sides': []}\n try: output_ids['nodes'] = self.building_nodes.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_nodes',))\n except Exception, e:\n output_ids['nodes'] = self.building_nodes.variables['id']\n print \"WARNING: %s\" % e\n try: output_ids['elements'] = self.building_elements.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_elements',))\n except Exception, e:\n output_ids['elements'] = self.building_elements.variables['id']\n print \"WARNING: %s\" % e\n try: output_ids['sides'] = self.building_sides.createVariable(varname = 'id',datatype = 'i', dimensions=('number_of_sides',))\n except Exception, e:\n output_ids['sides'] = self.building_sides.variables['id']\n print \"WARNING: %s\" % e\n \n \n output_ids['nodes'][:] = array(nodesAll)\n output_ids['elements'][:] = array(elementsAll)\n output_ids['sides'][:] = array(sidesAll)\n \n \n self.buildingsAdded = True\n else:\n #create dimensions\n try: self.buildings.createDimension('number_of_buildings',0)\n except Exception, e: print \"WARNING: %s\" % e \n try: self.building_nodes.createDimension('number_of_nodes',0)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_elements.createDimension('number_of_elements',0)\n except Exception, e: print \"WARNING: %s\" % e\n try: self.building_sides.createDimension('number_of_sides',0)\n except Exception, e: print \"WARNING: %s\" % e \n self.buildingsAdded = True", "def timings_across_runs(self):\n\n\t\t# first determine individual run duration (to make sure that stimulus timings of all runs are correct)\n\t\trun_duration = []\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tniiFile = NiftiImage(self.runFile(stage = 'processed/mri', run = r))\n\t\t\ttr, nr_trs = round(niiFile.rtime*1)/1000.0, niiFile.timepoints\n\t\t\trun_duration.append(tr * nr_trs)\n\t\trun_duration = np.r_[0,np.cumsum(np.array(run_duration))]\n\n\t\t# timing information stimuli\n\t\tstim_info = []\n\t\trun = 0\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tstim_events = np.loadtxt(self.runFile(stage = 'processed/behavior', run = r, extension = '.txt', postFix = ['stim' ,'all','task']))\n\t\t\tstim_events[:,:2] += run_duration[run]\n\t\t\tstim_info.append(stim_events)\n\t\t\trun += 1\n\n\t\t# save stim_info as text_file\t\n\t\tnp.savetxt(self.runFile(stage = 'processed/behavior', postFix = ['stim_info_all'],extension = '.txt'), np.vstack(stim_info), fmt = '%3.2f', delimiter = '\\t')", "def BeamPosition():\n \n XPOS, YPOS = [], []\n\n x=0\n for j in range(0,6,1):\n x += 0.1\n y=0\n for k in range(0,6,1):\n y += 0.2\n XPOS.append(x)\n YPOS.append(y)\n\n return XPOS, YPOS", "def simulation():\n\n return {\n \"type\": \"class\",\n \"base\": \"iso.process_step\",\n \"is_abstract\": False,\n \"is_document\": True,\n \"pstr\": (\"({}/{}/{})\", (\"used\", \"ran_for_experiments\", \"ensemble_id\")),\n \"properties\": [\n (\n \"part_of_project\",\n \"linked_to(designing.project)\",\n \"1.N\",\n \"Project or projects for which simulation was run\",\n ),\n (\n \"ran_for_experiments\",\n \"linked_to(designing.numerical_experiment)\",\n \"1.N\",\n \"One or more experiments with which the simulation is \"\n \"associated\",\n ),\n (\n \"sub_experiment\",\n \"linked_to(designing.numerical_experiment)\",\n \"0.1\",\n \"For start-date ensembles, this will indicate the beginning \"\n \"year; for offline models driven by output from another \"\n \"model, this will provide the source_id and variant_label \"\n \"for the 
'driving' model.\",\n ),\n (\n \"used\",\n \"linked_to(science.model)\",\n \"1.1\",\n \"The model used to run the simulation\",\n ),\n (\n \"primary_ensemble\",\n \"linked_to(activity.ensemble)\",\n \"0.1\",\n \"Primary Ensemble (ensemble for which this simulation was \"\n \"first run).\",\n ),\n (\n \"institution\",\n \"linked_to(shared.party)\",\n \"0.1\",\n \"institution which carried out the simulation\",\n ),\n (\n \"parent_of\",\n \"linked_to(activity.child_simulation)\",\n \"0.N\",\n \"If appropriate, links to simulations which branched from \"\n \"this one\",\n ),\n (\n \"produced\",\n \"linked_to(data.dataset)\",\n \"0.N\",\n \"Products of the simulation\",\n ),\n (\n \"had_performance\",\n \"linked_to(platform.performance)\",\n \"0.1\",\n \"Performance of the simulation.\",\n ),\n (\n \"ran_on\",\n \"linked_to(platform.machine)\",\n \"0.1\",\n \"The machine on which the simulation was run.\",\n ),\n (\n \"errata\",\n \"shared.online_resource\",\n \"0.1\",\n \"Link to errata associated with this simulation.\",\n ),\n (\n \"ensemble_id\",\n \"activity.axis_member\",\n \"0.N\",\n \"Identification within ensemble axes via axis member. \"\n \"(Multiple axis members within a simulation cannot share the \"\n \"same ensemble_axis.) (There must be an axis_member instance \"\n \"for each ensemble axis in a parent ensemble.)\",\n ),\n # Time\n (\n \"start_time\",\n \"time.date_time\",\n \"0.1\",\n \"The start date-time of the simulation. e.g. \"\n \"2012-04-01 00:00:00\",\n ),\n (\n \"end_time\",\n \"time.date_time\",\n \"0.1\",\n \"The end date-time of the simulation. e.g. \"\n \"2087-11-30 12:00:00\",\n ),\n (\n \"calendar\",\n \"time.calendar\",\n \"0.1\",\n \"The calendar used in the simulation\",\n ),\n # Further Info URL\n (\n \"documentation\",\n \"shared.online_resource\",\n \"0.1\",\n \"On-line location of additional documentation\",\n ),\n # Extra attributes\n (\n \"extra_attributes\",\n \"shared.extra_attribute\",\n \"0.N\",\n \"Additional attributes provided with simulation.\",\n ),\n ],\n \"constraints\": [\n (\"cardinality\", \"rationale\", \"0.0\"),\n ],\n }", "def part2():\r\n my_input = 368078\r\n coords = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)]\r\n x = y = dx = 0\r\n dy = -1\r\n grid = {}\r\n\r\n while True:\r\n total = 0\r\n for offset in coords:\r\n ox, oy = offset\r\n if (x+ox, y+oy) in grid:\r\n total += grid[(x+ox, y+oy)]\r\n if total > int(my_input):\r\n return total\r\n if (x, y) == (0, 0):\r\n grid[(0, 0)] = 1\r\n else:\r\n grid[(x, y)] = total\r\n if (x == y) or (x < 0 and x == -y) or (x > 0 and x == 1-y):\r\n dx, dy = -dy, dx\r\n x, y = x+dx, y+dy", "def run_time_graph(size):\n map = MoonMap(size)\n set = OrderedDict()\n get = OrderedDict()\n delete = OrderedDict()\n for i in range(200, size):\n set[i] = ( __test_set(i, map)/float(i) ) * 1000000\n get[i] = (__test_get(i, map)/float(i) ) * 1000000\n delete[i] = (__test_delete(i, map)/float(i) ) * 1000000\n\n print(i)\n\n __plot(set, \"Set Speed (Map Size {})\".format(size))\n __plot(get, \"Get Speed (Map Size {})\".format(size) )\n __plot(delete, \"Delete Speed (Map Size {})\".format(size))", "def gen_data(npt, typ, ndim, rstate=None):\n mid = .5 # i'm placing in unit cube\n if typ == 'ball':\n r0 = 0.5\n pts = genball(npt, ndim, rstate=rstate) * r0 + mid\n volume = (np.pi**(ndim / 2) / scipy.special.gamma(ndim / 2 + 1) *\n r0**ndim)\n elif typ == 'pin':\n w = 0.01\n a = 1\n pts = np.zeros((npt, ndim))\n pts[:, 1:] = genball(npt, ndim - 1, rstate=rstate) * w + mid\n pts[:, 0] = 
(rstate.uniform(size=npt) - 0.5) * a + mid\n volume = (np.pi**((ndim - 1) / 2) /\n scipy.special.gamma((ndim - 1) / 2 + 1) * w**(ndim - 1) * a)\n elif typ == 'torus':\n w = 0.01\n r0 = 0.45\n pts = np.zeros((npt, ndim))\n pts[:, :2] = genshell(r0 - w / 2, r0 + w / 2, npt, 2,\n rstate=rstate) + mid\n pts[:,\n 2:] = (rstate.uniform(size=(npt, ndim - 2)) * 2 - 1) * w / 2 + mid\n volume = w**(ndim - 2) * np.pi * ((r0 + w / 2)**2 - (r0 - w / 2)**2)\n elif typ == 'cylinder':\n w = 0.01\n r0 = 0.45\n a = 1\n pts = np.zeros((npt, ndim))\n pts[:, :2] = genshell(r0 - w / 2, r0 + w / 2, npt, 2,\n rstate=rstate) + mid\n pts[:, 2:] = rstate.uniform(size=(npt, ndim - 2)) * a\n volume = np.pi * ((r0 + w / 2)**2 - (r0 - w / 2)**2)\n elif typ == 'shell':\n r1 = 0.45\n r2 = 0.46\n pts = genshell(r1, r2, npt, ndim, rstate=rstate) + mid\n volume = (np.pi**(ndim / 2) / scipy.special.gamma(ndim / 2 + 1) *\n (r2**ndim - r1**ndim))\n else:\n raise RuntimeError('unknown', typ)\n return pts, volume", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def time_position(self):\n rt_most_pixel = None\n lf_most_pixel = None\n time_position = []\n min_time_len = None\n for i in range (len(np.unique(self.pd.objid))):\n trajec = self.dataset.trajec(self.dataset.keys[i])\n times = trajec.time_epoch_secs + trajec.time_epoch_nsecs / 1e9\n time_pos = np.vstack([times, trajec.position_x])\n time_position.append(time_pos)\n if min_time_len == None:\n min_time_len = len(times)\n elif min_time_len > len(times):\n min_time_len = len(times)\n pixels = np.unique(trajec.position_x)\n if rt_most_pixel ==None:\n rt_most_pixel = pixels[-1]\n elif rt_most_pixel < pixels[-1]:\n rt_most_pixel = pixels[-1]\n if lf_most_pixel ==None:\n lf_most_pixel = pixels[0]\n elif lf_most_pixel > pixels[0]:\n lf_most_pixel = pixels[0]\n print min_time_len\n print rt_most_pixel\n print lf_most_pixel\n print rt_most_pixel - lf_most_pixel\n return time_position, rt_most_pixel, lf_most_pixel", "def simulate_memories(simulation_length):\n \n \n pass", "def main():\n\n with its.device.ItsSession() as cam:\n\n props = cam.get_camera_properties()\n its.caps.skip_unless(its.caps.raw16(props) and\n its.caps.manual_sensor(props) and\n its.caps.read_3a(props) and\n its.caps.per_frame_control(props) and\n not its.caps.mono_camera(props))\n debug = its.caps.debug_mode()\n\n # Expose for the scene with min sensitivity\n exp_min, exp_max = props[\"android.sensor.info.exposureTimeRange\"]\n sens_min, _ = props[\"android.sensor.info.sensitivityRange\"]\n # Digital gains might not be visible on RAW data\n sens_max = props[\"android.sensor.maxAnalogSensitivity\"]\n sens_step = (sens_max - sens_min) / NUM_ISO_STEPS\n white_level = float(props[\"android.sensor.info.whiteLevel\"])\n black_levels = [its.image.get_black_level(i,props) for i in range(4)]\n # Get the active array width and height.\n aax = props[\"android.sensor.info.activeArraySize\"][\"left\"]\n aay = props[\"android.sensor.info.activeArraySize\"][\"top\"]\n aaw = props[\"android.sensor.info.activeArraySize\"][\"right\"]-aax\n aah = props[\"android.sensor.info.activeArraySize\"][\"bottom\"]-aay\n raw_stat_fmt = {\"format\": \"rawStats\",\n \"gridWidth\": aaw/IMG_STATS_GRID,\n \"gridHeight\": aah/IMG_STATS_GRID}\n\n e_test = []\n mult = 1.0\n while exp_min*mult < exp_max:\n e_test.append(int(exp_min*mult))\n mult *= EXP_MULT\n if e_test[-1] < exp_max * INCREASING_THR:\n e_test.append(int(exp_max))\n e_test_ms = [e / 1000000.0 for e in e_test]\n\n for s in range(sens_min, sens_max, 
sens_step):\n means = []\n means.append(black_levels)\n reqs = [its.objects.manual_capture_request(s, e, 0) for e in e_test]\n # Capture raw in debug mode, rawStats otherwise\n caps = []\n for i in range(len(reqs) / SLICE_LEN):\n if debug:\n caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], cam.CAP_RAW)\n else:\n caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], raw_stat_fmt)\n last_n = len(reqs) % SLICE_LEN\n if last_n == 1:\n if debug:\n caps += [cam.do_capture(reqs[-last_n:], cam.CAP_RAW)]\n else:\n caps += [cam.do_capture(reqs[-last_n:], raw_stat_fmt)]\n elif last_n > 0:\n if debug:\n caps += cam.do_capture(reqs[-last_n:], cam.CAP_RAW)\n else:\n caps += cam.do_capture(reqs[-last_n:], raw_stat_fmt)\n\n # Measure the mean of each channel.\n # Each shot should be brighter (except underexposed/overexposed scene)\n for i,cap in enumerate(caps):\n if debug:\n planes = its.image.convert_capture_to_planes(cap, props)\n tiles = [its.image.get_image_patch(p, 0.445, 0.445, 0.11, 0.11) for p in planes]\n mean = [m * white_level for tile in tiles\n for m in its.image.compute_image_means(tile)]\n img = its.image.convert_capture_to_rgb_image(cap, props=props)\n its.image.write_image(img, \"%s_s=%d_e=%05d.jpg\" % (NAME, s, e_test))\n else:\n mean_image, _ = its.image.unpack_rawstats_capture(cap)\n mean = mean_image[IMG_STATS_GRID/2, IMG_STATS_GRID/2]\n\n print \"ISO=%d, exposure time=%.3fms, mean=%s\" % (\n s, e_test[i] / 1000000.0, str(mean))\n means.append(mean)\n\n\n # means[0] is black level value\n r = [m[0] for m in means[1:]]\n gr = [m[1] for m in means[1:]]\n gb = [m[2] for m in means[1:]]\n b = [m[3] for m in means[1:]]\n\n pylab.plot(e_test_ms, r, \"r.-\")\n pylab.plot(e_test_ms, b, \"b.-\")\n pylab.plot(e_test_ms, gr, \"g.-\")\n pylab.plot(e_test_ms, gb, \"k.-\")\n pylab.xscale('log')\n pylab.yscale('log')\n pylab.title(\"%s ISO=%d\" % (NAME, s))\n pylab.xlabel(\"Exposure time (ms)\")\n pylab.ylabel(\"Center patch pixel mean\")\n matplotlib.pyplot.savefig(\"%s_s=%d.png\" % (NAME, s))\n pylab.clf()\n\n allow_under_saturated = True\n for i in xrange(1, len(means)):\n prev_mean = means[i-1]\n mean = means[i]\n\n if np.isclose(max(mean), white_level, rtol=SATURATION_TOL):\n print \"Saturated: white_level %f, max_mean %f\"% (white_level, max(mean))\n break;\n\n if allow_under_saturated and np.allclose(mean, black_levels, rtol=BLK_LVL_TOL):\n # All channel means are close to black level\n continue\n\n allow_under_saturated = False\n # Check pixel means are increasing (with small tolerance)\n channels = [\"Red\", \"Gr\", \"Gb\", \"Blue\"]\n for chan in range(4):\n err_msg = \"ISO=%d, %s, exptime %3fms mean: %.2f, %s mean: %.2f, TOL=%.f%%\" % (\n s, channels[chan],\n e_test_ms[i-1], mean[chan],\n \"black level\" if i == 1 else \"exptime %3fms\"%e_test_ms[i-2],\n prev_mean[chan],\n INCREASING_THR*100)\n assert mean[chan] > prev_mean[chan] * INCREASING_THR, err_msg", "def scenes_to_frames():\n # Scene 001 from frames 1-150\n cmd.scene('001', animate=0)\n cmd.mview('store', 1)\n cmd.mview('store', 150)\n # Scene 002 from frames 250-400\n cmd.scene('002', animate=0)\n cmd.mview('store', 250)\n cmd.mview('store', 400)", "def get_grid_size(self, ui, res_dir):\r\n print_it('determining grid size', PrintOpts.lvl1.value)\r\n self.sun.simple_clone()\r\n self.sun.clone.make_profile(PreSol.res_x.value, PreSol.res_y.value,\r\n self.init_force)\r\n self.planet.simple_clone()\r\n self.planet.clone.make_slave_to(self.sun.clone)\r\n\r\n init_displ = hertz_displ(self.sun.clone.e, self.planet.e,\r\n 
self.sun.clone.ny, self.planet.ny,\r\n self.sun.clone.r_hertz_x,\r\n self.sun.clone.r_hertz_y,\r\n self.planet.clone.r_hertz_x,\r\n self.planet.clone.r_hertz_y,\r\n self.sun.norm_forces[0])\r\n too_many_els_in_y = 1\r\n too_many_els_in_x = 1\r\n contact_width_y = 0.05\r\n contact_width_x = 0.05\r\n while too_many_els_in_y != 0 or \\\r\n too_many_els_in_x != 0:\r\n self.sun.clone.make_profile(self.sun.clone.res_x,\r\n self.sun.clone.res_y, self.init_force,\r\n contact_width=contact_width_y,\r\n contact_length=contact_width_x)\r\n self.planet.clone.make_slave_to(self.sun.clone)\r\n\r\n pressure, init_displ = \\\r\n pre_solve_half_space(self.sun.clone.profile,\r\n self.planet.clone.profile,\r\n self.sun.clone.x_axis,\r\n self.sun.clone.y_axis,\r\n self.sun.clone.res_x, self.sun.clone.res_y,\r\n self.sun.clone.delta_x,\r\n self.sun.clone.delta_y, self.sun.clone.e,\r\n self.planet.clone.e, self.sun.clone.ny,\r\n self.planet.clone.ny,\r\n self.sun.norm_forces[0],\r\n init_displ=init_displ, print_prog=False)\r\n\r\n pressure_els_y = sum(\r\n pressure[math.floor(self.sun.clone.res_y / 2), :] > 0)\r\n too_many_els_in_y = self.sun.clone.res_y - pressure_els_y - 2\r\n if too_many_els_in_y:\r\n contact_width_y += -np.sign(\r\n too_many_els_in_y) * contact_width_y / 25\r\n\r\n pressure_els_x = sum(\r\n pressure[:, math.floor(self.sun.clone.res_x / 2)] > 0)\r\n too_many_els_in_x = self.sun.clone.res_x - pressure_els_x - 2\r\n if too_many_els_in_x:\r\n contact_width_x += -np.sign(\r\n too_many_els_in_x) * contact_width_x / 25\r\n\r\n self.sun.make_profile(self.sun.res_x, self.sun.res_y, self.init_force,\r\n contact_width=contact_width_y,\r\n contact_length=contact_width_x)\r\n self.planet.make_slave_to(self.sun)\r\n return init_displ", "def test_dimensions(self):\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe()\n\t\tprint(details)\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe(layers=[self.first_layer])\n\t\tprint(details)\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\trf = details.rf.to_numpy()[0]\n\t\tnum_evals = details.num_evals.to_numpy()[0]\n\t\tprint(N,M,rf,num_evals)\n\t\t\n\t\tself.assertEqual(N,64)\n\t\tself.assertEqual(M,3)\n\t\tself.assertEqual(rf,9)\n\t\tself.assertEqual(num_evals,M*rf)", "def map_sim_positions(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, ax1 = plt.subplots(figsize=(10,10))\n # p.gal_index = np.where(GR.file_name == 'z0.00_G7169_cG29270')[0][0]\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n # print('TEST!',gal_ob.file_name,p.gal_index)\n simdata = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n\n # Plot\n print(simdata.head())\n ax1.plot(simdata.x,simdata.y,'o',ms=2,mew=2)\n\n print(gal_ob.radius)\n # Limit axes limits a bit to avoid area with no particles...\n # ax1.set_xlim([-2/3*gal_ob.radius,2/3*gal_ob.radius])make_projec\n # ax1.set_ylim([-2/3*gal_ob.radius,2/3*gal_ob.radius])\n ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')", "def get_dim():\n return (Settings.width, Settings.height)", "def get_x_y_z(drone, p, q, r):\n num_cameras = 2\n camera_constants = [0,math.pi/2]\n rads = np.zeros(num_cameras)\n phis = np.zeros(num_cameras)\n d = np.zeros(num_cameras)\n theta = np.zeros(num_cameras)\n Hs = np.zeros(num_cameras)\n s = 12\n HFOV = math.pi/4\n VFOV = 5*math.pi/36\n HPIX = 1280\n VPIX = 720\n #loop one, where we increment over camera number, and\n # get new information\n\n cent = calculate_centroid(p,q,r)\n for 
camera_num in range(num_cameras):\n\n A,B = find_a_and_b(p[camera_num],q[camera_num],r[camera_num],cent[camera_num])\n a = find_a(A,B)\n d_in = find_inner_d(a, s)\n angle_c = find_angle_c(a)\n alpha = find_alpha(HFOV, HPIX, A)\n w = find_w(angle_c, s)\n d_out = find_outer_d(w,alpha,a)\n pointy_front = is_point_front(r[camera_num],q[camera_num],p[camera_num],cent[camera_num])\n d[camera_num] = find_d(d_in,d_out,pointy_front)\n theta[camera_num] = find_theta(angle_c,A,B,camera_constants[camera_num])\n k = find_k(drone[camera_num], cent[camera_num])\n angle_k = find_angle_k(k, HFOV, HPIX)\n phi = find_phi(theta[camera_num], angle_k)\n rad = find_r(d[camera_num], angle_k)\n phis[camera_num] = phi\n rads[camera_num] = rad\n\n # end of first loop\n\n cosphis = np.cos(phis)\n sinphis = np.sin(phis)\n big_matrix = np.column_stack((cosphis,sinphis))\n points = np.zeros((int(num_cameras*(num_cameras-1)/2),2))\n i = 0\n for pair in itertools.combinations(range(num_cameras), 2):\n matrix_a = np.vstack((big_matrix[pair[0]],big_matrix[pair[1]]))\n vec_b = np.hstack((rads[pair[0]],rads[pair[1]]))\n point = np.linalg.solve(matrix_a, vec_b)\n points[i] = point\n i += 1\n drone_pos = np.mean(points,axis=0)\n\n # start of third loop\n for camera_num in range(num_cameras):\n d_prime = find_d_prime(d[camera_num], theta[camera_num], drone_pos)\n P,Q,M,N = find_P_Q_M_N(p[camera_num],q[camera_num],r[camera_num])\n h = find_h(d[camera_num],P,Q,M,N)\n angle_4 = find_angle_4(h,d[camera_num])\n Y = find_Y(drone[camera_num], cent[camera_num])\n angle_5 = find_angle_5(Y, VFOV, VPIX)\n angle_6 = angle_5 - angle_4\n h_prime = find_h_prime(d_prime, angle_6)\n Hs[camera_num] = h + h_prime\n drone_h = np.mean(H)\n return np.append(drone_pos,drone_h)", "def ManualTrackingTool(frames_divisions, root_ID, movie_length=1105, show=True):\n\n # Check if frame_divisions list (=argument) has a length of 31 (=captures all cells up to 5 generations):\n if len(frames_divisions) != 31:\n raise Exception(\"Warning, frame_divisions list is incomplete!\\n\"\n \"Length of {} while it should be 31!\\n\"\n \"Please input data for all generations up to gen#5 to proceed.\\n\"\n \"\\tNote: Insert None (boolean, not string = 'None') if the cell did not undergo division event.\")\n\n # Make sure your None gets converted to the movie_length integer:\n frm = [int(item) if item is not None else movie_length for item in frames_divisions]\n\n # TODO: Incorporate the conditions for apoptosis / migration out of FOV:\n\n color_list = [\"dodgerblue\", \"darkorange\", \"forestgreen\", \"firebrick\", \"gold\", \"plum\"]\n\n\n # VERTICAL Lines:\n\n # Gen#0:\n plt.plot([0, 0], [1, frm[0]], color=color_list[0], marker=\"o\")\n\n # Gen#1:\n if frm[0] != movie_length:\n plt.plot([-34, -34], [frm[0] + 1, frm[1]], color=color_list[1], marker=\"o\")\n if frm[0] != movie_length:\n plt.plot([34, 34], [frm[0] + 1, frm[2]], color=color_list[1], marker=\"o\")\n\n # Gen#2:\n if frm[0] != movie_length and frm[1] != movie_length:\n plt.plot([-50, -50], [frm[1] + 1, frm[3]], color=color_list[2], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length:\n plt.plot([-18, -18], [frm[1] + 1, frm[4]], color=color_list[2], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length:\n plt.plot([18, 18], [frm[2] + 1, frm[5]], color=color_list[2], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length:\n plt.plot([50, 50], [frm[2] + 1, frm[6]], color=color_list[2], marker=\"o\")\n\n # Gen#3:\n if frm[0] != movie_length and frm[1] != movie_length 
and frm[3] != movie_length:\n plt.plot([-58, -58], [frm[3] + 1, frm[7]], color=color_list[3], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length:\n plt.plot([-42, -42], [frm[3] + 1, frm[8]], color=color_list[3], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length:\n plt.plot([-26, -26], [frm[4] + 1, frm[9]], color=color_list[3], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length:\n plt.plot([-10, -10], [frm[4] + 1, frm[10]], color=color_list[3], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length:\n plt.plot([10, 10], [frm[5] + 1, frm[11]], color=color_list[3], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length:\n plt.plot([26, 26], [frm[5] + 1, frm[12]], color=color_list[3], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[6] != movie_length:\n plt.plot([42, 42], [frm[6] + 1, frm[13]], color=color_list[3], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[6] != movie_length:\n plt.plot([58, 58], [frm[6] + 1, frm[14]], color=color_list[3], marker=\"o\")\n\n # Gen#4:\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length and frm[7] != movie_length:\n plt.plot([-62, -62], [frm[7] + 1, frm[15]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length and frm[7] != movie_length:\n plt.plot([-54, -54], [frm[7] + 1, frm[16]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length and frm[8] != movie_length:\n plt.plot([-46, -46], [frm[8] + 1, frm[17]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length and frm[8] != movie_length:\n plt.plot([-38, -38], [frm[8] + 1, frm[18]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length and frm[9] != movie_length:\n plt.plot([-30, -30], [frm[9] + 1, frm[19]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length and frm[9] != movie_length:\n plt.plot([-22, -22], [frm[9] + 1, frm[20]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length and frm[10] != movie_length:\n plt.plot([-14, -14], [frm[10] + 1, frm[21]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length and frm[10] != movie_length:\n plt.plot([-6, -6], [frm[10] + 1, frm[22]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length and frm[11] != movie_length:\n plt.plot([6, 6], [frm[11] + 1, frm[23]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length and frm[11] != movie_length:\n plt.plot([14, 14], [frm[11] + 1, frm[24]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length and frm[12] != movie_length:\n plt.plot([22, 22], [frm[12] + 1, frm[25]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length and frm[12] != movie_length:\n plt.plot([30, 30], [frm[12] + 1, frm[26]], color=color_list[4], marker=\"o\")\n 
if frm[0] != movie_length and frm[2] != movie_length and frm[6] != movie_length and frm[13] != movie_length:\n plt.plot([38, 38], [frm[13] + 1, frm[27]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[6] != movie_length and frm[13] != movie_length:\n plt.plot([46, 46], [frm[13] + 1, frm[28]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[6] != movie_length and frm[14] != movie_length:\n plt.plot([54, 54], [frm[14] + 1, frm[29]], color=color_list[4], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[6] != movie_length and frm[14] != movie_length:\n plt.plot([62, 62], [frm[14] + 1, frm[30]], color=color_list[4], marker=\"o\")\n\n # Gen#5:\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length and frm[7] != movie_length and frm[15] != movie_length:\n plt.plot([-64, -64], [frm[15] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length and frm[7] != movie_length and frm[15] != movie_length:\n plt.plot([-60, -60], [frm[15] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length and frm[7] != movie_length and frm[16] != movie_length:\n plt.plot([-56, -56], [frm[16] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length and frm[7] != movie_length and frm[16] != movie_length:\n plt.plot([-52, -52], [frm[16] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length and frm[8] != movie_length and frm[17] != movie_length:\n plt.plot([-48, -48], [frm[17] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length and frm[8] != movie_length and frm[17] != movie_length:\n plt.plot([-44, -44], [frm[17] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length and frm[8] != movie_length and frm[18] != movie_length:\n plt.plot([-40, -40], [frm[18] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length and frm[8] != movie_length and frm[18] != movie_length:\n plt.plot([-36, -36], [frm[18] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length and frm[9] != movie_length and frm[19] != movie_length:\n plt.plot([-32, -32], [frm[19] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length and frm[9] != movie_length and frm[19] != movie_length:\n plt.plot([-28, -28], [frm[19] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length and frm[9] != movie_length and frm[20] != movie_length:\n plt.plot([-24, -24], [frm[20] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length and frm[9] != movie_length and frm[20] != movie_length:\n plt.plot([-20, -20], [frm[20] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[2] != movie_length and frm[6] 
!= movie_length and frm[10] != movie_length and frm[21] != movie_length:\n plt.plot([-16, -16], [frm[21] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[6] != movie_length and frm[10] != movie_length and frm[21] != movie_length:\n plt.plot([-12, -12], [frm[21] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[2] != movie_length and frm[6] != movie_length and frm[10] != movie_length and frm[22] != movie_length:\n plt.plot([-8, -8], [frm[22] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[6] != movie_length and frm[10] != movie_length and frm[22] != movie_length:\n plt.plot([-4, -4], [frm[22] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length and frm[11] != movie_length and frm[23] != movie_length:\n plt.plot([4, 4], [frm[23] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length and frm[11] != movie_length and frm[23] != movie_length:\n plt.plot([8, 8], [frm[23] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length and frm[11] != movie_length and frm[24] != movie_length:\n plt.plot([12, 12], [frm[24] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[3] != movie_length and frm[11] != movie_length and frm[24] != movie_length:\n plt.plot([16, 16], [frm[24] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length and frm[12] != movie_length and frm[25] != movie_length:\n plt.plot([20, 20], [frm[25] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length and frm[12] != movie_length and frm[25] != movie_length:\n plt.plot([24, 24], [frm[25] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length and frm[12] != movie_length and frm[26] != movie_length:\n plt.plot([28, 28], [frm[26] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[1] != movie_length and frm[4] != movie_length and frm[12] != movie_length and frm[26] != movie_length:\n plt.plot([32, 32], [frm[26] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length and frm[13] != movie_length and frm[27] != movie_length:\n plt.plot([36, 36], [frm[27] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length and frm[13] != movie_length and frm[27] != movie_length:\n plt.plot([40, 40], [frm[27] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length and frm[13] != movie_length and frm[28] != movie_length:\n plt.plot([44, 44], [frm[28] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[5] != movie_length and frm[13] != movie_length and frm[28] != movie_length:\n plt.plot([48, 48], [frm[28] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != 
movie_length and frm[2] != movie_length and frm[6] != movie_length and frm[14] != movie_length and frm[29] != movie_length:\n plt.plot([52, 52], [frm[29] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[6] != movie_length and frm[14] != movie_length and frm[29] != movie_length:\n plt.plot([56, 56], [frm[29] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n if frm[0] != movie_length and frm[2] != movie_length and frm[6] != movie_length and frm[14] != movie_length and frm[30] != movie_length:\n plt.plot([60, 60], [frm[30] + 1, movie_length], color=color_list[5], marker=\"o\")\n if frm[0] != movie_length and frm[2] != movie_length and frm[6] != movie_length and frm[14] != movie_length and frm[30] != movie_length:\n plt.plot([64, 64], [frm[30] + 1, movie_length], color=color_list[5], marker=\"o\")\n\n\n # HORIZONTAL LINES:\n # Gen#1:\n if frm[0] != movie_length:\n plt.plot([-34, 34], [frm[0], frm[0]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n\n # Gen#2:\n if frm[1] != movie_length:\n plt.plot([-50, -18], [frm[1], frm[1]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n if frm[2] != movie_length:\n plt.plot([18, 50], [frm[2], frm[2]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n\n # Gen#3:\n if frm[3] != movie_length:\n plt.plot([-58, -42], [frm[3], frm[3]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n if frm[4] != movie_length:\n plt.plot([-26, -10], [frm[4], frm[4]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n if frm[5] != movie_length:\n plt.plot([10, 26], [frm[5], frm[5]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n if frm[6] != movie_length:\n plt.plot([42, 58], [frm[6], frm[6]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n\n # Gen#4:\n if frm[7] != movie_length:\n plt.plot([-62, -54], [frm[7], frm[7]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n if frm[8] != movie_length:\n plt.plot([-46, -38], [frm[8], frm[8]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n if frm[9] != movie_length:\n plt.plot([-30, -22], [frm[9], frm[9]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n if frm[10] != movie_length:\n plt.plot([-14, -6], [frm[10], frm[10]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n if frm[11] != movie_length:\n plt.plot([6, 14], [frm[11], frm[11]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n if frm[12] != movie_length:\n plt.plot([22, 30], [frm[12], frm[12]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n if frm[13] != movie_length:\n plt.plot([38, 46], [frm[13], frm[13]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n if frm[14] != movie_length:\n plt.plot([54, 62], [frm[14], frm[14]], color=\"darkgrey\", marker=\"o\", markersize=5, zorder=1)\n\n # Gen#5:\n # TODO: Update this!\n\n\n # Other stuff:\n plt.text(x=17, y=-15, s=\"Root ID #{}\".format(root_ID), bbox=dict(facecolor=color_list[0], alpha=0.5),\n horizontalalignment='center', verticalalignment='center')\n plt.xlim(-75, 75)\n plt.ylim(-75, movie_length + 75)\n plt.ylabel(\"Absolute time [frames]\")\n plt.title(\"Semi-Automated Plotter - Manual Tracking; <'17_07_24' & 'pos13'>\")\n #plt.savefig(\"/Volumes/lowegrp/Data/Kristina/MDCK_90WT_10Sc_NoComp/17_07_24/pos13/manual_tracking/Manual_GenTree_CellID_{}.jpeg\".format(root_ID), bbox_inches=\"tight\")\n plt.show()\n plt.close()", "def calculate_dimensions(self):\n x_coordinates = 
np.sort(self.grid['x'][:, 0]) # first x node\n self.nr_nodes_z = np.where(x_coordinates == x_coordinates[0])[0].size\n self.nr_elements_x = self.elements.shape[0] / (self.nr_nodes_z - 1)\n self.nr_nodes_x = self.nr_elements_x + 1\n self.nr_elements_z = self.nr_nodes_z - 1", "def run(max_Iteration, size):\r\n\t# Initialize scene\r\n\tcmds.file(new = True, force = True)\r\n\tcmds.lookThru( 'top' )\r\n\tcmds.grid(toggle=False)\r\n\t\r\n\t# Setup window for progress bar\r\n\twindow = cmds.window()\r\n\tcmds.columnLayout()\r\n\tprogressControl = cmds.progressBar(maxValue=max_Iteration, width=300)\r\n\tcmds.showWindow(window)\r\n\t\r\n\t# Create shader to paint spheres with\r\n\tshader=cmds.shadingNode(\"blinn\",asShader=True, name = \"shader\" + str(1))\r\n\tattr = shader + \".color\"\r\n\tcmds.setAttr (attr, 1,1,1)\r\n\t\r\n\t# Calculates the midpoint of point1 and point2 and returns result\r\n\tdef midpoint(point1, point2):\r\n\t\treturn [(point1[0] + point2[0])/2, (point1[1] + point2[1])/2]\r\n\t\r\n\t# Set starting point for Sierpinski algorithm\r\n\tcurr_point = [0,0] \r\n\r\n\t# Define an equilateral triangle in space\r\n\tv1 = [0,0]\r\n\tv2 = [1,0]\r\n\tv3 = [.5,np.sqrt(3)/2]\r\n\r\n\t# Draw max_Iteration number of spheres \r\n\tfor i in range(max_Iteration):\r\n\t\tval = randint(0,2) # Select random vertex of our equilateral triangle\r\n\t\t# Calculate midpoint of above vertex and our current point:\r\n\t\tif val == 0:\r\n\t\t\tcurr_point = midpoint(curr_point, v1)\r\n\t\tif val == 1:\r\n\t\t\tcurr_point = midpoint(curr_point, v2)\r\n\t\tif val == 2:\r\n\t\t\tcurr_point = midpoint(curr_point, v3)\r\n\t\t\t\r\n\t\t# Draw corresponding sphere in space\r\n\t\tcmds.polySphere(n=\"sp\"+str(i))\r\n\t\tcmds.move(size*curr_point[0], 0, size*curr_point[1])\r\n\t\tcmds.scale(0.5,0.5,0.5) \r\n\t\tcmds.hyperShade(assign=\"shader\"+str(1))\r\n\t\t\r\n\t\t# Update progress bar and viewport\r\n\t\tcmds.progressBar(progressControl, edit=True, step=1)\r\n\t\tcmds.viewFit( 'top', all=True )\r\n\t\tcmds.dolly( 'top', os=1.5 )\r\n\t\tcmds.refresh()\r\n\t# Update progress bar and viewport\t\r\n\tcmds.progressBar(progressControl, edit=True, step=1)\r\n\tcmds.refresh()\r\n\tcmds.toggleWindowVisibility(window)", "def get_render_obs(self):\n x, y, z = self.robot.body_xyz\n # print (x, y, z)\n\n if self.camera_type == 'follow':\n cameraEyePosition = [x, y-1.25, 1.0]\n cameraTargetPosition = [x, y, 1.0]\n elif self.camera_type == 'fixed':\n cameraEyePosition = [2.0, y-2.5, 1.0]\n cameraTargetPosition = [2.0, y, 1.0]\n\n cameraUpVector = [0, 0, 1]\n\n fov = 90\n aspect = self.render_dims[0] / self.render_dims[1]\n nearPlane = 0.05 # this ensures outside body, may see limbs\n farPlane = 100.0\n\n viewMatrix = p.computeViewMatrix(cameraEyePosition, cameraTargetPosition, cameraUpVector, physicsClientId=self.physicsClientId)\n projectionMatrix = p.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane);\n img_arr = p.getCameraImage(self.render_dims[0], self.render_dims[1], viewMatrix, projectionMatrix, renderer=p.ER_BULLET_HARDWARE_OPENGL, physicsClientId=self.physicsClientId)\n\n rgb=img_arr[2] #color data RGB\n gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)\n gray = gray.reshape((1, *self.render_dims))\n gray[gray > 0] = 255\n\n # assign patch at bottom to show distance, this is to differentiate frames\n # bar_width_pix = int(y/5.0*self.render_dims[1])\n # bar_height_pix = 10\n # gray[0][self.render_dims[0]-bar_height_pix:, 0:bar_width_pix] = 255\n return gray", "def get_grids(N_X, N_Y, N_frame):\n if 
N_frame>1:\n fx, fy, ft = np.mgrid[(-N_X//2):((N_X-1)//2 + 1), (-N_Y//2):((N_Y-1)//2 + 1), (-N_frame//2):((N_frame-1)//2 + 1)]\n else:\n fx, fy, ft = np.mgrid[(-N_X//2):((N_X-1)//2 + 1), (-N_Y//2):((N_Y-1)//2 + 1), 0:1]\n fx, fy, ft = fx*1./N_X, fy*1./N_Y, ft*1./N_frame\n\n return fx, fy, ft", "def show_plot(self):\n runs = self.GetParent().runs\n if len(runs) <= 0: return\n\n t1 = time.time()\n total_width = self.GetParent().total_width\n\n newwidth = total_width * (self.GetParent().zoom / 100)\n newmid = total_width * (self.GetParent().pan/100)\n newxmin = newmid - (newwidth/2)\n newxmax = newxmin + newwidth\n\n if newxmin < 0:\n newxmin = 0\n newxmax = newwidth\n elif newxmax > total_width:\n newxmax = total_width\n newxmin = newxmax - newwidth\n\n assert newxmin >= 0 and newxmin <= total_width\n\n #print \"**** Zoom: %s, pan: %s, total_width: %s, newwidth: %s, newmid: %s, newxmin: %s, newxmax: %s\" \\\n # %(self.GetParent().zoom,self.GetParent().pan,total_width,newwidth,newmid,newxmin,newxmax)\n\n left = 0\n width_so_far = 0\n self.figure.clear()\n braggsmax = max(flex.max(r.culled_braggs) for r in runs)\n braggsmin = min(flex.min(r.culled_braggs) for r in runs)\n distsmax = max(flex.max(r.culled_distances) for r in runs)\n distsmin = min(flex.min(r.culled_distances) for r in runs)\n sifomax = max(flex.max(r.culled_sifoils) for r in runs)\n sifomin = min(flex.min(r.culled_sifoils) for r in runs)\n wavemax = max(flex.max(r.culled_wavelengths) for r in runs)\n wavemin = min(flex.min(r.culled_wavelengths) for r in runs)\n\n #above tricks don't work for hit rates as they can be empty if the run is new\n goodruns = []\n for run in runs:\n if len(run.hit_rates) > 0: goodruns.append(run)\n if len(goodruns) > 0:\n hitsmax = max(flex.max(r.hit_rates) for r in goodruns)\n hitsmin = min(flex.min(r.hit_rates) for r in goodruns)\n else:\n hitsmax = hitsmin = 0\n\n first_run = True\n for run in runs:\n right = left + run.width()\n\n if right < newxmin or left > newxmax:\n left += run.width()\n #print \"Not showing run %s\"%run.runId\n continue\n\n if left < newxmin:\n xmin = run.min() + (newxmin - left)\n else:\n xmin = run.min()\n\n if right > newxmax:\n xmax = run.min() + (newxmax - left)\n else:\n xmax = run.max()\n\n #print \"Run: %s, run.width(): %s, left: %s, right: %s, run.min(): %s, run.max(): %s, xmin: %s, xmax: %s, width_so_far: %s, xmax-xmin: %s\" \\\n #%(run.runId,run.width(),left,right,run.min(),run.max(),xmin,xmax,width_so_far,xmax-xmin)\n\n ax1 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.05, 0.9*(xmax-xmin)/newwidth, 0.4])\n ax2 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.45, 0.9*(xmax-xmin)/newwidth, 0.2], sharex=ax1)\n ax3 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.65, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n ax4 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.75, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n ax5 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.85, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n left += run.width()\n width_so_far += (xmax-xmin)\n\n ax1.grid(True, color=\"0.75\")\n ax2.grid(True, color=\"0.75\")\n ax3.grid(True, color=\"0.75\")\n ax4.grid(True, color=\"0.75\")\n ax5.grid(True, color=\"0.75\")\n ax1.plot(run.culled_bragg_times.select(run.culled_indexed),\n run.culled_braggs.select(run.culled_indexed), 'd', color=[0.0,1.0,0.0])\n ax1.plot(run.culled_bragg_times.select(~run.culled_indexed),\n run.culled_braggs.select(~run.culled_indexed), 'd', color=[0.0,0.5,1.0])\n 
ax2.plot(run.hit_rates_times, run.hit_rates, 'o-', color=[0.0,1.0,0.0])\n ax3.plot(run.culled_bragg_times, run.culled_wavelengths, '^', color=[0.8,0.0,0.2])\n ax4.plot(run.culled_bragg_times, run.culled_sifoils, '<', color=[0.8,0.0,0.2])\n ax5.plot(run.culled_bragg_times, run.culled_distances, '>', color=[0.8,0.0,0.2])\n ax1.set_ylabel(\"# of Bragg spots\")\n ax2.set_ylabel(\"Hit rate (%)\")\n ax3.set_ylabel(\"WaveL\")\n ax4.set_ylabel(\"SiFoils(mm)\")\n ax5.set_ylabel(\"Dist (mm)\")\n ax1.set_xlim(xmin, xmax)\n ax1.set_ylim(braggsmin, braggsmax)\n ax2.set_ylim(hitsmin, hitsmax)\n ax3.set_ylim(wavemin, wavemax)\n ax4.set_ylim(sifomin-10, sifomax+10)\n ax5.set_ylim(distsmin-3, distsmax+3)\n ax1.set_xlabel(\"Time\")\n for ax in ax1, ax2, ax3, ax4, ax5:\n if (ax is not ax1) :\n for label in ax.get_xticklabels():\n label.set_visible(False)\n ax.get_yticklabels()[0].set_visible(False)\n if not first_run:\n ax.get_yaxis().set_visible(False)\n\n ax1.xaxis.set_major_formatter(ticker.FuncFormatter(status_plot.format_time))\n ax3.yaxis.set_major_formatter(ticker.FormatStrFormatter(\"%.3f\"))\n ax5.yaxis.set_major_formatter(ticker.FormatStrFormatter(\"%.0f\"))\n ax5.set_title(\"%d:%d/%d:%.1f%% I:%d\"%(run.runId, run.hits_count, len(run.braggs), 100*run.hits_count/len(run.braggs),run.indexed.count(True)))\n\n labels = ax1.get_xticklabels()\n for label in labels:\n label.set_rotation(30)\n\n first_run = False\n\n self.figure.autofmt_xdate()\n self.canvas.draw()\n self.parent.Refresh()\n\n t2 = time.time()\n print(\"Plotted in %.2fs\" % (t2 - t1))", "def AlljointRuns():\n #800 nm\n forwardModelJointFit(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'),\n out='J800nm', wavelength='800nm') #0.31, 0.3\n forwardModelJointFit(getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), folder='data/31Jul/'),\n out='J800nm5k', wavelength='800nm') #0.28 0.31\n forwardModelJointFit(getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/'),\n out='J800nm10k', wavelength='800nm') #0.27 0.29\n forwardModelJointFit(getFiles(mintime=(15, 43, 24), maxtime=(15, 51, 47), folder='data/31Jul/'),\n out='J800nm20k', wavelength='800nm') #0.27 0.28\n forwardModelJointFit(getFiles(mintime=(15, 56, 11), maxtime=(16, 02, 58), folder='data/31Jul/'),\n out='J800nm30k', wavelength='800nm')\n forwardModelJointFit(getFiles(mintime=(16, 12, 39), maxtime=(16, 18, 25), folder='data/31Jul/'),\n out='J800nm38k', wavelength='800nm')\n forwardModelJointFit(getFiles(mintime=(16, 21, 52), maxtime=(16, 26, 16), folder='data/31Jul/'),\n out='J800nm50k', wavelength='800nm')\n forwardModelJointFit(getFiles(mintime=(16, 32, 02), maxtime=(16, 35, 23), folder='data/31Jul/'),\n out='J800nm54k', wavelength='800nm')\n #700 nm\n forwardModelJointFit(getFiles(mintime=(17, 20, 17), maxtime=(17, 33, 17), folder='data/30Jul/'),\n out='J700nm5k', wavelength='700nm') # 0.28 0.32\n forwardModelJointFit(getFiles(mintime=(17, 37, 35), maxtime=(17, 46, 51), folder='data/30Jul/'),\n out='J700nm9k', wavelength='700nm') # 0.27 0.32\n forwardModelJointFit(getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/'),\n out='J700nm52k', wavelength='700nm') # 0.26 0.31\n forwardModelJointFit(getFiles(mintime=(17, 58, 18), maxtime=(17, 59, 31), folder='data/30Jul/'),\n out='J700nm32k', wavelength='700nm')\n #600 nm\n forwardModelJointFit(getFiles(mintime=(15, 22, 00), maxtime=(15, 36, 32), folder='data/30Jul/'),\n out='J600nm5k', wavelength='600nm') #0.27 0.31\n forwardModelJointFit(getFiles(mintime=(15, 39, 58), 
maxtime=(15, 47, 58), folder='data/30Jul/'),\n out='J600nm54k', wavelength='600nm') #0.299, 0.333\n forwardModelJointFit(getFiles(mintime=(15, 52, 07), maxtime=(16, 06, 32), folder='data/30Jul/'),\n out='J600nm10k', wavelength='600nm') #0.28 0.32\n #890 nm\n forwardModelJointFit(getFiles(mintime=(13, 37, 37), maxtime=(13, 50, 58), folder='data/01Aug/'),\n out='J890nm5k', wavelength='890nm') #0.28 0.35\n forwardModelJointFit(getFiles(mintime=(14, 00, 58), maxtime=(14, 11, 54), folder='data/01Aug/'),\n out='J890nm10k', wavelength='890nm') #0.28 0.33\n forwardModelJointFit(getFiles(mintime=(14, 17, 57), maxtime=(14, 25, 49), folder='data/01Aug/'),\n out='J890nm30k', wavelength='890nm') #0.3 0.33\n forwardModelJointFit(getFiles(mintime=(14, 30, 03), maxtime=(14, 34, 37), folder='data/01Aug/'),\n out='J890nm50k', wavelength='890nm') #0.3 0.3", "def dimension(self):", "def main():\n\n para_list = [para]\n loss_list = []\n\n for i in range(30):\n para_list.append(diff_fun(loss_fun, para_list[i]))\n loss_list.append(loss_fun(para_list[i]))\n\n env = QEnv()\n env.backend(BackendName.LocalBaiduSim2)\n\n q = env.Q.createList(n)\n\n state_prepare(q, 0)\n universal_cir(q, 0, para_list[-1])\n\n MeasureZ(*env.Q.toListPair())\n taskResult = env.commit(shots, fetchMeasure=True)\n print(taskResult['counts'])", "def __getitem__(self, cur_episode):\n if self.platform == \"win\":\n env = lmdb.open(self.lmdb_file, subdir=False,\n readonly=True, lock=False,\n readahead=False, meminit=False)\n else:\n env = self.env\n # episode_set = self.episode_sets[episode]\n total_support_x = []\n total_query_x = []\n total_support_y = []\n total_query_y = []\n\n for t in range(self.t_task):\n # create a task (n_way*k_shot+ n_way*k_query)\n\n support_x = []\n query_x = []\n support_y = []\n query_y = []\n\n support_imgs = []\n query_imgs = []\n\n # select n_way classes randomly\n selected_classes = np.random.choice(self.total_cls, self.n_way)\n # select k_shot + k_query for each class\n for selected_class in selected_classes:\n selected_imgs = np.random.choice(\n self.dic_img_label[self.num2label[selected_class]], self.k_shot + self.k_query, False)\n support_imgs += selected_imgs[:self.k_shot].tolist()\n query_imgs += selected_imgs[self.k_shot:].tolist()\n\n with env.begin(write=False) as txn:\n for i, img_id in enumerate(support_imgs):\n res = pyarrow.deserialize(txn.get(u'{}'.format(img_id).encode('ascii')))\n support_x.append(self.transform(res[0]))\n support_y.append(np.array([self.label2num[res[1]]]))\n\n for i, img_id in enumerate(query_imgs):\n res = pyarrow.deserialize(txn.get(u'{}'.format(img_id).encode('ascii')))\n query_x.append(self.transform(res[0]))\n query_y.append(np.array([self.label2num[res[1]]]))\n support_x = torch.stack(support_x, 0)\n query_x = torch.stack(query_x, 0)\n support_y = np.array(support_y)\n query_y = np.array(query_y)\n\n # shuffle:\n index = np.random.permutation(len(support_y))\n support_x = support_x[index]\n if not self.fet_global:\n support_y = np.array([i for i in range(self.n_way) for j in range(self.k_shot)])\n support_y = support_y[index]\n\n index = np.random.permutation(len(query_y))\n query_x = query_x[index]\n if not self.fet_global:\n query_y = np.array([i for i in range(self.n_way) for j in range(self.k_query)])\n\n query_y = query_y[index]\n\n # a batch\n total_query_x.append(query_x)\n total_query_y.append(query_y)\n total_support_x.append(support_x)\n total_support_y.append(support_y)\n\n total_query_x = torch.cat(total_query_x, 0)\n total_query_y = 
np.hstack(total_query_y)\n total_support_x = torch.cat(total_support_x, 0)\n total_support_y = np.hstack(total_support_y)\n\n imgs = torch.cat([total_support_x, total_query_x], 0)\n labels = torch.from_numpy(np.hstack([total_support_y, total_query_y]).reshape([-1, 1]))\n return imgs, labels", "def _spawn_runways() -> pd.DataFrame:\n\n n = NUMBER_OF_RUNWAYS\n runway_data = np.empty((n, 5))\n\n if not n % 2:\n for i, N in enumerate(range(1, n, 2)):\n\n x = N * (RUNWAY_SEPARATION + RUNWAY_WIDTH) / 2\n y_base, y_top = - RUNWAY_LENGTH / 2, RUNWAY_LENGTH / 2\n\n runway_data[i, 0] = x\n runway_data[i, 1] = y_base\n runway_data[i, 2] = x\n runway_data[i, 3] = y_top\n runway_data[i, 4] = 0\n\n runway_data[i + n // 2, 0] = - x\n runway_data[i + n // 2, 1] = y_base\n runway_data[i + n // 2, 2] = - x\n runway_data[i + n // 2, 3] = y_top\n runway_data[i + n // 2, 4] = 0\n\n else:\n for i, N in enumerate(range(- n // 2 + 1, n // 2 + 1)):\n\n x = N * (RUNWAY_SEPARATION + RUNWAY_WIDTH)\n y_base, y_top = - RUNWAY_LENGTH / 2, RUNWAY_LENGTH / 2\n\n runway_data[i, 0] = x\n runway_data[i, 1] = y_base\n runway_data[i, 2] = x\n runway_data[i, 3] = y_top\n runway_data[i, 4] = 0\n\n runway_info = pd.DataFrame(runway_data)\n return runway_info", "def sample_simulation() -> Dict[str, Tuple[str, float]]:\n sim = Simulation('stations.json', 'sample_rides.csv')\n sim.run(datetime(2017, 6, 1, 8, 0, 0),\n datetime(2017, 6, 1, 9, 0, 0))\n\n return sim.calculate_statistics()", "def render_dof(scene, camera, HEIGHT=100, WIDTH=100, V_SAMPLES=6, H_SAMPLES=6):\n output = np.zeros((HEIGHT, WIDTH, RGB_CHANNELS), dtype=np.uint8)\n if not scene or scene.is_empty() or not camera or camera.inside(\n scene.objects\n ):\n print(\"Cannot generate an image\")\n return output\n total_samples = H_SAMPLES * V_SAMPLES\n # This is for showing progress %\n iterations = HEIGHT * WIDTH * total_samples\n step_size = np.ceil((iterations * PERCENTAGE_STEP) / 100).astype('int')\n counter = 0\n bar = Bar('Raytracing', max=100 / PERCENTAGE_STEP)\n # This is needed to use it in Git Bash\n bar.check_tty = False\n for j in range(HEIGHT):\n for i in range(WIDTH):\n color = np.array([0, 0, 0], dtype=float)\n lens_sample_offsets = []\n n0 = camera.n0\n n1 = camera.n1\n for n in range(V_SAMPLES):\n for m in range(H_SAMPLES):\n r0, r1 = np.random.random_sample(2)\n ap_sx = camera.lens_params.ap_sx\n ap_sy = camera.lens_params.ap_sy\n x_offset = ((r0 - 0.5) * m) / H_SAMPLES * ap_sx\n y_offset = ((r1 - 0.5) * n) / V_SAMPLES * ap_sy\n lens_sample_offsets.append((x_offset, y_offset))\n random_start = np.random.random_integers(0, total_samples - 1)\n for n in range(V_SAMPLES):\n for m in range(H_SAMPLES):\n r0, r1 = np.random.random_sample(2)\n x = i + ((float(m) + r0) / H_SAMPLES)\n y = HEIGHT - 1 - j + ((float(n) + r1) / V_SAMPLES)\n # Get x projected in view coord\n xp = (x / float(WIDTH)) * camera.scale_x\n # Get y projected in view coord\n yp = (y / float(HEIGHT)) * camera.scale_y\n pp = camera.p00 + xp * camera.n0 + yp * camera.n1\n npe = utils.normalize(pp - camera.position)\n sample_idx = n + m * H_SAMPLES - random_start\n x_offset, y_offset = lens_sample_offsets[sample_idx]\n ps = pp + x_offset * n0 + y_offset * n1\n fp = pp + npe * camera.lens_params.f\n director = utils.normalize(fp - ps)\n ray = Ray(ps, director)\n\n color += raytrace(ray, scene) / float(total_samples)\n counter += 1\n if counter % step_size == 0:\n bar.next()\n output[j][i] = color.round().astype(np.uint8)\n bar.finish()\n return output", "def render(self, mode='human'):\n 
screen_width = 800\n screen_height = 550\n\n # Width is one column for each variable\n n_sect = 7\n world_width = n_sect*2 # X axis is just pixels\n \n buff_axis = cfg['buff_axis']\n #bottom of the screen scales to the input/output range of values\n world_height_bottom = np.max(self.maxes)+buff_axis\n \n # Top is for counting steps\n world_height_top = 100\n\n #Split the screen:\n world_top = .3\n world_bottom = 1-world_top\n screen_height_bottom = world_bottom*screen_height\n\n #Set where to draw the steps axis\n axes_line1 = screen_height*(world_bottom + .2)\n\n # Scale the pixels in the screen:\n scalex = screen_width/world_width\n scaley_bottom= screen_height_bottom/world_height_bottom\n\n # Some adjustments to move some objects up/ right\n move_oval = -scalex*.2\n move_up= scaley_bottom * buff_axis*.5\n\n #set sizes of shapes:\n self.oval_length = 25.0\n self.oval_width = 50.0\n self.rect_width = 70.0\n self.rect_height = 5.0 \n\n #Step plot:\n scalestep = screen_width/cfg['scalestep']\n\n #color shades:\n light_col = .7\n dark_col = 1\n c11 = .6\n c22 = .8\n c33 = 1\n\n if self.viewer is None:\n #TO DO: find an alternative to copy-paste to generate multiple similar shapes\n self.viewer = rendering.Viewer(screen_width, screen_height)\n \n #Input states:\n\n #the temp action\n self.temptrans1 = self.make_oval(0,0,light_col)\n self.temptrans2 = self.make_oval(0,0,dark_col)\n #flow action:\n self.flowtrans1 = self.make_oval(light_col,0,light_col)\n self.flowtrans2 = self.make_oval(dark_col,0,dark_col)\n\n #output states:\n #out1:\n #the gauge is a rectangle \n self.outgauge1 = self.make_rect(0,c33,0)\n #goal is red rectangle\n self.outgoal1= self.make_rect(c33,0,0)\n \n #out2:\n #the gauge is a rectangle \n self.outgauge2 = self.make_rect(0,c22,0)\n #goal is red rectangle\n self.outgoal2= self.make_rect(c22,0,0)\n\n #out3:\n #the gauge is a rectangle \n self.outgauge3 = self.make_rect(0,c11,0)\n #goal is red rectangle\n self.outgoal3 = self.make_rect(c11,0,0)\n\n #lines on which \"controls\" sit\n for l in range(n_sect): \n self.make_line(scalex*((l*2)+1),0, scalex*((l*2)+1),screen_height*world_bottom)\n\n # Line separating the top and bottom of the screen. 
\n self.make_line(0,world_bottom*screen_height,screen_width,world_bottom*screen_height)\n # Step # axis.\n self.make_line(scalex*1.5,axes_line1,screen_width-scalex*1,axes_line1)\n\n # The dot tracking the step #\n dot = rendering.make_circle(self.oval_length)\n self.dottrans = rendering.Transform()\n dot.add_attr(self.dottrans)\n dot.set_color(0,0,0)\n self.viewer.add_geom(dot)\n\n #labels: \n num = 0\n label_buff_y = 1.07\n label_buff_x = .2\n img_scale = .5\n img_wid = 179 *img_scale\n img_height = 124 * img_scale\n\n for label in self.labels:\n pth = (self.label_dir+label+'.png')\n self.txt = rendering.Image(pth,img_wid,img_height)\n locx = (num*2)+1\n self.txtis = rendering.Transform(translation=(scalex*locx +locx* label_buff_x,world_bottom*screen_height*label_buff_y))\n self.txt.add_attr(self.txtis)\n self.viewer.add_geom(self.txt)\n num = num+1\n\n #step label\n pth = (self.label_dir+'Step.png')\n self.txt = rendering.Image(pth,img_wid,img_height)\n self.txtis = rendering.Transform(translation=(scalex*.5,axes_line1))\n self.txt.add_attr(self.txtis)\n self.viewer.add_geom(self.txt)\n\n if self.state is None: return None\n\n x = self.state\n\n # 4 ins:\n self.flowtrans1.set_translation(move_oval+scalex*1,move_up+scaley_bottom*x[0])\n self.temptrans1.set_translation(move_oval+scalex*3,move_up+scaley_bottom*x[1])\n self.flowtrans2.set_translation(move_oval+scalex*5,move_up+scaley_bottom*x[2])\n self.temptrans2.set_translation(move_oval+scalex*7,move_up+scaley_bottom*x[3])\n\n # 3 outs: current & goal:\n self.outgauge1.set_translation(scalex*9,move_up+scaley_bottom*x[4])\n self.outgoal1.set_translation(scalex*9,move_up+scaley_bottom*x[7])\n self.outgauge2.set_translation(scalex*11,move_up+scaley_bottom*x[5])\n self.outgoal2.set_translation(scalex*11,move_up+scaley_bottom*x[8])\n self.outgauge3.set_translation(scalex*13,move_up+scaley_bottom*x[6])\n self.outgoal3.set_translation(scalex*13,move_up+scaley_bottom*x[9])\n\n #step info:\n self.dottrans.set_translation(scalex*1.5 + self.steps*scalestep, axes_line1)\n done_grow = .5*self.done\n self.dottrans.set_scale(1+done_grow,1+done_grow) #expand size when done\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')", "def getGameState(self):\n ### Student code goes here\n #print(\":::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\")\n row1_tuple = ()\n row1_list = {}\n ask1 = parse_input(\"fact: (on ?X ?Y pos1)\")\n answer1 = self.kb.kb_ask(ask1)\n if answer1 != False:\n for ans in answer1.list_of_bindings:\n tile = ans[0].bindings[0].constant.element\n if len(tile.split('tile',1)) > 1:\n tile = int(tile.split('tile',1)[1])\n else:\n tile = -1\n pos = (ans[0].bindings[1].constant.element).split('pos',1)[1]\n row1_list[int(pos)] = tile\n #print(\"ROW1: \", len(row1_list))\n for i in range(len(row1_list)):\n val = row1_list[i+1]\n #print(val)\n row1_tuple = row1_tuple + (val,)\n\n row2_tuple = ()\n row2_list = {}\n ask2 = parse_input(\"fact: (on ?X ?Y pos2)\")\n answer2 = self.kb.kb_ask(ask2)\n if answer2 != False:\n for ans in answer2.list_of_bindings:\n tile = ans[0].bindings[0].constant.element\n if len(tile.split('tile',1)) > 1:\n tile = int(tile.split('tile',1)[1])\n else:\n tile = -1\n pos = (ans[0].bindings[1].constant.element).split('pos',1)[1]\n row2_list[int(pos)] = tile\n #print(\"ROW2: \", len(row2_list))\n for i in range(len(row2_list)):\n val = row2_list[i+1]\n row2_tuple = row2_tuple + (val,)\n\n row3_tuple = ()\n row3_list = {}\n ask3 = parse_input(\"fact: (on ?X ?Y pos3)\")\n 
answer3 = self.kb.kb_ask(ask3)\n if answer3 != False:\n for ans in answer3.list_of_bindings:\n tile = ans[0].bindings[0].constant.element\n if len(tile.split('tile',1)) > 1:\n tile = int(tile.split('tile',1)[1])\n else:\n tile = -1\n pos = (ans[0].bindings[1].constant.element).split('pos',1)[1]\n row3_list[int(pos)] = tile\n #print(\"ROW3: \", len(row3_list))\n for i in range(len(row3_list)):\n val = row3_list[i+1]\n row3_tuple = row3_tuple + (val,)\n #print(\"-----------------------------------------------------------------------------------------------\")\n\n\n state_tuple = (row1_tuple,row2_tuple,row3_tuple)\n #print(state_tuple)\n return state_tuple", "def get_real_samples(self):\n # Define the camera poses\n if not self.opt.same_view:\n if self.opt.full_sphere_sampling:\n self.cam_pos = uniform_sample_sphere(\n radius=self.opt.cam_dist, num_samples=self.opt.batchSize,\n axis=None, angle=self.opt.angle,\n theta_range=np.deg2rad(self.opt.theta), phi_range=np.deg2rad(self.opt.phi))\n else:\n self.cam_pos = uniform_sample_sphere(\n radius=self.opt.cam_dist, num_samples=self.opt.batchSize,\n axis=None, angle=self.opt.angle,\n theta_range=np.deg2rad(self.opt.theta),\n phi_range=np.deg2rad(self.opt.phi))\n if self.opt.full_sphere_sampling_light:\n self.light_pos1 = uniform_sample_sphere(\n radius=self.opt.cam_dist, num_samples=self.opt.batchSize,\n axis=None, angle=self.opt.angle,\n theta_range=np.deg2rad(self.opt.theta),\n phi_range=np.deg2rad(self.opt.phi))\n # self.light_pos2 = uniform_sample_sphere(radius=self.opt.cam_dist, num_samples=self.opt.batchSize,\n # axis=self.opt.axis, angle=np.deg2rad(40),\n # theta_range=self.opt.theta, phi_range=self.opt.phi)\n else:\n print(\"inbox\")\n light_eps = 0.15\n self.light_pos1 = np.random.rand(self.opt.batchSize,3)*self.opt.cam_dist + light_eps\n self.light_pos2 = np.random.rand(self.opt.batchSize,3)*self.opt.cam_dist + light_eps\n\n # TODO: deg2rad in all the angles????\n\n # Create a splats rendering scene\n large_scene = create_scene(self.opt.width, self.opt.height,\n self.opt.fovy, self.opt.focal_length,\n self.opt.n_splats)\n lookat = self.opt.at if self.opt.at is not None else [0.0, 0.0, 0.0, 1.0]\n large_scene['camera']['at'] = tch_var_f(lookat)\n\n # Render scenes\n data, data_depth, data_normal, data_cond = [], [], [], []\n inpath = self.opt.vis_images + '/'\n for idx in range(self.opt.batchSize):\n # Save the splats into the rendering scene\n if self.opt.use_mesh:\n if 'sphere' in large_scene['objects']:\n del large_scene['objects']['sphere']\n if 'disk' in large_scene['objects']:\n del large_scene['objects']['disk']\n if 'triangle' not in large_scene['objects']:\n large_scene['objects'] = {\n 'triangle': {'face': None, 'normal': None,\n 'material_idx': None}}\n samples = self.get_samples()\n\n large_scene['objects']['triangle']['material_idx'] = tch_var_l(\n np.zeros(samples['mesh']['face'][0].shape[0],\n dtype=int).tolist())\n large_scene['objects']['triangle']['face'] = Variable(\n samples['mesh']['face'][0].cuda(), requires_grad=False)\n large_scene['objects']['triangle']['normal'] = Variable(\n samples['mesh']['normal'][0].cuda(),\n requires_grad=False)\n else:\n if 'sphere' in large_scene['objects']:\n del large_scene['objects']['sphere']\n if 'triangle' in large_scene['objects']:\n del large_scene['objects']['triangle']\n if 'disk' not in large_scene['objects']:\n large_scene['objects'] = {\n 'disk': {'pos': None,\n 'normal': None,\n 'material_idx': None}}\n large_scene['objects']['disk']['radius'] = tch_var_f(\n 
np.ones(self.opt.n_splats) * self.opt.splats_radius)\n large_scene['objects']['disk']['material_idx'] = tch_var_l(\n np.zeros(self.opt.n_splats, dtype=int).tolist())\n large_scene['objects']['disk']['pos'] = Variable(\n samples['splats']['pos'][idx].cuda(),\n requires_grad=False)\n large_scene['objects']['disk']['normal'] = Variable(\n samples['splats']['normal'][idx].cuda(),\n requires_grad=False)\n\n # Set camera position\n if not self.opt.same_view:\n large_scene['camera']['eye'] = tch_var_f(self.cam_pos[idx])\n else:\n large_scene['camera']['eye'] = tch_var_f(self.cam_pos[0])\n\n large_scene['lights']['pos'][0,:3]=tch_var_f(self.light_pos1[idx])\n #large_scene['lights']['pos'][1,:3]=tch_var_f(self.light_pos2[idx])\n\n # Render scene\n res = render(large_scene,\n norm_depth_image_only=self.opt.norm_depth_image_only,\n double_sided=True, use_quartic=self.opt.use_quartic)\n\n # Get rendered output\n if self.opt.render_img_nc == 1:\n depth = res['depth']\n im_d = depth.unsqueeze(0)\n else:\n depth = res['depth']\n im_d = depth.unsqueeze(0)\n im = res['image'].permute(2, 0, 1)\n target_normal_ = get_data(res['normal'])\n target_normalmap_img_ = get_normalmap_image(target_normal_)\n im_n = tch_var_f(\n target_normalmap_img_).view(im.shape[1], im.shape[2],\n 3).permute(2, 0, 1)\n\n # Add depth image to the output structure\n if self.iteration_no % self.opt.save_image_interval == 0:\n imsave((inpath + str(self.iteration_no) +\n 'real_normalmap_{:05d}.png'.format(idx)),\n target_normalmap_img_)\n imsave((inpath + str(self.iteration_no) +\n 'real_depth_{:05d}.png'.format(idx)), get_data(depth))\n # imsave(inpath + str(self.iteration_no) + 'real_depthmap_{:05d}.png'.format(idx), im_d)\n # imsave(inpath + str(self.iteration_no) + 'world_normalmap_{:05d}.png'.format(idx), target_worldnormalmap_img_)\n data.append(im)\n data_depth.append(im_d)\n data_normal.append(im_n)\n data_cond.append(large_scene['camera']['eye'])\n # Stack real samples\n real_samples = torch.stack(data)\n real_samples_depth = torch.stack(data_depth)\n real_samples_normal = torch.stack(data_normal)\n real_samples_cond = torch.stack(data_cond)\n self.batch_size = real_samples.size(0)\n if not self.opt.no_cuda:\n real_samples = real_samples.cuda()\n real_samples_depth = real_samples_depth.cuda()\n real_samples_normal = real_samples_normal.cuda()\n real_samples_cond = real_samples_cond.cuda()\n\n # Set input/output variables\n\n self.input.resize_as_(real_samples.data).copy_(real_samples.data)\n self.input_depth.resize_as_(real_samples_depth.data).copy_(real_samples_depth.data)\n self.input_normal.resize_as_(real_samples_normal.data).copy_(real_samples_normal.data)\n self.input_cond.resize_as_(real_samples_cond.data).copy_(real_samples_cond.data)\n self.label.resize_(self.batch_size).fill_(self.real_label)\n # TODO: Remove Variables\n self.inputv = Variable(self.input)\n self.inputv_depth = Variable(self.input_depth)\n self.inputv_normal = Variable(self.input_normal)\n self.inputv_cond = Variable(self.input_cond)\n self.labelv = Variable(self.label)", "def grid(iant,xgrid=[0],ygrid=[0],sleep=4):\n d=Carma(iant).drive()\n d.setOffset(xgrid[0],ygrid[0])\n time.sleep(sleep)\n time.sleep(sleep)\n for y in ygrid:\n for x in xgrid:\n print x,y\n d.setOffset(x,y)\n time.sleep(sleep)", "def generate_pristine_graphene(x_dim, y_dim, filename1):\n y_number = round(y_dim / 1.228)\n x_number = int(x_dim / 2.127)\n x_addition = (x_dim / 2.127 ) % 1\n list_of_coords = []\n a = 0\n b = 0\n c = 0\n list_of_coords = fill_row(list_of_coords, 
y_number, a,b,c, [], 5, prev = False)\n for i in range(1,x_number):\n if (i == x_number-1):\n if (i % 2 == 1):\n a += 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = True)\n fill_hexagon(list_of_coords, -1.228, b, c, [0, 1, 3, 4, 5], full=6, prev=False)\n if (i % 2 == 0):\n a -= 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = False)\n fill_hexagon(list_of_coords, y_number*1.228, b, c, [0, 1, 3, 4, 5], full=6, prev=False)\n elif (i % 2 == 1):\n a += 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = True)\n elif (i % 2 == 0):\n a -= 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = False)\n list_x_steps = [0, 0.33, 0.66, 1]\n x_step = min(list_x_steps, key=lambda x:abs(x-x_addition))\n if (x_step == 0.33):\n list_of_coords = fill_row(list_of_coords, y_number, 0, 0, 0, [], 6, prev = False)\n fill_hexagon(list_of_coords, y_number*1.228, 0, 0, [0, 1, 2, 3, 4], full=6, prev=False)\n elif (x_step == 0.66):\n if (x_number % 2 == 1):\n a += 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [2], 6, prev = True)\n elif (x_number % 2 == 0):\n a -= 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [2], 6, prev = False)\n elif (x_step == 1):\n if (x_number % 2 == 1):\n a += 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = True)\n elif (x_number % 2 == 0):\n a -= 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = False)\n writepdb3(list_of_coords, filename1)\n print('done.')\n return list_of_coords", "def step6_run_all(flow_dataset_npz=\"flow_dataset.npz\"):\n global objs, predictions, ticks, timestamps, is_slider, is_spinner, is_note_end, sv, slider_ticks, dist_multiplier, divisor, note_distance_basis, slider_length_base, slider_types, slider_type_rotation, slider_cos, slider_sin, slider_cos_each, slider_sin_each, slider_type_length, slider_lengths, tick_diff, note_distances, maps, labels, special_train_data, special_train_labels, early_stop, loss_ma, extvar, plot_noise\n\n objs, predictions, ticks, timestamps, is_slider, is_spinner, is_note_end, sv, slider_ticks, dist_multiplier = read_map_predictions(\n \"temp/rhythm_data.npz\")\n\n # get divisor from GAN_PARAMS\n divisor = GAN_PARAMS[\"divisor\"]\n\n # get basis\n note_distance_basis = GAN_PARAMS[\"note_distance_basis\"]\n\n # get next_from_slider_end\n next_from_slider_end = GAN_PARAMS[\"next_from_slider_end\"]\n\n # should be slider length each tick, which is usually SV * SMP * 100 / 4\n # e.g. 
SV 1.6, timing section x1.00, 1/4 divisor, then slider_length_base = 40\n slider_length_base = sv / divisor\n\n # weight for each type of sliders\n slider_type_probs = [0.25, 0.25, 0.25, 0.05, 0.05, 0.03, 0.03, 0.01,\n 0.01, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.015, 0.015, 0.01]\n slider_types = np.random.choice(\n len(slider_type_probs), is_slider.shape, p=slider_type_probs).astype(int)\n\n # these data must be kept consistent with the sliderTypes in load_map.js\n slider_type_rotation = np.array([0, -0.40703540572409336, 0.40703540572409336, -0.20131710837464062, 0.20131710837464062,\n -0.46457807316944644, 0.46457807316944644, 1.5542036732051032, -\n 1.5542036732051032, 0, 0, 0.23783592745745077, -0.23783592745745077,\n 0.5191461142465229, -0.5191461142465229, -0.16514867741462683, 0.16514867741462683, 3.141592653589793])\n\n # this is vector length! I should change the variable name probably...\n slider_type_length = np.array([1.0, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97,\n 0.64, 0.64, 0.94, 0.94, 0.94, 0.94, 0.94, 0.94, 0.96, 0.96, 0])\n\n slider_cos = np.cos(slider_type_rotation)\n slider_sin = np.sin(slider_type_rotation)\n\n slider_cos_each = slider_cos[slider_types]\n slider_sin_each = slider_sin[slider_types]\n\n slider_lengths = np.array([slider_type_length[int(\n k)] * slider_length_base[i] for i, k in enumerate(slider_types)]) * slider_ticks\n\n tick_diff = np.concatenate([[100], ticks[1:] - ticks[:-1]])\n\n if next_from_slider_end:\n tick_diff = np.concatenate(\n [[100], tick_diff[1:] - np.floor(slider_ticks * is_slider)[:-1]])\n\n # Timing section reset == tick_diff < 0\n # Use 1 as default value\n tick_diff = np.where(tick_diff < 0, 1, tick_diff)\n\n note_distances = np.clip(tick_diff, 1, divisor * 2) * \\\n (note_distance_basis / divisor)\n\n # Load the flow dataset saved in part 4\n with np.load(flow_dataset_npz) as flow_dataset:\n maps = flow_dataset[\"maps\"]\n labels = np.ones(maps.shape[0])\n\n order2 = np.argsort(np.random.random(maps.shape[0]))\n special_train_data = maps[order2]\n special_train_labels = labels[order2]\n\n early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)\n\n # Start model training\n\n loss_ma = [90, 90, 90]\n extvar = {\"begin\": 10}\n\n plot_noise = np.random.random((1, GAN_PARAMS[\"g_input_size\"]))\n\n if GAN_PARAMS[\"max_epoch\"] == 0:\n osu_a = put_everything_in_the_center()\n else:\n osu_a = generate_map()\n\n data = objs, predictions, ticks, timestamps, is_slider, is_spinner, is_note_end, sv, slider_ticks, dist_multiplier, slider_types, slider_length_base\n return osu_a, data", "def build_experiments(self):\n\n # width=500, height=350, pos_x= 2.0, pos_y=0.0, pos_z= 1.4, angle=-30.0\n cameraRGB = Camera('Camera', PostProcessing='SceneFinal')\n cameraRGB.set_image_size(500, 350)\n cameraRGB.set_position(2.0, 0.0, 1.4)\n cameraRGB.set_rotation(-30.0, 0.0, 0.)\n cameraRGB.set(FOV=100)\n\n camera = Camera('CameraSem', PostProcessing='SemanticSegmentation')\n camera.set_image_size(320, 180)\n camera.set_position(2.0, 0.0, 1.4)\n camera.set_rotation(-30.0, 0.0, 0.)\n camera.set(FOV=100)\n\n if self._city_name == 'Town01':\n poses_tasks = self._poses_town01()\n vehicles_tasks = []\n pedestrians_tasks = []\n for i in range(len(poses_tasks)):\n vehicles_tasks.append(0)\n pedestrians_tasks.append(0)\n\n experiment_vector = []\n\n for weather in self.weathers:\n\n for iteration in range(len(poses_tasks)):\n poses = poses_tasks[iteration]\n vehicles = vehicles_tasks[iteration]\n pedestrians = pedestrians_tasks[iteration]\n\n 
conditions = CarlaSettings()\n conditions.set(\n SendNonPlayerAgentsInfo=True,\n NumberOfVehicles=vehicles,\n NumberOfPedestrians=pedestrians,\n WeatherId=weather,\n QualityLevel=1\n )\n\n conditions.set(SynchronousMode=True)\n conditions.set(DisableTwoWheeledVehicles=True)\n\n conditions.add_sensor(camera)\n conditions.add_sensor(cameraRGB)\n\n experiment = Experiment()\n experiment.set(\n Conditions=conditions,\n Poses=poses,\n Task=iteration,\n Repetitions=1\n )\n\n experiment_vector.append(experiment)\n\n return experiment_vector", "def get_scnlist_tilecache(self):\n scns2tilecache = list()\n if self.calc_scn_tilecache():\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scene.\")\n query_result = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.or_(\n EDDSentinel1ASF.ExtendedInfo.is_(None),\n sqlalchemy.not_(EDDSentinel1ASF.ExtendedInfo.has_key('tilecache'))),\n EDDSentinel1ASF.Invalid == False,\n EDDSentinel1ASF.ARDProduct == True).order_by(EDDSentinel1ASF.Acquisition_Date.asc()).all()\n if query_result is not None:\n for record in query_result:\n scns2tilecache.append(record.PID)\n ses.close()\n logger.debug(\"Closed the database session.\")\n return scns2tilecache", "def run_analysis(self):\n ### skip some snapshots for testing purposes\n nskip = 199\n read_char.skip_snapshots(self.hfile, self.ifile, nskip)\n ### read in the first two steps (required for velocity related computations\n xs_old, ys_old, lx_old, ly_old, tstep_old, natoms_old = read_char.read_snapshot(self.hfile, self.ifile)\n x_old = xs_old*lx_old\n y_old = ys_old*ly_old\n xs,ys,lx,ly,tstep,natoms = read_char.read_snapshot(self.hfile, self.ifile)\n x = xs*lx\n y = ys*ly\n ### loop over all steps of the input file\n for step in range(nskip+1,self.nsteps-1):\n print step\n ### read in coordinates (as required)\n xs_new,ys_new,lx_new,ly_new,tstep_new,natoms_new = read_char.read_snapshot(self.hfile, self.ifile)\n x_new = xs_new*lx_new\n y_new = ys_new*ly_new\n ### compute further current per/atom quantities\n phi = misc_tools.compute_orientation(x,y,lx,ly,self.npol)\n vx,vy = misc_tools.compute_velocity(x_old,y_old, x_new, y_new, lx, ly, tstep_old, tstep_new, natoms)\n ### start desired analysis methods\n # density\n if self.density_flag:\n self.density.compute(step,x,y,lx,ly,natoms, plot = 'False')\n # number fluctuations\n if self.nf_flag:\n self.numberfluctuation.compute(step,xs,ys, plot = 'False')\n # voronoi density\n if self.voronoi_flag:\n self.voronoidensity.compute(step,x,y,lx,ly,natoms, plot = 'False')\n # velocity / worticity\n if self.velocity_flag:\n self.velocityworticity.compute(step,x,y,vx,vy,natoms,lx,ly, plot = 'False')\n # orientation / velocity\n if self.orientvel_flag:\n self.orientvel.compute(step,x,y,vx,vy,phi,natoms, plot = 'False')\n # defect points\n if self.pointdefects_flag:\n self.pointdefects.compute(step,x,y,phi,lx,ly,natoms)\n ### move coordinate arrays\n xs_old = np.copy(xs)\n ys_old = np.copy(ys)\n x_old = np.copy(x)\n y_old = np.copy(y)\n tstep_old = tstep\n xs = np.copy(xs_new)\n ys = np.copy(ys_new)\n x = np.copy(x_new)\n y = np.copy(y_new)\n tstep = tstep_new\n return", "def get_observations(self):\n joint_states = self.joints_state\n self.force = self.wrench_stamped.wrench.force\n self.torque = self.wrench_stamped.wrench.torque\n self.static_taxel = self.tactile_static.taxels\n# 
dynamic_taxel= tactile_dynamic\n\n# print(\"[force]\", self.force.x, self.force.y, self.force.z)\n# print(\"[torque]\", self.torque.x, self.torque.y, self.torque.z)\n shp_joint_ang = joint_states.position[0]\n shl_joint_ang = joint_states.position[1]\n elb_joint_ang = joint_states.position[2]\n wr1_joint_ang = joint_states.position[3]\n wr2_joint_ang = joint_states.position[4]\n wr3_joint_ang = joint_states.position[5]\n\n shp_joint_vel = joint_states.velocity[0]\n shl_joint_vel = joint_states.velocity[1]\n elb_joint_vel = joint_states.velocity[2]\n wr1_joint_vel = joint_states.velocity[3]\n wr2_joint_vel = joint_states.velocity[4]\n wr3_joint_vel = joint_states.velocity[5]\n\n q = [shp_joint_ang, shl_joint_ang, elb_joint_ang, wr1_joint_ang, wr2_joint_ang, wr3_joint_ang]\n# print(\"q(observation):\", q)\n eef_x, eef_y, eef_z = self.get_xyz(q)\n self.end_effector = self.get_xyz(q)\n eef_x_ini, eef_y_ini, eef_z_ini = self.get_xyz(self.init_joint_pose2) \n\n delta_image_r, delta_image_l = self.get_image()\n self.cnn_image_r = agent.update_cnn(delta_image_r)\n self.cnn_image_l = agent.update_cnn(delta_image_l)\n self.cnn_image_r_list = self.cnn_image_r.tolist()\n self.cnn_image_l_list = self.cnn_image_l.tolist()\n print(\"r_list\", self.cnn_image_r_list)\n print(\"l_list\", self.cnn_image_l_list)\n\n observation = []\n# rospy.logdebug(\"List of Observations==>\"+str(self.observations))\n for obs_name in self.observations:\n if obs_name == \"shp_joint_ang\":\n observation.append((shp_joint_ang - self.init_joint_pose2[0]) * self.joint_n)\n elif obs_name == \"shl_joint_ang\":\n observation.append((shl_joint_ang - self.init_joint_pose2[1]) * self.joint_n)\n elif obs_name == \"elb_joint_ang\":\n observation.append((elb_joint_ang - self.init_joint_pose2[2]) * self.joint_n)\n elif obs_name == \"wr1_joint_ang\":\n observation.append((wr1_joint_ang - self.init_joint_pose2[3]) * self.joint_n)\n elif obs_name == \"wr2_joint_ang\":\n observation.append((wr2_joint_ang - self.init_joint_pose2[4]) * self.joint_n)\n elif obs_name == \"wr3_joint_ang\":\n observation.append((wr3_joint_ang - self.init_joint_pose2[5]) * self.joint_n)\n elif obs_name == \"shp_joint_vel\":\n observation.append(shp_joint_vel)\n elif obs_name == \"shl_joint_vel\":\n observation.append(shl_joint_vel)\n elif obs_name == \"elb_joint_vel\":\n observation.append(elb_joint_vel)\n elif obs_name == \"wr1_joint_vel\":\n observation.append(wr1_joint_vel)\n elif obs_name == \"wr2_joint_vel\":\n observation.append(wr2_joint_vel)\n elif obs_name == \"wr3_joint_vel\":\n observation.append(wr3_joint_vel)\n elif obs_name == \"eef_x\":\n observation.append((eef_x - eef_x_ini) * self.eef_n)\n elif obs_name == \"eef_y\":\n observation.append((eef_y - eef_y_ini) * self.eef_n)\n elif obs_name == \"eef_z\":\n observation.append((eef_z - eef_z_ini) * self.eef_n)\n elif obs_name == \"force_x\":\n observation.append((self.force.x - self.force_ini.x) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_y\":\n observation.append((self.force.y - self.force_ini.y) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_z\":\n observation.append((self.force.z - self.force_ini.z) / self.force_limit1 * self.force_n)\n elif obs_name == \"torque_x\":\n observation.append((self.torque.x - self.torque_ini.x) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_y\":\n observation.append((self.torque.y - self.torque_ini.y) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_z\":\n observation.append((self.torque.z - 
self.torque_ini.z) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"image_cnn\":\n for x in range(0, 10):\n observation.append(self.cnn_image_r_list[0][x])\n# print(\"r_list\", self.cnn_image_r_list[0][x])\n for x in range(0, 10):\n observation.append(self.cnn_image_l_list[0][x])\n# print(\"l_list\", self.cnn_image_l_list[0][x])\n elif obs_name == \"static_taxel\":\n for x in range(0, 28):\n observation.append((self.static_taxel[0].values[x] - self.static_taxel_ini[0].values[x]) * self.taxel_n)\n for x in range(0, 28):\n observation.append((self.static_taxel[1].values[x] - self.static_taxel_ini[1].values[x]) * self.taxel_n)\n# elif obs_name == \"dynamic_taxel\":\n# observation.append(dynamic_taxel[0].values) * self.taxel_n\n# observation.append(dynamic_taxel[1].values) * self.taxel_n\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n print(\"observation\", list(map(round, observation, [3]*len(observation))))\n# print(\"observation\", observation)\n\n return observation", "def get_grid_info(network_file):\n rows = -1\n columns = -1\n landmarks = -1\n time_steps = -1\n\n network_file.seek(0) #move the pointer to line 0\n\n for line in network_file:\n if line.startswith(\"PositionRow_\"):\n\n if rows == -1:\n lines = line.strip().split(\",\")\n rows = int(lines[-1])\n line = line.strip().split()\n pos,time_step = line[0].split(\"_\");\n time_steps = max(time_steps, int(time_step)) ##constantly update time_steps\n elif line.startswith(\"PositionCol_\"):\n if columns == -1:\n line = line.strip().split(\",\")\n columns = int(line[-1])\n elif line.startswith(\"ObserveLandmark\"):\n observation,direction,time_step = line.split()[0].split(\"_\")\n landmarks = max(landmarks, int(observation[-1]));\n\n return rows, columns, landmarks, time_steps", "def get_dimensional_measurements():\n return Global_Module.global_dimensional_measurements", "def getGameState(self):\n ### Student code goes here\n row1 = ()\n row2 = ()\n row3 = ()\n for currRow in range(1,4):\n for currCol in range(1,4):\n tileFound = False\n for fact in self.kb.facts:\n if fact.statement.predicate == \"located\":\n tile = fact.statement.terms[0].term.element\n column = fact.statement.terms[1].term.element\n row = fact.statement.terms[2].term.element\n\n tileNumber = int(tile[-1])\n columnNumber = int(column[-1])\n rowNumber = int(row[-1])\n\n if rowNumber == currRow and columnNumber == currCol:\n tileFound = True\n if rowNumber == 1:\n row1 += tuple([tileNumber])\n elif rowNumber == 2:\n row2 += tuple([tileNumber])\n elif rowNumber == 3:\n row3 += tuple([tileNumber])\n \n break\n\n if not tileFound:\n if currRow == 1:\n row1 += tuple([-1])\n elif currRow == 2:\n row2 += tuple([-1])\n elif currRow == 3:\n row3 += tuple([-1])\n\n\n return (row1, row2, row3)", "def run():\n\n data = parse_data()\n\n wide = 25\n tall = 6\n\n layers = []\n for index in range(0, len(data), wide * tall):\n item = data[index : index + wide * tall]\n item = [item[x : x + wide] for x in range(0, wide * tall, wide)]\n layers.append(item)\n\n lowest, layer = get_layer_containing_fewest_zeroes(layers)\n\n ones = sum([Counter(l).get(\"1\", 0) for l in layer])\n twos = sum([Counter(l).get(\"2\", 0) for l in layer])\n assert (ones * twos) == 1820\n\n display_layers(layers, wide, tall) # ckuj", "def get_run_info_novaseq( instrument_model, application_version, tree, pipeline_type ):\n run_stats = {}\n\n setup_node = tree.getroot().find(\"Setup\")\n if setup_node is None:\n setup_node = tree.getroot()\n\n flowcell_node = 
tree.getroot().find(\"RfidsInfo\")\n instrument_id_node = tree.getroot().find('InstrumentName')\n run_start_date_node = tree.getroot().find('RunStartDate')\n\n # Now actually populate various stats\n run_stats['flow_cell_id'] = flowcell_node.find('FlowCellSerialBarcode').text\n run_stats['date'] = run_start_date_node.text\n run_stats['instrument'] = instrument_id_node.text\n run_stats['flow_cell_mode'] = flowcell_node.find('FlowCellMode').text\n if( run_stats['flow_cell_mode'] in [ 'SP', 'S1', 'S2' ] ):\n run_stats['lanes'] = 2\n elif( run_stats['flow_cell_mode'] in [ 'S4' ] ):\n run_stats['lanes'] = 4\n else:\n raise ValueError( 'Unrecognized flow cell mode \\'%s\\'' % ( run_stats['flow_cell_mode'] ) )\n run_stats['run_id'] = tree.getroot().find('RunId').text\n\n # Read1 and Read2 may be absent\n run_stats['r1_length'] = int(setup_node.find('Read1NumberOfCycles').text)\n run_stats['p7_index_length'] = int(setup_node.find('IndexRead1NumberOfCycles').text)\n\n if( setup_node.find('Read2NumberOfCycles') != None ):\n run_stats['r2_length'] = int(setup_node.find('Read2NumberOfCycles').text)\n run_stats['p5_index_length'] = int(setup_node.find('IndexRead2NumberOfCycles').text)\n run_stats['paired_end'] = True\n else:\n run_stats['paired_end'] = False\n\n application = setup_node.find('Application').text\n application_version = setup_node.find('ApplicationVersion').text\n\n run_stats['instrument_type'] = instrument_model\n\n # Notes:\n # o NovaSeq application 1.7.0 can run reagent kit version 1.0 and 1.5\n # o the NWGC tells us:\n # The NovaSeq v1.5 reagents are run on the NovaSeq that has an updated\n # software which is version 1.7 that flips the i5 indices already on\n # the sequencer when the data comes off. Typically, when the data\n # comes off the sequencers, we need to flip both the i7 and i5 indices\n # to the reverse complement in order to run fastqs or demux the data.\n # With this being the case, only the i7 will need to be reverse\n # complemented typically when data comes off the v1.5 version.\n # o however, not reverse complementing v1.5 fastqs in demultiplexing\n # gives 'normal' looking sample-specific fastq files so I do not\n # reverse complement here but allow for the possiblity in future\n # reagent kits.\n # o The SBS consumable version differs between the two kits. 
The line is\n # <SbsConsumableVersion>1</SbsConsumableVersion>\n # Key\n # 1= v1.0 SBS Reagents\n # 3= v1.5 SBS Reagents\n if( application_version == '1.7.0' or application_version == '1.7.5' ):\n sbs_consumable_version = flowcell_node.find('SbsConsumableVersion').text\n if( sbs_consumable_version == '1' ):\n run_stats['reverse_complement_i5'] = False\n elif( sbs_consumable_version == '3' ):\n if( pipeline_type == 'RNA-seq' ):\n run_stats['reverse_complement_i5'] = True\n elif( pipeline_type == 'ATAC-seq' ):\n run_stats['reverse_complement_i5'] = False\n else:\n raise ValueError('Unrecognized pipeline_type value \\'%s\\'' % ( pipeline_type ))\n else:\n run_stats['reverse_complement_i5'] = False\n\n return run_stats", "def simulate(initstate, t, timestep=forward, drive=donothing, bounds = [0.97, 0.97, 0.97, 0.97], saveinterval=10, beta=0.281105, eps=0.013, gamma=0.0880, mu=0.3, nu=0, dudt_x = dudt, dvdt_x = dvdt, dndt_x = dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # gives surface height array of the system after evert dt\n bounds = np.asarray(bounds, dtype=np.float32)\n h, n, u, v, f, dx, dy, dt = [initstate[k] for k in ('h', 'n', 'u', 'v', 'lat', 'dx', 'dy', 'dt')]\n \n f = np.float32(((2*2*np.pi*np.sin(f*np.pi/180))/(24*3600))[:,np.newaxis])\n \n \n du0 = np.zeros_like(u)\n dv0 = np.zeros_like(v)\n dn0 = np.zeros_like(n)\n \n \n dndt_x(h, n, u, v, dx, dy, dn0)\n dn = (dn0, np.copy(dn0), np.copy(dn0))\n \n dudt_x(h, n, f, u, v, dx, dy, du0)\n du = (du0, np.copy(du0), np.copy(du0), np.copy(du0))\n \n dvdt_x(h, n, f, u, v, dx, dy, dv0)\n dv = (dv0, np.copy(dv0), np.copy(dv0), np.copy(dv0))\n \n nu = (dx+dy)/1000\n \n mmax = np.max(np.abs(n))\n landthresh = 1.5*np.max(n) # threshhold for when sea ends and land begins\n itrs = int(np.ceil(t/dt))\n saveinterval = np.int(saveinterval//dt)\n assert (dt >= 0), 'negative dt!' 
# dont try if timstep is zero or negative\n \n ntt = np.zeros((np.int(np.ceil(itrs/saveinterval)),)+n.shape, dtype=np.float32)\n maxn = np.zeros(n.shape, dtype=n.dtype) # max height in that area\n \n coastx = np.less(h, landthresh) # where the reflective condition is enforced on the coast\n \n print('simulating...')\n try:\n for itr in range(itrs):# iterate for the given number of iterations\n if itr%saveinterval == 0:\n ntt[np.int(itr/saveinterval),:,:] = n\n print(np.argmax( ntt[np.int(itr/saveinterval),:,:],axis=0)[5])\n \n \n maxn = np.max((n, maxn), axis=0) # record new maxes if they are greater than previous records \n \n # pushes n, u, v one step into the future\n n,u,v, du, dv, dn = timestep(h, n, u, v, f, dt, dx, dy, du, dv, dn, beta=beta, eps=eps, gamma=gamma, mu=mu, nu=nu, dudt_x=dudt_x, dvdt_x=dvdt_x, dndt_x=dndt_x, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn)\n\n land(h, n, u, v, coastx) # how to handle land/coast\n border(n, u, v, 15, bounds) \n drive(h, n, u, v, f, dt, dx, dy, nu, coastx, bounds, mu, itr)\n print('simulation complete')\n except Exception as e:\n print('timestep: ', itr)\n raise e\n return ntt, maxn#, minn, timemax # return surface height through time and maximum heights", "def find_endpoints(batch_trajectories):\n # empty lists to fill\n site_lats = []\n site_lons = []\n last_lats = []\n last_lons = []\n lats_150 = []\n lons_150 = [] \n last_times = []\n times_150 = []\n last_sst = []\n sst_150 = []\n \n # temporary lists as placeholders\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n for speed in range(len(batch_trajectories)):\n # working with one speed at a time means working with one nc file at\n # a time\n \n # reset temporary lists\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n # extract variables into lists\n lats = batch_trajectories[speed].variables['lat'][:]\n lons = batch_trajectories[speed].variables['lon'][:]\n lats150 = batch_trajectories[speed].variables['lat150'][:]\n lons150 = batch_trajectories[speed].variables['lon150'][:]\n times = batch_trajectories[speed].variables['time'][:]\n ssts = batch_trajectories[speed].variables['temp'][:]\n ssts_150 = batch_trajectories[speed].variables['temp150'][:]\n\n # if a particle is deleted before time is up, values are masked. 
\n # We'd like to get the last valid number.\n for trajectory in range(len(lats)):\n i = -1 # index for the last value\n while np.ma.is_masked(lats[trajectory][i]) is True:\n i -= 1 # if the value is masked, go to one value sooner\n \n j = i # use j for the 150m values\n while lats150[trajectory][j] > 0:\n # we want the first index where the latitude is recorded.\n # j is actually the last one where it's not recorded, so we\n # extract the information at index j+1\n j -= 1\n\n # once i and j are determined for a trajectory, we can extract the\n # variables and append them to temporary lists.\n temp_site_lats.append(lats[trajectory][0])\n temp_site_lons.append(lons[trajectory][0])\n temp_lats.append(lats[trajectory][i])\n temp_lons.append(lons[trajectory][i])\n temp_lats150.append(lats150[trajectory][j+1])\n temp_lons150.append(lons150[trajectory][j+1])\n temp_times.append(times[trajectory][i])\n temp_sst.append(ssts[trajectory][i])\n temp_sst150.append(ssts_150[trajectory][j+1])\n temp_times150.append(times[trajectory][j+1])\n \n # after the temporary lists are appended by sinking speed, they\n # are appended to the big lists that are returned by the function.\n # this keeps the structure of being separated by sinking speed.\n site_lats.append(temp_site_lats)\n site_lons.append(temp_site_lons)\n last_lats.append(temp_lats)\n last_lons.append(temp_lons)\n lats_150.append(temp_lats150)\n lons_150.append(temp_lons150)\n last_times.append(temp_times)\n times_150.append(temp_times150)\n last_sst.append(temp_sst)\n sst_150.append(temp_sst150)\n \n return site_lats, site_lons, last_lats, last_lons, lats_150, lons_150,\\\n last_times, times_150, last_sst, sst_150", "def run():\n #Initialise variables\n data = build_station_list()\n update_water_levels(data)\n ls = []\n ID = []\n \n #Number of days in past taken data from\n dt = 7\n #How many graphs per window\n limit = 4\n #How many stations\n number = 6\n \n #Create list of measuring_id's sorted by water level\n for station in data:\n if station.typical_range_consistent() == True and station.relative_water_level() != None:\n ls.append((station, station.relative_water_level()))\n\n ls = sorted_by_key(ls, 1)\n \n for station in ls:\n ID.append(station[0])\n \n s = count_inconsistent_sets(ID[:number], dt)\n \n ID = ID[:number+s]\n\n plot_water_levels(ID, dt, limit, s)", "def create_spheres(self,depth_arr):\n\n\n '''\n depth_arr- depth image as numpy array\n '''\n\n try:\n #points=[nose,left_wrist,right,wrist,left_ankle,right ankle]\n points=[self.rpts[0],self.rpts[15],self.rpts[16],self.rpts[27],self.rpts[28]]\n self.spheres.points=[]\n self.spheres.header.frame_id = \"kinect_frame\"\n self.spheres.header.stamp= rospy.Time.now()\n \n self.spheres.id = 0\n self.spheres.action =Marker.ADD\n \n #points\n self.spheres.type = Marker.SPHERE_LIST\n self.spheres.color.r = 1.0\n self.spheres.color.a = 1.0\n \n self.spheres.scale.x = 0.08\n self.spheres.scale.y = 0.08\n self.spheres.scale.z = 0.01\n for p in points:\n depth_val=float(depth_arr[p[1], p[0]])\n pts_x,pts_y,pts_z=self.depth_to_xyz(p[0],p[1],depth_val)\n \n self.sphere_point=Point()\n self.sphere_point.x = pts_x\n self.sphere_point.y = pts_y\n self.sphere_point.z = pts_z\n self.spheres.points.append(self.sphere_point)\n \n except:\n pass", "def build(self):\n if not hasattr(self, 'subtitle'):\n self.subtitle = self.data_code['subtitle']\n #print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (\n #self.ntimes, self.nelements, self.ntotal, self.subtitle))\n nnodes = 1\n\n #self.names = []\n 
#self.nelements //= nnodes\n self.nelements //= self.ntimes\n #self.ntotal\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))\n\n self.ntotal = self.nelements * nnodes * 2\n if self.is_sort1:\n ntimes = self.ntimes\n ntotal = self.ntotal\n else:\n #print(\"ntimes=%s nelements=%s ntotal=%s nnodes=%s\" % (self.ntimes, self.nelements, self.ntotal, nnodes))\n ntimes = self.ntotal\n ntotal = self.nelements // 2\n #self.ntotal = ntotal\n #print(\"**BEND: ntimes=%s ntotal=%s\" % (ntimes, ntotal))\n #self.ntotal = nelements * nnodes * 2\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self._times = np.zeros(ntimes, dtype=dtype)\n #self.ntotal = self.nelements * nnodes\n\n self.element_node = np.zeros((ntotal, 2), dtype=idtype)\n\n # the number is messed up because of the offset for the element's properties\n if not self.nelements * nnodes * 2 == self.ntotal:\n msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (\n self.ntimes, self.nelements, nnodes, self.nelements * nnodes,\n self.ntotal)\n raise RuntimeError(msg)\n\n # [angle, sc, sd, se, sf, omax, omin, mst, msc]\n self.data = np.zeros((ntimes, ntotal, 9), dtype=fdtype)", "def build_detector():\n world_size = 1000000 # 1 km\n\n d = Detector(ice)\n\n #add DOMs at locations x,y,z\n \n channel_id = 0\n\n for x in np.arange(-500000,500001,100000):\n for y in np.arange(-500000,500001,100000):\n for z in np.arange(-500000,500001,100000):\n d.add_pmt(build_dom(),displacement=(x,y,z),channel_id=channel_id)\n channel_id += 1\n\n world = Solid(make.box(world_size,world_size,world_size),ice,vacuum,color=0x33ffffff)\n d.add_solid(world)\n\n return d", "def run_sims(nsims = 10, plot = True):\n mat = init_matrix()\n init_loc = np.where(mat == 1)\n init_loc = (init_loc[0][0], init_loc[1][0])\n loc_list = [init_loc]\n\n for _ in range(nsims):\n loc_list.append(matrix_step(loc_list[-1])) # the most recent entry in the list\n # print(loc_list[-2], loc_list[-1])\n\n if plot:\n plot_matrix(loc_list)\n return(loc_list)", "def run_task(self, config, simParams):\n super().run_task(config, simParams)\n\n fovSize = simParams.get_microscope().get_image_dimensions()\n umPerPix = simParams.get_microscope().get_microns_per_pixel()\n \n sx = self._parameters[\"margin\"]/umPerPix\n sy = self._parameters[\"margin\"]/umPerPix\n ex = fovSize[0] - self._parameters[\"margin\"]/umPerPix\n ey = fovSize[1] - self._parameters[\"margin\"]/umPerPix\n\n # Create save polygons.\n allPoly = []\n for zi in range(simParams.get_number_z()):\n\n tmp = []\n for fov in range(simParams.get_number_positions()):\n [px, py] = simParams.get_fov_xy(fov)\n\n exRect = shapely.geometry.Polygon([[px + sx, py + sy],\n [px + sx, py + ey],\n [px + ex, py + ey],\n [px + ex, py + sy]])\n tmp.append(exRect)\n\n allPoly.append(tmp)\n\n self.save_data({\"extra-cellular\" : allPoly})\n\n # Reference images.\n allFOV = []\n for fov in range(simParams.get_number_positions()):\n allFOV.append(simParams.get_fov_rect(fov))\n\n for zi in range(simParams.get_number_z()):\n fig = plt.figure(figsize = (8,8))\n\n # Draw FOV.\n for elt in allFOV:\n coords = elt.exterior.coords.xy\n x = list(coords[0])\n y = list(coords[1])\n plt.plot(x, y, color = 'gray')\n\n # Draw extra-cellular space.\n for elt in allPoly[zi]:\n coords = elt.exterior.coords.xy\n x = list(coords[0])\n y = list(coords[1])\n plt.plot(x, y, color = 'black')\n\n ax = plt.gca()\n ax.set_aspect('equal', 
'datalim')\n \n plt.title(\"z plane {0:d}\".format(zi))\n plt.xlabel(\"pixels\")\n plt.ylabel(\"pixels\")\n\n fname = \"z_{0:d}.pdf\".format(zi)\n fig.savefig(os.path.join(self.get_path(), fname),\n format='pdf',\n dpi=100)\n\n plt.close()", "def generateCoord(self, resolutionList):\r\n locatorList = []\r\n\r\n print \"Scanning Eye\"\r\n self.getEyeCoord(locatorList, resolutionList[0])\r\n print \"Got Eye Coord\"\r\n print \"Scanning NoseBridge\"\r\n self.getNoseBridgeCoord(locatorList, resolutionList[5])\r\n print \"Got NoseBridge Coord\"\r\n print \"Scanning Nose\"\r\n self.getNoseCoord(locatorList, resolutionList[3])\r\n print \"Got Nose Coord\"\r\n print \"Scanning Mouth\"\r\n self.getMouthCoord(locatorList, resolutionList[1])\r\n print \"Got Mouth Coord\"\r\n print \"Scanning MouthLoop\"\r\n self.getMouthLoopCoord(locatorList, resolutionList[2])\r\n print \"Got MouthLoop Coord\"\r\n print \"Scanning Eyebrow\"\r\n self.getEyebrowCoord(locatorList, resolutionList[4])\r\n print \"Got Eyebrow Coord\"\r\n print \"Scanning Ear\"\r\n self.getEarCoord(locatorList)\r\n print \"Got Ear Coord\"\r\n print \"Scanning SideProfile\"\r\n self.getSideProfileCoord(locatorList)\r\n print \"Got SideProfile Coord\"\r\n\r\n print \"Scanning FrontProfile\"\r\n self.getFrontProfileCoord(locatorList)\r\n print \"Got FrontProfile Coord\"\r\n\r\n #Grouping locatorList\r\n cmds.select(locatorList)\r\n locatorGrp = cmds.group(name = \"LocatorCoordGrp#\")\r\n\r\n self.scaleToUnitVolume(locatorGrp)\r\n\r\n self.reverseName(locatorGrp)\r\n for locator in locatorList:\r\n if \"SideProfile_Coord\" in locator:\r\n cmds.move(0, locator, x=True, ws=True)\r\n return locatorGrp", "def enumerate_viewports(self,*args):\n schema=\"org.compiz.core\"\n path=\"/org/compiz/profiles/unity/plugins/core/\"\n keys=['hsize','vsize']\n screen = Gdk.Screen.get_default()\n screen_size=[screen.get_width(),screen.get_height()]\n grid=[int(str(self.gsettings_get(schema,path,key))) for key in keys]\n x_vals=[screen_size[0]*x for x in range(0,grid[0])]\n y_vals=[screen_size[1]*x for x in range(0,grid[1])]\n \n viewports=[(x,y) for y in y_vals for x in x_vals ]\n viewports_dict = OrderedDict()\n for ix,vp in enumerate(viewports,1):\n viewports_dict[vp] = ix\n return viewports_dict", "def runSurvey():\n fieldFile = globals()['settings']['fieldFile']\n # Number of particles to launch\n numParticles = globals()['settings']['numParticles']\n # Radius of spherical simulation boundary used for launching and exiting\n rLim = globals()['settings']['rLim']\n # Particle stepping method\n steppingMethod = globals()['settings']['steppingMethod']\n # Coarseness of output grid that counts particle fluxes in simulation volume\n fluxGridCoarseness = globals()['settings']['fluxGridCoarseness']\n \n # B field in R and Z\n r, z, BR = fieldGrid('fields/Brs_' + fieldFile)\n _, _, BZ = fieldGrid('fields/Bzs_' + fieldFile)\n _, _, habitatBR = fieldGrid('fields/Brs_habitat_' + fieldFile)\n _, _, habitatBZ = fieldGrid('fields/Bzs_habitat_' + fieldFile)\n r = r[:-1]\n z = z[:-1]\n BR = BR[:-1,:-1] # I MAY CAUSE A BUG IN THE FUTURE\n BZ = BZ[:-1,:-1]\n habitatMax = np.max((habitatBR**2+habitatBZ**2)**.5)\n habitatPrescription = 30\n BR += habitatBR*habitatPrescription/habitatMax\n BZ += habitatBZ*habitatPrescription/habitatMax\n print('Habitat prescription (T):', habitatPrescription)\n Bmagnitude = (BR**2+BZ**2)**.5\n\n qms, vs = qmAndVelocitySpectrum(numParticles)\n if globals()['settings']['qmPrescribed']:\n qms = 
np.ones(numParticles)*globals()['settings']['qmPrescribed']\n if globals()['settings']['v0Prescribed']:\n vs = np.ones(numParticles)*globals()['settings']['v0Prescribed']\n\n startingPoints = [randomPointOnSphere(rLim) for _ in range(numParticles)]\n directions = [randomDirectionCos(-sp) for sp in startingPoints]\n\n # Simulate without magnetic field\n start = time.time()\n rReduced, zReduced, gridOff, _, habitatCrossingsOff, GDTcrossingsOff, gridOffUnscaled, _ = monteCarloRun(startingPoints, qms, vs, directions, BR, BZ, r, z, rLim, fluxGridCoarseness, 0)\n print('Time elapsed (s):', int(time.time()-start))\n \n # Simulate with magnetic field\n start = time.time()\n _, _, gridOn, trappedOn, habitatCrossingsOn, GDTcrossingsOn, gridOnUnscaled, trappedOnUnscaled = monteCarloRun(startingPoints, qms, vs, directions, BR, BZ, r, z, rLim, fluxGridCoarseness, steppingMethod)\n print('Time elapsed (s):', int(time.time()-start))\n # np.save('cache/{}particles_accel.npy'.format(numParticles), [rReduced, zReduced, gridOn])\n try:\n print('---\\nGDT crossing change: {}%'.format(round(100*(GDTcrossingsOn-GDTcrossingsOff)/GDTcrossingsOff, 3)))\n print('Habitat crossing change: {}%\\n---'.format(round(100*(habitatCrossingsOn-habitatCrossingsOff)/habitatCrossingsOff, 3)))\n except Exception as e:\n print(e)\n \n # plotDiff(r, z, Bmagnitude, gridOn, gridOff)\n plot6panel(r, z, rReduced, zReduced, Bmagnitude, gridOn, gridOff, trappedOn)" ]
[ "0.5814169", "0.568188", "0.56700325", "0.5589331", "0.5585894", "0.5571232", "0.5549986", "0.5539828", "0.5512618", "0.5489092", "0.5436523", "0.5434775", "0.541683", "0.541113", "0.54004127", "0.5391306", "0.5379776", "0.5379776", "0.5369655", "0.5350718", "0.53469867", "0.53130716", "0.53061", "0.5303365", "0.5301052", "0.5298041", "0.52978635", "0.52916765", "0.5279312", "0.52687865", "0.52684045", "0.5262016", "0.52565706", "0.52254164", "0.52192414", "0.5218424", "0.5211985", "0.5207974", "0.5197176", "0.5189187", "0.51729804", "0.5159088", "0.5145382", "0.51308817", "0.51193386", "0.5112018", "0.51115733", "0.510917", "0.51070225", "0.51061517", "0.5105311", "0.5096599", "0.5088731", "0.5086748", "0.5080908", "0.50727385", "0.5070832", "0.50705665", "0.50672096", "0.50623405", "0.5056756", "0.50510055", "0.50494605", "0.5047459", "0.50464195", "0.50463074", "0.50398386", "0.5036823", "0.5035572", "0.5035374", "0.50262785", "0.50249046", "0.502356", "0.50197214", "0.50195724", "0.50175357", "0.5008036", "0.50058436", "0.5005167", "0.5001582", "0.49973133", "0.49920315", "0.49909613", "0.49835724", "0.49803424", "0.49799114", "0.4977494", "0.49750254", "0.49707434", "0.49705037", "0.4967687", "0.49576396", "0.49498403", "0.49424517", "0.49376372", "0.49365807", "0.49319264", "0.49317607", "0.49308014", "0.49304265", "0.4929789" ]
0.0
-1
Overrides the default implementation; we want to check that the quadratic form is equal as well
def __eq__(self, other): if type(other) is type(self): # TODO: check that this does not mix Clifford classes with different symmetric bilinear forms, # as created with class factories. return ( self.items() == other.items() and self.symmetric_bilinear_form.__code__.co_code == other.symmetric_bilinear_form.__code__.co_code ) return NotImplemented
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_equality(self):\n\n s3 = space(curvature=1/5)\n for k in (0, -1, 1, 1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s1 = space(fake_curvature=k)\n s2 = space(fake_curvature=k)\n self.assertTrue(s1 == s2)\n self.assertTrue(hash(s1) == hash(s2))\n self.assertTrue(str(s1) == str(s2))\n self.assertTrue(repr(s1) == repr(s2))\n self.assertTrue(s1 != s3)", "def test_single_quadrant(self):", "def test_eq():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.sin(x) + fwd.cos(y)\n g = fwd.sin(x) + fwd.cos(y)\n h = fwd.sin(y) + fwd.cos(x)\n assert f == g\n assert f != h", "def test_eq_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1, 'Q2.8')\n assert a == b", "def almost_equals(self, other, decimal=...): # -> bool:\n ...", "def test_equal(self):\n self.assertTrue(Fraction(7,10)==Fraction(7,10))", "def test_solve_quadratic(self):\n iden1 = Identity()\n iden2 = Identity()\n iden3 = Identity()\n iden1.x.fixed = False\n iden2.x.fixed = False\n iden3.x.fixed = False\n term1 = LeastSquaresTerm(iden1.target, 1, 1)\n term2 = LeastSquaresTerm(iden2.target, 2, 2)\n term3 = LeastSquaresTerm(iden3.target, 3, 3)\n prob = LeastSquaresProblem([term1, term2, term3])\n prob.solve()\n self.assertAlmostEqual(prob.objective, 0)\n self.assertAlmostEqual(iden1.x.val, 1)\n self.assertAlmostEqual(iden2.x.val, 2)\n self.assertAlmostEqual(iden3.x.val, 3)", "def testEquality(self):\n pass", "def test_eq():\n # Test for equality special method with scalar Rnode object and float value\n x = Rnode(2.0)\n try:\n assert (x == 2.0) == True\n assert (x == 1.0) == False\n except AssertionError as e:\n print(e)\n raise AssertionError\n\n # Test for equality special method with two scalar Rnode object\n x = Rnode(2.0)\n y = Rnode(2.0)\n z = Rnode(1.0)\n try:\n assert (x == y) == True\n assert (x == z) == False\n except AssertionError as e:\n print(e)\n raise AssertionError", "def __eq__(self,v2):\n\t\treturn -1e-13<(self-v2).norm()<1e-13", "def __eq__(self,other):\n if isinstance(other, RegularPoly):\n return(self.vert_count == other.vert_count and self.radius == other.radius)\n else:\n raise NotImplementedError('Incorrect data type')", "def test_q(self):\n assert np.allclose(self.stepper.q, self.ODE.exact(self.stepper.t), rtol=1e-3, atol=1e-5)", "def __eq__(self, frac):\n return self.equal == frac.equal", "def __eq__(self, *args):\n return _ida_hexrays.qvector_ccase_t___eq__(self, *args)", "def test_cases():\r\n quadratic_roots(1,3,-21)\r\n quadratic_roots(2,-4,-6)\r\n quadratic_roots(1,4,-12)\r\n quadratic_roots(4,12,9)\r\n quadratic_roots(-2,-11,-21)\r\n quadratic_roots(4,1,4)\r\n quadratic_roots(1,1,0)\r\n quadratic_roots(1,0,-16)\r\n quadratic_roots(1,-14,-49)\r\n quadratic_roots(1,10,25)", "def is_equation(self): \n return False", "def is_perfect_square():", "def calc_q_square(self):\n return self._q_x()**2 + self._q_z()**2", "def __eq__(self, other):\r\n return abs(self.x - other.x) + abs(self.y - other.y) < Vertex.epsilon", "def test_case_02_equilateral(self):\n self.__assert_equals_test_case(self.yield_equilateral_triangles(), 'Equilateral Triangle')", "def test__eq__() -> None:\n point_1 = Point(1, 2)\n point_2 = Point(-2, -4)\n point_3 = Point(3, 3)\n point_4 = Point(0, 0)\n line_segment_1 = LineSegment(first=point_1, second=point_2)\n line_segment_2 = LineSegment(first=point_1, second=point_2)\n line_segment_3 = LineSegment(first=point_3, second=point_4)\n\n assert line_segment_1 == line_segment_2\n assert not line_segment_1 == line_segment_3", "def test_inverse_c(self):\n for q in self.all:\n self.assertTrue((q * 
q.inverse()).almost_equal(q.inverse()*q))", "def __eq__(self, other):\n if other is self:\n return True\n if isinstance(other, CoordFunctionSymb):\n if other.parent() != self.parent():\n return False\n else:\n return bool(other._express == self._express)\n else:\n return bool(self._express == other)", "def __eq__(self, other):\n return (self.real+(self.imag*1j)) == (other.real+(other.imag*1j))\n #return (Complex(self.real, self.imag) == Complex(other.real, other.imag))", "def test_eq_1():\n a = FixedPoint(1, 'Q2.8')\n assert a == 1", "def test_equality_check_against_other_object_doesnt_raise_exception(self):\n test_object = Vec3(1, 2, 3)\n self.assertFalse(test_object == Quat(1, 2, 3, 4))\n self.assertFalse(Quat(1, 2, 3, 4) == test_object)\n self.assertTrue(test_object != Quat(1, 2, 3, 4))\n self.assertTrue(Quat(1, 2, 3, 4) != test_object)", "def test_neq():\n # Test for not equal special method with scalar Rnode object and float value\n x = Rnode(2.0)\n try:\n assert (x != 2) == False\n assert (x != 1) == True\n except AssertionError as e:\n print(e)\n raise AssertionError\n\n # Test for equality special method with two scalar Dual object\n x = Rnode(2.0)\n y = Rnode(2.0)\n z = Rnode(1.0)\n try:\n assert (x != y) == False\n assert (x != z) == True\n except AssertionError as e:\n print(e)\n raise AssertionError", "def __eq__(self, other):\n if self.slope() == None:\n return other.slope() == None and self.xintercept() == other.xintercept()\n return self.slope() == other.slope() and self.yintercept() == other.yintercept()", "def __eq__(self,other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator==other.numerator):\n return True\n else:\n return False\n return", "def test_eq(self):\n st_1 = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n st_2 = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n assert st_1 == st_2", "def test_quadraticMultitaperIsDifferent(self):\n data = _load_mtdata('v22_174_series.dat.gz')\n # Calculate the spectra.\n spec, freq = mtspec(data, 1.0, 4.5, number_of_tapers=2)\n # No NaNs are supposed to be in the output.\n self.assertEqual(np.isnan(spec).any(), False)\n self.assertEqual(np.isnan(freq).any(), False)\n spec2, freq2 = mtspec(data, 1.0, 4.5, number_of_tapers=2,\n quadratic=True)\n # No NaNs are supposed to be in the output.\n self.assertEqual(np.isnan(spec2).any(), False)\n self.assertEqual(np.isnan(freq2).any(), False)\n # Test that these are not equal.\n self.assertRaises(AssertionError, np.testing.assert_almost_equal,\n spec, spec2)", "def square(q_1: Q) -> Q:\n\n end_q_type = f\"{q_1.q_type}²\"\n\n qxq = _commuting_products(q_1, q_1)\n\n sq_q = Q(q_type=end_q_type, representation=q_1.representation)\n sq_q.t = qxq[\"tt\"] - qxq[\"xx+yy+zz\"]\n sq_q.x = qxq[\"tx+xt\"]\n sq_q.y = qxq[\"ty+yt\"]\n sq_q.z = qxq[\"tz+zt\"]\n\n return sq_q", "def __eq__(self, line):\n \n return abs( 1 - np.dot(sm.unitvec(self.vec), sm.unitvec(line.vec))) < 10*_eps", "def __neq__(self, other: 'ComplexVal'):\n return Not(self.__eq__(other))", "def eq(self, y):\n return 1 - self.ne(y)", "def isEquilateral(self):\n\t\treturn self.a == self.b == self.c", "def equal(q_1: Q, q_2: Q, scalar: bool = True, vector: bool = True) -> bool:\n\n q_1.check_representations(q_2)\n\n q_1_t, q_1_x, q_1_y, q_1_z = (\n sp.expand(q_1.t),\n sp.expand(q_1.x),\n sp.expand(q_1.y),\n sp.expand(q_1.z),\n )\n q_2_t, q_2_x, q_2_y, q_2_z = (\n sp.expand(q_2.t),\n sp.expand(q_2.x),\n 
sp.expand(q_2.y),\n sp.expand(q_2.z),\n )\n\n if not scalar and not vector:\n raise ValueError(\"Equals needs either scalar_q or vector_q to be set to True\")\n\n t_equals = math.isclose(q_1_t, q_2_t)\n x_equals = math.isclose(q_1_x, q_2_x)\n y_equals = math.isclose(q_1_y, q_2_y)\n z_equals = math.isclose(q_1_z, q_2_z)\n\n result = False\n\n if scalar and not vector and t_equals:\n result = True\n\n elif not scalar and vector and x_equals and y_equals and z_equals:\n result = True\n\n elif scalar and vector and t_equals and x_equals and y_equals and z_equals:\n result = True\n\n return result", "def test_notequal(self):\n self.assertTrue(Fraction(144,2)!=Fraction(8,4))", "def __eq__(self, other):\n equal = self.numerator == other.numerator and self.denominator == other.denominator\n\n if equal == False:\n self.numerator = abs(self.numerator)\n self.denominator = abs(self.denominator)\n other.numerator = abs(other.numerator)\n other.denominator = abs(other.denominator)\n\n if self.numerator < other.numerator:\n smallnum = self.numerator\n else:\n smallnum = other.numerator\n\n if self.denominator < other.denominator:\n smallnum2 = self.denominator\n else:\n smallnum2 = other.denominator\n\n num1 = f\"{smallnum}/{smallnum2}\"\n\n gcd1 = math.gcd(self.numerator, other.numerator)\n gcd2 = math.gcd(self.denominator, other.denominator)\n\n num2 = f\"{gcd1}/{gcd2}\"\n\n return num1 == num2\n\n else:\n return equal", "def __eq__(self, other):\n if self.coeff != other.coeff:\n return False\n \n if self.GetKeggID() != other.GetKeggID():\n return False\n \n if self.phase.Name() != other.phase.Name():\n return False\n \n return True", "def almost_eq(e1,e2) :\n\treturn round(e1-e2,4) == 0.0", "def test_scalar_multiples(self):\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -3):\n \n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n phi_ref = 1.61803398874989484820458683436559\n for rp in (\n (),\n (1,),\n (4/5, -3/5),\n (0, 2/11, -6/11, 9/11),\n ):\n p = s.make_point(rp, magic)\n\n # ensure: (0) p = 0\n self.assertTrue(point_isclose(\n p * 0,\n s.make_origin(len(p)-1)\n ))\n\n # ensure: (-1) p = -p\n self.assertTrue(point_isclose(\n p * -1,\n -p\n ))\n\n # ensure: (2) p = 2p = p + p\n p2 = p + p\n self.assertTrue(point_isclose(\n p * 2,\n p2\n ))\n\n # ensure: (4) p = (2) (2p)\n p4 = p2 + p2\n self.assertTrue(point_isclose(\n p * 4,\n p2 * 2\n ))\n\n # ensure: (5) p = 5p = 2(2p) + p\n p5 = p4 + p\n self.assertTrue(point_isclose(\n p * 5,\n p5\n ))\n \n # don't do non-integer tests for K > 0 because looping strangeness\n if k <= 0:\n # ensure: (phi) (phi p) = (phi) p + p\n pphi = p * phi_ref\n self.assertTrue(point_isclose(\n pphi * phi_ref,\n pphi + p\n ))", "def __eq__(self, other):\n if not isinstance(other, OneOfSolidNumericsSolver):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n return np.allclose(self.P, other.P)", "def __ge__(self, other):\n return other._is_subpolyhedron(self)", "def is_equation(self):\n return True", "def is_equation(self):\n return True", "def test_equality_method(self):\r\n wc1 = WhereClause('a', EqualsOperator(), 'c')\r\n wc2 = WhereClause('a', EqualsOperator(), 'c')\r\n assert wc1 == wc2", "def test_centeredEquation(self):\n\n A33, K = self.cs.centeredEquation\n self.assertTrue((self.A33 == A33).all())\n self.assertEqual(K, 1.)", "def test_solve_quadratic_fixed(self):\n iden1 = Identity()\n iden2 = Identity()\n iden3 = 
Identity()\n iden1.x.val = 4\n iden2.x.val = 5\n iden3.x.val = 6\n iden1.x.name = 'x1'\n iden2.x.name = 'x2'\n iden3.x.name = 'x3'\n iden2.x.fixed = False\n term1 = LeastSquaresTerm(iden1.target, 1, 1)\n term2 = LeastSquaresTerm(iden2.target, 2, 2)\n term3 = LeastSquaresTerm(iden3.target, 3, 3)\n prob = LeastSquaresProblem([term1, term2, term3])\n prob.solve()\n self.assertAlmostEqual(prob.objective, 10)\n self.assertAlmostEqual(iden1.x.val, 4)\n self.assertAlmostEqual(iden2.x.val, 2)\n self.assertAlmostEqual(iden3.x.val, 6)", "def __eq__(self, other):\n if self.n != other.n or self.m != other.m:\n raise TypeError(\"Illegal dimensions for eq operator (%s x %s - %s x %s)\" %\n (self.n, self.m, other.n, other.m))\n return self.values == other.values", "def __eq__(self, other):\n return LimitedGoniometer.__eq__(self,other) and \\\n (np.deg2rad(self.chi) == other.chi)", "def __eq__(self, other):\n return LimitedGoniometer.__eq__(self,other) and \\\n (np.deg2rad(self.chi) == other.chi)", "def __eq__(self, other):\n return LimitedGoniometer.__eq__(self,other) and \\\n (np.deg2rad(self.chi) == other.chi)", "def __eq__(self, other):\n return LimitedGoniometer.__eq__(self,other) and \\\n (np.deg2rad(self.chi) == other.chi)", "def test_almost_equal(self):\n x = Point(\n lat=23.4,\n lng=23.1,\n author=self.u\n )\n self.assertTrue(self.a == x)\n self.assertFalse(self.a != x)", "def is_equation(self):\n return False", "def __eq__(self, other):\n\t\ttry:\n\t\t\treturn (np.array_equal(self.val, other.val) and \n\t\t\t\t\tnp.array_equal(self.der, other.der))\n\t\texcept:\n\t\t\t# Compare scalar Vars with derivative 1 to scalars\n\t\t\tif len(self.val) == 1 and np.array_equal(self.der, [1.]):\n\t\t\t\treturn self.val == other\n\t\t\treturn False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return math.isclose(self.x, other.x, rel_tol=1e-12, abs_tol=1e-12) and\\\n math.isclose(self.y, other.y, rel_tol=1e-12, abs_tol=1e-12)\n else:\n return False", "def __eq__(*args, **kwargs):\n return _gdi_.Pen___eq__(*args, **kwargs)", "def __ne__(self, polynomial_2: object) -> bool:\n return not self.__eq__(polynomial_2)", "def test_equilateral(self):\r\n self.assertEqual(triangle_classification(1, 1, 1), 'Equilateral Triangle')\r\n self.assertEqual(triangle_classification(3.33, 3.33, 3.33), 'Equilateral Triangle')\r\n self.assertNotEqual(triangle_classification(3.33, 3.33, 3.333), 'Equilateral Triangle')\r\n self.assertEqual(triangle_classification(1e0, 1e0, 1e0), 'Equilateral Triangle')", "def test_non_euclidean_scale_curvature(self):\n\n magic = 77773.333773777773733\n for kdir in (1, -1):\n for mul in (2, 5, 1/3, 1/11, magic, 1/magic):\n for name, dim in (\n ('sphere_s1', 1),\n ('sphere_v2', 2),\n ('sphere_s2', 2),\n ('sphere_v3', 3)\n ):\n s1 = space(fake_curvature=kdir)\n s2 = space(fake_curvature=kdir / mul)\n self.assertTrue(isclose(\n getattr(s1, name)(1) * mul**dim,\n getattr(s2, name)(mul)\n ))", "def test_pythagorean_triples(self):\n\n s = space(0)\n for a, b, c in (\n (3, 4, 5),\n (8, 15, 17),\n (33, 56, 65)\n ):\n self.assertTrue(isclose(\n s.hypot(a, b),\n c\n ))\n self.assertTrue(isclose(\n s.leg(a, c),\n b\n ))", "def _test_equiv(self, v0, v1, query_points=None):\n def array_as_set(array2d):\n return set(tuple(x) for x in array2d)\n\n # all points are filled\n assert g.np.all(v0.is_filled(v1.points))\n assert g.np.any(v1.is_filled(v0.points))\n\n # test different number of dimensions\n self.assertEqual(v0.shape, v1.shape)\n self.assertEqual(v0.filled_count, v1.filled_count)\n 
self.assertEqual(v0.volume, v1.volume)\n g.np.testing.assert_equal(v0.encoding.dense, v1.encoding.dense)\n # points will be in different order, but should contain same coords\n g.np.testing.assert_equal(\n array_as_set(v0.points), array_as_set(v1.points))\n # g.np.testing.assert_equal(v0.origin, v1.origin)\n # g.np.testing.assert_equal(v0.pitch, v1.pitch)\n if query_points is not None:\n indices0 = v0.points_to_indices(query_points)\n indices1 = v1.points_to_indices(query_points)\n g.np.testing.assert_equal(indices0, indices1)\n g.np.testing.assert_allclose(\n v0.points_to_indices(v0.indices_to_points(indices0)), indices0)\n g.np.testing.assert_allclose(\n v1.points_to_indices(v1.indices_to_points(indices1)), indices1)\n g.np.testing.assert_equal(\n v0.is_filled(query_points),\n v1.is_filled(query_points))", "def values_eq(self, a, b):\r\n return a == b", "def __eq__(self, rhs):\n return self.x == rhs.x and self.y == rhs.y", "def __eq__(self, other):\n x_eq = self.x == other.x\n y_eq = self.y == other.y\n return x_eq and y_eq", "def __eq__(self, other):\n x_eq = self.x == other.x\n y_eq = self.y == other.y\n return x_eq and y_eq", "def realEqual(x,y,eps=10e-10):\n return abs(x-y) < eps", "def test_hash_equality(self):\n origin = np.random.randn(3)\n normal = np.random.randn(3)\n up_vector = np.random.randn(3)\n up_vector2 = np.random.randn(3)\n p1 = shapes_3d.CoordinatePlane(origin, normal, up_vector)\n p2 = shapes_3d.CoordinatePlane(origin, normal, up_vector)\n p3 = shapes_3d.CoordinatePlane(origin, normal, up_vector2)\n \n self.assertEqual(p1, p2)\n self.assertNotEqual(p1, p3)", "def test_euclidean_scale(self):\n\n s = space(curvature=0)\n\n magic = 77773.333773777773733\n for mul in (2, 5, 1/3, 1/11, magic, 1/magic):\n for name, dim in (\n ('sphere_s1', 1),\n ('sphere_v2', 2),\n ('sphere_s2', 2),\n ('sphere_v3', 3)\n ):\n self.assertTrue(isclose(\n getattr(s, name)(1) * mul**dim,\n getattr(s, name)(mul)\n ))", "def __eq__(self, other):\r\n if isinstance(other, vec4):\r\n return self.x==other.x and self.y==other.y and self.z==other.z\r\n else:\r\n return 0", "def chemical_equations_equal(eq1, eq2, exact=False):\r\n\r\n left1, arrow1, right1 = split_on_arrow(eq1)\r\n left2, arrow2, right2 = split_on_arrow(eq2)\r\n\r\n if arrow1 == '' or arrow2 == '':\r\n return False\r\n\r\n # TODO: may want to be able to give student helpful feedback about why things didn't work.\r\n if arrow1 != arrow2:\r\n # arrows don't match\r\n return False\r\n\r\n try:\r\n factor_left = divide_chemical_expression(left1, left2)\r\n if not factor_left:\r\n # left sides don't match\r\n return False\r\n\r\n factor_right = divide_chemical_expression(right1, right2)\r\n if not factor_right:\r\n # right sides don't match\r\n return False\r\n\r\n if factor_left != factor_right:\r\n # factors don't match (molecule counts to add up)\r\n return False\r\n\r\n if exact and factor_left != 1:\r\n # want an exact match.\r\n return False\r\n\r\n return True\r\n except ParseException:\r\n # Don't want external users to have to deal with parsing exceptions. 
Just return False.\r\n return False", "def almost_equals(self, other):\n import math\n ox, oy = other\n dx = self[0] - ox\n dy = self[1] - oy\n return (dx*dx + dy*dy) < pygonal.EPSILON2", "def test_quadratic_features_explicit():\n X_observed = QuadraticFeatures().fit_transform(X_standardized)\n X_expected = np.hstack(\n [\n X_standardized,\n (X_standardized[:, 0] * X_standardized[:, 0]).reshape((-1, 1)),\n (X_standardized[:, 1] * X_standardized[:, 1]).reshape((-1, 1)),\n (X_standardized[:, 0] * X_standardized[:, 1]).reshape((-1, 1)),\n ]\n )\n np.testing.assert_array_equal(X_observed, X_expected)", "def contractor(self, *args, **kwargs):\n vertices = copy.deepcopy(args[0])\n nrange = len(vertices[0])\n xpts = []\n ypts = []\n for i in range(nrange):\n xpts.append(vertices[0][i].value)\n ypts.append(vertices[1][i].value)\n constraint = copy.deepcopy(args[1])\n \n \n \n \n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n \n ## the all important computation split (need to abstract this kind of thing)\n ##lhs = (np.sqrt(qxdot*qxdot + qydot*qydot)**3.) *constraint\n lhs = ( ( np.sqrt(qxdot**2 + qydot**2) )**3 )*constraint\n \n # check2 = qxdot*qyddot\n # if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n # t1 = (lhs - check2)/qydot\n \n #\n # qyddot\n #\n check2 = qydot*qxddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #if qxdot.contains(0.) and abs(qxdot.min.value)>1.e-6:\n # print 'qxdot = ',qxdot\n # print 'qxdot not invertable, implement other logic please'\n if abs(float(qxdot.inf))<1.e-6:\n qxdot.inf = 1.e-10\n print 'invert qxdot'\n print 'qxdot = ', qxdot\n \n #t1 = (lhs + qydot*qxddot)/(qxdot)\n t1 = (lhs + check2)/(qxdot)\n \n t1 = t1 & qyddot # go ahead and shrink t1 to qyddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n min_ans = (ypts[j]*float(self.localBasis[2,j])) + min_ans\n min_ans = t1 - min_ans\n if (abs(float(self.localBasis[2,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if new_ans[i].isempty == False: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n else:\n print 'warning, possible constraint violation, curvature 1'\n \n ## \n ## qxdot\n ##\n check2 = qydot*qxddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #if qyddot.contains(0.):\n # print 'qyddot = ',qyddot\n # print 'qyddot not invertable, implement other logic please'\n \n if qyddot.contains(0.) 
and qyddot.width()<1.e-6:\n qxdot.inf = 0.#1.e-10\n print 'invert qyddot'\n print 'qyddot = ',qyddot\n fix = (lhs + check2)*(1./qyddot)#*(qyddot**-1.)\n fix = fix & qxdot # go ahead and shrink fix to qxdot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n \n for i in range(len(xpts)): #contract on x[i]\n min_ans = 0.\n for j in range(len(xpts)): # add up all jth pieces of the dot product except i\n if j==i:\n pass\n else:\n \n min_ans = (xpts[j]*float(self.localBasis[1,j] ) ) + min_ans\n min_ans = fix - min_ans\n if (abs(float(self.localBasis[1,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n else:\n print 'warning, possible constraint violation, curvature 2'\n \n \n ## switch to the other side\n \n ##\n ## contract on qydot\n ##\n check2 = qxdot*qyddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n# if qxddot.contains(0.):\n# print 'qxddot = ',qxddot\n# print 'qxddot not invertable, implement other logic please'\n# qxddot.min.value = 0.\n if qxddot.contains(0.):\n qxddot.inf = 0.\n \n print 'invert qxddot'\n print 'qxddot = ',qxddot\n t1 = (lhs - check2)/(-qxddot)#*(-qxddot**-1)\n t1 = t1 & qydot\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n #print 't1 = ',t1\n #print 'ypts[{}] = {}'.format(i,ypts[i])\n #print 'localbasis[{},{}] = {}'.format(1,i,self.localBasis[1,j])\n min_ans = (ypts[j]*float(self.localBasis[1,j])) + min_ans\n min_ans = t1 - min_ans\n if (abs(float(self.localBasis[1,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n else:\n print 'warning, possible constraint violation, curvature 3'\n \n ##contract on qxdot\n \n check2 = qxdot*qyddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #contract on qxddot\n# if qydot.contains(0.):\n# print 'qydot = ',qxddot\n# print 'qydot not invertable, implement other logic please'\n if qydot.contains(0.):\n qydot.inf = 0.\n print 'invert qydot'\n print 'qydot = ',qydot\n fix = (lhs - qxdot*qyddot)/(-qydot)#*(-qydot**-1)\n fix = fix & qxddot # go ahead and shrink t1 to quddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(xpts)):\n min_ans = 0.\n for j in range(len(xpts)):\n if j==i:\n pass\n else:\n min_ans = (xpts[j]*float(self.localBasis[2,j] ) ) + min_ans\n min_ans = fix - min_ans\n if (abs(float(self.localBasis[2,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n else:\n print 'warning, possible constraint violation, curvature 4'\n \n for i in range(nrange):\n vertices[0][i].value = 
xpts[i]\n vertices[1][i].value = ypts[i]\n return vertices", "def test_equivalent():\n # Positive test\n assert u.equivalent(np.arange(10)*q.um, q.cm)\n\n # Negative units test\n assert not u.equivalent(np.arange(10)*q.um, q.Jy)\n\n # Negative dtype test\n assert not u.equivalent(np.arange(10), q.um)", "def __eq__(self, other):\n return LimitedGoniometer.__eq__(self,other) and \\\n (np.deg2rad(self.chi) == other.chi) and \\\n (np.deg2rad(self.omega) == other.omega)", "def __eq__(self, other):\n if self.rows != other.rows or self.cols != other.cols:\n return False\n for i in range(self.rows):\n for j in range(self.cols):\n # Need isclose (Python >= 3.5) for float precision\n if not math.isclose(self[i, j], other[i, j]):\n return False\n return True", "def __eq__(self, other):\n return abs(self - other) < 10e-10", "def test_sphere2(self):\n fun = get_problem('sphere2', dimension=2, lower=-1, upper=1)\n self.assertAlmostEqual(fun(np.zeros(2)), 0.0)", "def __eq__(self, polynomial_2: object) -> bool:\n if not isinstance(polynomial_2, Polynomial):\n return False\n\n if self.degree != polynomial_2.degree:\n return False\n\n for i in range(self.degree + 1):\n if self.coefficients[i] != polynomial_2.coefficients[i]:\n return False\n\n return True", "def test_equality_with_quat(self):\n quat = Quat(1, 2, 3, 4)\n self.assertEqual(quat, Quat(1, 2, 3, 4))", "def __ge__(self, other):\n return self.x ** 2 + self.y ** 2 >= other.x ** 2 + other.y ** 2", "def is_pythagorean_triplet(a,b,c):\n\treturn a**2 + b**2 == c**2", "def __ge__(self, other: Compound[Scalar]) -> bool:\n return (self._points_set >= other._points_set\n if isinstance(other, Multipoint)\n else NotImplemented)", "def _check_matrix(self, x, *args):\n if self._special and x.determinant() != 1:\n raise TypeError('matrix must have determinant one')\n F = self.invariant_bilinear_form()\n if x * F * x.transpose() != F:\n raise TypeError('matrix must be orthogonal with respect to the invariant form')\n # TODO: check that quadratic form is preserved in characteristic two", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, Z2Symmetries):\n return False\n\n return (\n self.symmetries == other.symmetries\n and self.sq_paulis == other.sq_paulis\n and self.sq_list == other.sq_list\n and self.tapering_values == other.tapering_values\n )", "def approx_eq(a, b):\n return abs(a-b) < approx_eq.eps", "def test_quadratic_trinomial_init(self):\n a, b, c = 2, 3, 4\n expected = Polynomial(a, b, c)\n\n qt = QuadraticTrinomial(a, b, c)\n\n self.assertEqual(expected, qt)", "def __eq__(self,f2):\n return self.__num * f2.den == self.__den * f2.num", "def __eq__(self, other):\n if isinstance(other, Vector):\n return abs(self - other) < EPSILON\n else:\n raise TypeError(other)", "def eqs_and_deriv(self, _):\n pass", "def test_qing(self):\n fun = get_problem('qing', self.dimension, -500, 500)\n self.assertAlmostEqual(fun(self.array10), 584.0, delta=1e-4)", "def double(self):\n if self.__valeur1 == self.__valeur2:\n return True\n else:\n return False", "def sqrtx():\n return Operator([[(1.+1.j)/2,(1.-1.j)/2],[(1.-1.j)/2,(1.+1.j)/2]])", "def __eq__(self, other):\n firstnum = self.num*other.den\n secondnum = self.den*other.num\n\n return firstnum == secondnum", "def _is_valid(self):\n\n if (\n self.poly.weight_0 != 0\n or len(self.poly.weight_1) != self.num_qubits\n or len(self.poly.weight_2) != int(self.num_qubits * (self.num_qubits - 1) / 2)\n or len(self.poly.weight_3)\n != int(self.num_qubits * (self.num_qubits - 1) * (self.num_qubits - 2) / 6)\n ):\n 
return False\n if (\n (self.linear).shape != (self.num_qubits, self.num_qubits)\n or len(self.shift) != self.num_qubits\n or not np.allclose((np.linalg.det(self.linear) % 2), 1)\n ):\n return False\n if (\n not (set(self.poly.weight_1.flatten())).issubset({0, 1, 2, 3, 4, 5, 6, 7})\n or not (set(self.poly.weight_2.flatten())).issubset({0, 2, 4, 6})\n or not (set(self.poly.weight_3.flatten())).issubset({0, 4})\n ):\n return False\n if not (set(self.shift.flatten())).issubset({0, 1}) or not (\n set(self.linear.flatten())\n ).issubset({0, 1}):\n return False\n return True", "def _almost_equal(x, y):\n pass", "def __ne__(self, other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator!=other.numerator):\n return True\n else:\n return False" ]
[ "0.66910017", "0.6484679", "0.64480114", "0.6388875", "0.6279408", "0.6213788", "0.61958015", "0.61131495", "0.60789216", "0.60751086", "0.6066792", "0.6038577", "0.602096", "0.5967689", "0.5960483", "0.5953086", "0.59330404", "0.5928619", "0.5919364", "0.5909345", "0.5903139", "0.5901893", "0.5898977", "0.58942723", "0.5893356", "0.5888294", "0.58879066", "0.5879077", "0.58788073", "0.587871", "0.58785176", "0.587071", "0.5869322", "0.58556104", "0.5841106", "0.5828044", "0.58268034", "0.57999444", "0.5799115", "0.579521", "0.57749176", "0.57746446", "0.5762074", "0.5761746", "0.57597905", "0.5758456", "0.5758456", "0.575843", "0.57518137", "0.572623", "0.5726137", "0.5720673", "0.5720673", "0.5720673", "0.5720673", "0.5709495", "0.5705747", "0.5699197", "0.56984985", "0.56957006", "0.56933916", "0.5686011", "0.56849796", "0.56812763", "0.5677632", "0.5675949", "0.5674548", "0.5674478", "0.5674478", "0.56720597", "0.5664179", "0.56623274", "0.5660357", "0.5658187", "0.5654967", "0.5651221", "0.5648928", "0.56478906", "0.56421745", "0.5635238", "0.5634406", "0.56325686", "0.5631171", "0.5629551", "0.5627462", "0.562714", "0.56261337", "0.5620178", "0.5619951", "0.5614783", "0.5614686", "0.5611186", "0.5608016", "0.5603061", "0.55943644", "0.558087", "0.55786633", "0.55771536", "0.55743647", "0.5568747", "0.5563547" ]
0.0
-1
Overrides the default implementation
def __hash__(self): return hash(tuple(sorted(self.__dict__.items())))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self):\r\n raise NotImplementedError('override me')", "def __call__(self):\n raise NotImplementedError", "def override(self):\n return None", "def __call__(self):\n raise NotImplementedError()", "def __call__(self):\n pass", "def __call__(self):\n pass", "def __call__( self ):\n pass", "def __call__(self):\n\t\treturn", "def base(self):\n raise NotImplementedError()", "def __call__(self) -> None:", "def __init__(self):\n\t\tsuper().__init__()", "def __init__(self):\n\t\tsuper().__init__()", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, **kwargs):\n raise NotImplementedError", "def basic(self):\n pass", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __call__(self, *args, **kwargs) -> None:\n raise NotImplementedError()", "def _base(self):\n pass", "def __init__(self):\n\n super().__init__()", "def __init__(self):\n\n super().__init__()", "def __init__(self):\n super()", "def __init__(self) -> None:\n super().__init__()", "def __init__(self) -> None:\n super().__init__()", "def run(self):\n raise Exception('derived class should redefine this function')", "def __init__(self):\n super().__init__()", "def __init__(self):\n super().__init__()", "def __init__(self):\n super().__init__()", "def __init__(self):\n super().__init__()", "def __init__(self):\n super().__init__()", "def __init__(self):\n super().__init__()", "def __init__(self):\n super().__init__()", "def __init__(self):\n super().__init__()", "def __init__(self):\n super().__init__()", "def __init__(self):\n super().__init__()", "def __init__(self):\n super().__init__()", "def __init__(self):\n raise NotImplementedError()", "def __init__(self):\r\n\t\tpass", "def extension (self):\n assert False, \"To be implemented by child\"", "def _patch_implementation(self, original, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)", "def __init__(self):\r\n super().__init__()", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self, *args, **kwargs):\n super(self.__class__, self).__init__(*args, **kwargs)", "def use(self):", "def __call__(self):\n return self", "def __init__ (self):\n pass", "def __call__(self):", "def __call__(self):", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, 
**kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass" ]
[ "0.8084532", "0.7757971", "0.7661786", "0.7621949", "0.75538856", "0.75538856", "0.7523752", "0.74512076", "0.7332789", "0.73304206", "0.729408", "0.729408", "0.7254287", "0.7254287", "0.72166955", "0.72166955", "0.72166955", "0.71078914", "0.69840276", "0.6974919", "0.6974919", "0.6974919", "0.6974919", "0.6945564", "0.69447464", "0.6920196", "0.6920196", "0.68995243", "0.6872062", "0.6872062", "0.686463", "0.68502116", "0.68502116", "0.68502116", "0.68502116", "0.68502116", "0.68502116", "0.68502116", "0.68502116", "0.68502116", "0.68502116", "0.68502116", "0.6848468", "0.683811", "0.68246967", "0.6812622", "0.680204", "0.6800107", "0.6783674", "0.6783674", "0.6783674", "0.6783674", "0.6783674", "0.6783674", "0.6783674", "0.6783674", "0.6783674", "0.6783674", "0.6749037", "0.6749037", "0.6749037", "0.6749037", "0.6749037", "0.6749037", "0.6749037", "0.6749037", "0.6749037", "0.6749037", "0.6749037", "0.6749037", "0.6749037", "0.6749037", "0.6744225", "0.67382264", "0.6732416", "0.67121875", "0.67087287", "0.67087287", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454", "0.66986454" ]
0.0
-1
r"""Uses the ideal in the clifford algebra,
def _clifford_swap(cls, slot_i, slot_j) -> Tensor: return Tensor( { Tensor._merge_keys((slot_j,), (slot_i,)): -1, Tensor._merge_keys(): 2 * cls.symmetric_bilinear_form(slot_i, slot_j), } )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solvate(self):\n\n pass", "def solve(self):", "def get_sol(self):", "def idealize(self) -> None:\n self.k = np.zeros(6, dtype=float)\n self.p = np.zeros(2, dtype=float)\n self.c = np.zeros(2, dtype=float)", "def clifford_set(u):\n i, x, y, z = u.v\n result = []\n result.append(u.clone()) # I\n result.append(Uop(-x, i, -z, y, u.hierarchy, u.construction + [\"X\"], gateset=u.gateset)) # iX, but treat it as X due to only phase difference\n result.append(Uop((i-x)/SQRT2, (x+i)/SQRT2, (y-z)/SQRT2, (z+y)/SQRT2, u.hierarchy, u.construction + [\"(I+iX)\"], gateset=u.gateset))\n result.append(Uop((i+x)/SQRT2, (x-i)/SQRT2, (y+z)/SQRT2, (z-y)/SQRT2, u.hierarchy, u.construction + [\"(I-iX)\"], gateset=u.gateset))\n result.append(Uop((i-y)/SQRT2, (x+z)/SQRT2, (y+i)/SQRT2, (z-x)/SQRT2, u.hierarchy, u.construction + [\"(I+iY)\"], gateset=u.gateset))\n result.append(Uop((i+y)/SQRT2, (x-z)/SQRT2, (y-i)/SQRT2, (z+x)/SQRT2, u.hierarchy, u.construction + [\"(I-iY)\"], gateset=u.gateset))\n for idx in range(6):\n i, x, y, z = result[idx].v\n c = result[idx].construction[-1:] if idx != 0 else []\n result.append(Uop(-z, -y, x, i, u.hierarchy, u.construction + c + [\"Z\"], gateset=u.gateset)) # iZ\n result.append(Uop((i-z)/SQRT2, (x-y)/SQRT2, (y+x)/SQRT2, (z+i)/SQRT2, u.hierarchy, u.construction + c + [\"(I+iZ)\"], gateset=u.gateset))\n result.append(Uop((i+z)/SQRT2, (x+y)/SQRT2, (y-x)/SQRT2, (z-i)/SQRT2, u.hierarchy, u.construction + c + [\"(I-iZ)\"], gateset=u.gateset))\n\n return result", "def circuitSat(C):", "def idealOpAmp():", "def method3(self):\n cres=0.\n Ux_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.ALDM[ix ,iy, : , : ]\n mat2=self.ALDM[(ix%self.kS.Nx)+1, iy, : , : ]\n mat3=self.ALDM[ix ,(iy%self.kS.Ny)+1, : , : ]\n \n Ux_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[self.NL-1:,self.NL-1:])\n Uy_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[self.NL-1:,self.NL-1:])\n\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_aloc[ix,iy]*Uy_aloc[ix+1,iy]/Ux_aloc[ix,iy+1]/Uy_aloc[ix,iy])\n cres+=ftemp/2./pi/1j\n \n return cres.real\n #End of method3", "def relu(self):\n return self * self.ge(0)", "def __init__(self, M, rat):\n self.M = M\n xc0, _ = np.polynomial.chebyshev.chebgauss(M-0)\n xc1, _ = np.polynomial.chebyshev.chebgauss(M-1)\n xc2, _ = np.polynomial.chebyshev.chebgauss(M-2)\n # vandermonde and inverse vandermonde matrices\n self.V0 = np.polynomial.chebyshev.chebvander(xc0, M-1)\n self.V1 = np.polynomial.chebyshev.chebvander(xc1, M-2)\n self.V2 = np.polynomial.chebyshev.chebvander(xc2, M-3)\n self.VI0 = np.linalg.inv(self.V0)\n self.VI1 = np.linalg.inv(self.V1)\n self.VI2 = np.linalg.inv(self.V2)\n # differentiation matrices\n DC01 = np.polynomial.chebyshev.chebder(np.eye(M-0)) / rat\n DC12 = np.polynomial.chebyshev.chebder(np.eye(M-1)) / rat\n DC00 = np.row_stack([DC01, np.zeros(M)])\n self.D00 = self.V0.dot(DC00.dot(self.VI0))\n self.D01 = self.V1.dot(DC01.dot(self.VI0))\n self.D12 = self.V2.dot(DC12.dot(self.VI1))\n # boundary condition operators\n self.ibc_dirichlet = np.polynomial.chebyshev.chebvander(1, M-1).dot(self.VI0)\n self.obc_dirichlet = np.polynomial.chebyshev.chebvander(-1, M-1).dot(self.VI0)\n self.ibc_neumann = self.ibc_dirichlet.dot(self.D00)\n self.obc_neumann = self.obc_dirichlet.dot(self.D00)\n # rank reduction operators\n temp = np.zeros([M-1, M-0], dtype=float)\n 
np.fill_diagonal(temp, 1.0)\n self.R01 = self.V1.dot(temp.dot(self.VI0))\n temp = np.zeros([M-2, M-1], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.R12 = self.V2.dot(temp.dot(self.VI1))\n self.R02 = self.R12.dot(self.R01)\n # get poof operator from M-1 --> M\n temp = np.zeros([M, M-1], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.P10 = self.V0.dot(temp.dot(self.VI1))", "def test_superposition_ud_cnot():\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n H q1\n CNOT q1 q0\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.5, 0.0, 0.0, 0.5]).all()", "def naive(self):\n if self.ch in (2, 3):\n raise TypeError(\"cannot be defined over characteristic two/three.\")\n\n sform = self.simple()\n\n k = 0\n f = sform.cubic\n for i in range(card(sform.basefield)):\n x = sform.basefield.createElement(i)\n k += sform.basefield.Legendre(f(x))\n return -k", "def test_superposition_cnot():\n\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n H q0\n CNOT q0 q1\n \"\"\"\n )\n\n result = run(program, run_gate_array, return_distribution=True)\n assert isclose(result, [0.5, 0.0, 0.0, 0.5]).all()", "def solve(self):\n ...", "def solve(self):\n pass", "def solve(self):\n pass", "def contract_tenors(self):\n\n\tself.r_outer_r[:,:,0,1,:] = self.r_outer_r[:,:,0,1,:]/(1. - self.k_dot_r[0,1,:])\n\tself.r_outer_r[:,:,0,2,:] = self.r_outer_r[:,:,0,2,:]/(1. - self.k_dot_r[0,2,:])\n\t\n\tself.r_outer_r[:,:,1,0,:] = self.r_outer_r[:,:,1,0,:]/(1. - self.k_dot_r[1,0,:])\n\tself.r_outer_r[:,:,1,2,:] = self.r_outer_r[:,:,1,2,:]/(1. - self.k_dot_r[1,2,:])\n\t\n\tself.r_outer_r[:,:,2,0,:] = self.r_outer_r[:,:,2,0,:]/(1. - self.k_dot_r[2,0,:])\n\tself.r_outer_r[:,:,2,1,:] = self.r_outer_r[:,:,2,1,:]/(1. - self.k_dot_r[2,1,:])\n\n\tself.delta_l = np.zeros((3,3,self.N),dtype=np.complex_)\n \n\tself.delta_l[0,1,:] = get_l(self,0,1)\n\tself.delta_l[1,0,:] = get_l(self,1,0)\n\t\n\tself.delta_l[0,2,:] = get_l(self,0,2)\n\tself.delta_l[2,0,:] = get_l(self,2,0)\n\t\n\tself.delta_l[1,2,:] = get_l(self,1,2)\n\tself.delta_l[2,1,:] = get_l(self,2,1)\n \n\treturn", "def clebsch_gordan((J1,M1),(J2,M2),(J3,M3)):\n cg=(-1)**(J2-J1-M3)*math.sqrt(2*J3+1)*pygsl.sf.coupling_3j(int(2*J1), int(2*J2), int(2*J3), int(2*M1), int(2*M2),int(-2*M3))[0]\n #\n return cg", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. 
working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def covar(self):\n a, c, d, b = self.to_ccw()\n return a * d - b * c", "def disagreement(self):\n return 0.5*(np.dot(np.dot(np.transpose(self.x),self.L),self.x)).item(0)", "def a_realization(self):\n if self.t==1:\n return self.kmonomial()\n else:\n return self.kHallLittlewoodP()", "def analyticSol (x):\n\treturn x*(1-x);", "def solution(self) -> State:", "def coefficient(self) -> float:\n ...", "def CL(self):", "def euc_dist(self, squared=True):", "def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... 
and of Berry phase (Chern number).\n\n return cres.real, ftempall", "def test_reference_conversion_factors():\n assert constants.eV == pytest.approx(1.602176565e-19)\n assert constants.eV * constants.N_A / constants.kcal == pytest.approx(23.06, 3e-5)\n assert constants.hartree * constants.N_A / constants.kcal == pytest.approx(627.5095)\n assert constants.hartree / constants.eV == pytest.approx(27.2114)\n assert constants.hartree * constants.centi / (\n constants.h * constants.c\n ) == pytest.approx(219474.63)", "def __xor__(self, other):\n\n if isinstance(other, Dyadic):\n return NotImplemented\n if isinstance(other, (int, type(Zero()))):\n if (other == 0):\n return self * 0\n self._check_vector(other)\n\n def _det(mat):\n \"\"\"This is needed as a little method for to find the determinant\n of a list in python; needs to work for a 3x3 list.\n SymPy's Matrix won't take in Vector, so need a custom function.\n You shouldn't be calling this.\n\n \"\"\"\n\n return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1])\n + mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] *\n mat[2][2]) + mat[0][2] * (mat[1][0] * mat[2][1] -\n mat[1][1] * mat[2][0]))\n\n outvec = Vector([])\n ar = other.args # For brevity\n for i, v in enumerate(ar):\n tempx = v[1].x\n tempy = v[1].y\n tempz = v[1].z\n tempm = ([[tempx, tempy, tempz], [self & tempx, self & tempy,\n self & tempz], [Vector([ar[i]]) & tempx,\n Vector([ar[i]]) & tempy, Vector([ar[i]]) & tempz]])\n outvec += _det(tempm)\n return outvec", "def _get_coeffs(self):\n # lift (Clmax) and parasitic drag (Cd0max)\n self.cl = 0.0\n self.cd = 0.0\n kpp = 0.0\n\n for sail in self.sails:\n\n self.cl += sail.cl(self.awa) * sail.area * sail.bk\n self.cd += sail.cd(self.awa) * sail.area * sail.bk\n kpp += sail.cl(self.awa) ** 2 * sail.area * sail.bk * sail.kp\n\n self.cl /= self.area\n self.cd /= self.area\n\n # viscous quadratic parasitic drag and induced drag\n devisor_1 = self.area * self.cl ** 2\n devisor_2 = np.pi * self._heff(self.awa) ** 2\n self.CE = (kpp / devisor_1 if devisor_1 else 0.0) + (self.area / devisor_2 if devisor_2 else 0.0)\n\n # fraction of parasitic drag due to jib\n self.fcdj = 0.0\n for sail in self.sails:\n if sail.type == \"jib\":\n self.fcdj = (\n sail.bk * sail.cd(self.awa) * sail.area / (self.cd * self.area)\n )\n\n # final lift and drag\n self.cd = self.cd * (\n self.flat * self.fcdmult(self.flat) * self.fcdj + (1 - self.fcdj)\n ) + self.CE * self.cl ** 2 * self.flat ** 2 * self.fcdmult(self.flat)\n self.cl = self.flat * self.cl", "def _compute_imprimitivity(self):\n m = floor(self._.d / 2)\n self._.antipodal = all(full_simplify(\n self._.b[i] - self._.c[self._.d - i]) == 0\n for i in range(self._.d) if i != m)\n self._.bipartite = all(a == 0 for a in self._.a)\n if self._.antipodal:\n try:\n self._.r = integralize(\n 1 + self._.b[m] / self._.c[self._.d - m])\n except TypeError:\n raise InfeasibleError(\"covering index not integral\")\n if self._.d >= 2:\n if self._.d == 2:\n b = [self._.b[0]/(self._.b[1]+1)]\n c = [Integer(1)]\n else:\n b = self._.b[:m]\n c = list(self._.c[1:m+1])\n if is_divisible(self._.d, 2):\n c[-1] *= self._.r\n scheme = self._get_class()(tuple(b), tuple(c))\n else:\n scheme = ASParameters(P=[[1]])\n self._.antipodal_subscheme = self.add_subscheme(scheme,\n self.ANTIPODAL)\n if self._.bipartite:\n if self._.d >= 2:\n b = tuple(self._.b[2*i]*self._.b[2*i+1]/self._.c[2]\n for i in range(m))\n c = tuple(self._.c[2*i+1]*self._.c[2*i+2]/self._.c[2]\n for i in range(m))\n scheme = self._get_class()(b, c)\n else:\n 
scheme = ASParameters(P=[[1]])\n self._.bipartite_subscheme = self.add_subscheme(scheme,\n self.BIPARTITE)", "def change_variables((a,b,c,d), (n,r,m)): \n return ( n*a**2 + r*a*b + m*b**2, 2*(n*a*c + m*b*d) + r*(a*d + c*b), \\\n n*c**2 + r*c*d + m*d**2 )", "def _compute_correction(self, initial_state, final_state, a, b, c, s):\r\n pertub = self.pertub\r\n pertub_s = pertub *10\r\n \r\n pred_no_pertub = self._motion_update_one_shot(initial_state, a, b, c, s)\r\n pred_pertub_a = self._motion_update_one_shot(initial_state, a +pertub, b, c, s)\r\n pred_pertub_b = self._motion_update_one_shot(initial_state, a, b +pertub, c, s)\r\n # no need to correct C, C is constrained by kappa_final\r\n # # pred_pertub_c = self._motion_update_one_shot(initial_state, a, b, c +pertub, s)\r\n pred_pertub_s = self._motion_update_one_shot(initial_state, a, b, c, s +pertub_s)\r\n\r\n d_state = np.zeros((3,1))\r\n d_pertub_state = np.zeros((3,3))\r\n Jacobian = np.zeros((3,3))\r\n for i in range(0, 3):\r\n d_pertub_state[i][0] = (final_state[i] - pred_pertub_a[i]) # a\r\n d_pertub_state[i][1] = (final_state[i] - pred_pertub_b[i]) # b\r\n # d_pertub_state[i][2] = (final_state[i] - pred_pertub_c[i]) # c (no update)\r\n d_pertub_state[i][2] = (final_state[i] - pred_pertub_s[i]) # s\r\n \r\n d_state[i] = final_state[i] - pred_no_pertub[i]\r\n \r\n Jacobian[i][0] = (d_pertub_state[i][0] - d_state[i])/pertub # a\r\n Jacobian[i][1] = (d_pertub_state[i][1] - d_state[i])/pertub # b\r\n # Jacobian[i][2] = (d_pertub_state[i][2] - d_state[i])/pertub # c (no update)\r\n Jacobian[i][2] = (d_pertub_state[i][2] - d_state[i])/pertub_s # s\r\n\r\n # inv_Jacobian = np.linalg.inv(Jacobian)\r\n inv_Jacobian = np.linalg.pinv(Jacobian)\r\n correction = np.dot(inv_Jacobian, d_state)\r\n # pdb.set_trace()\r\n return correction", "def informedness(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n n = p1 + q1\n\n if n == 0:\n return np.nan\n elif p1 == n:\n return 0.0\n # return _div(a - b, 2 * (a + b))\n elif q1 == n:\n return 0.0\n # return _div(d - c, 2 * (d + c))\n else:\n return _div(self.covar(), p1 * q1)", "def reaction_forces(Ca, la, x1, x2, x3, xa, h, d1, d3, theta, P, q, E, I):\r\n \r\n equation_matrix = np.array([[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \r\n [1, 0, 0, 1, 0, 1, 0, np.sin(theta), 0, 0, 0, 0, (P*np.sin(theta)+q*la*np.cos(theta))], \r\n [0, 1, 0, 0, 1, 0, 1, np.cos(theta), 0, 0, 0, 0, (P*np.cos(theta)-q*la*np.sin(theta))],\r\n \r\n [-(Ca/4-h/2), 0, 0, -(Ca/4-h/2) ,0 , -(Ca/4-h/2), 0, (np.cos(theta)*h/2-np.sin(theta)*Ca/4), 0, 0, 0, 0, (P*np.cos(theta)*h/2*-P*np.sin(theta)*Ca/4)], \r\n [0, (x2-x1), 0, 0, 0, 0, -(x3-x2), (np.cos(theta)*xa/2), 0, 0, 0, 0, (-P*np.cos(theta)*xa/2+q*la*np.sin(theta)*(la/2-x2))], \r\n [-(x2-x1), 0, 0, 0, 0, (x3-x2), 0, -np.sin(theta)*xa/2, 0, 0, 0, 0, (P*np.sin(theta)*xa/2+q*la*np.cos(theta)*(la/2-x2))], \r\n \r\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, x1, 1, -q*np.sin(theta)*((x1**4)/24)], \r\n [0, ((x2-x1)**3)/6, 0, 0, 0, 0, 0, ((np.cos(theta))*((xa/2)**3)/6), 0, 0, x2, 1, (-q*np.sin(theta)*((x2**4)/24))], \r\n [0, ((x3-x1)**3)/6, 0, 0, ((x3-x2)**3)/6, 0, 0, ((np.cos(theta))*((x3-x2+xa/2)**3)/6), 0, 0, x3, 1, (-q*np.sin(theta)*((x3**4)/24)+P*(np.cos(theta))*(x3-x2-xa/2)**3/6)], \r\n [0, 0, 0, 0, 0, 0, 0, 0, x1, 1, 0, 0, (-E*I*d1*+q*np.cos(theta)*(x1**4)/24)], \r\n [(((x2-x1)**3)/6), 0, 0, 0, 0, 0, 0, ((-np.sin(theta))*((xa/2)**3)/6), x2, 1, 0, 0, (q*np.cos(theta)*(x2**4)/24)], \r\n 
[(((x3-x1)**3)/6),0,0,(((x3-x2)**3)/6),0,0,0,((-np.sin(theta))*((x3-x2+xa/2)**3)/6),x3,1,0,0,(-E*I*d3*+q*np.cos(theta)*((x3**4)/24)+P/6*np.sin(theta)*(x3-x2-xa/2)**3)]])\r\n \r\n \r\n unknown_matrix = equation_matrix[:,:-1]\r\n constant_matrix = equation_matrix[:,-1]\r\n \r\n \r\n solution_matrix = np.linalg.solve(unknown_matrix,constant_matrix)\r\n \r\n solution_matrix = solution_matrix/1000\r\n \r\n (R1y, R1z, R2x, R2y, R2z, R3y, R3z, RI, c1, c2, c3, c4) = tuple(solution_matrix)\r\n \r\n print((R1y, R1z, R2x, R2y, R2z, R3y, R3z, RI, c1, c2, c3, c4))", "def _solve(self, mu=None):\n pass", "def Mo96(self,dc,nu):\n return 1. + (nu**2.-1.)/dc", "def conj(z):", "def __rtruediv__(self, other):\r\n return other * self.reciprocal()", "def mobius(decomp): #fix 1 value\n return 0 if any([decomp[p] >= 2 for p in decomp]) else (-1) ** (breadth(decomp) % 2)", "def narration_target(self):", "def __truediv__(self, o): \n return self * o.inv()", "def complex_inverse(c1,cr):", "def rawsolve(self,):\n m = self.m\n n = self.n\n z = self.z\n mark = self.mark\n kAAt = self.kAAt\n iAAt = self.iAAt\n AAt = self.AAt\n diag = self.diag\n consistent = True\n eps = 0.0\n m2 = m+n\n\n if self.ndep:\n eps = self.epssol * np.abs(z).max()\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- L z |\n #| */\n\n for i in range(m2):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n row = iAAt[k]\n z[row] -= AAt[k]*beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- D z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n z[i] = z[i]/diag[i]\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| t -1 |\n #| z <- (L ) z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n beta -= AAt[k]*z[iAAt[k]]\n z[i] = beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n return consistent", "def test_direct_c_python_correspondence_with_correction():\n N = 10\n r = 0.5 + np.arange(N).astype('float64') \n x = 2*r.reshape((1, -1))**2\n out1 = abel.direct._pyabel_direct_integral(x, r, 1)\n out2 = abel.direct._cabel_direct_integral( x, r, 1)\n assert_allclose(out1, out2, rtol=1e-9, atol=1e-9)", "def contractor(self, *args, **kwargs):\n vertices = copy.deepcopy(args[0])\n nrange = len(vertices[0])\n xpts = []\n ypts = []\n for i in range(nrange):\n xpts.append(vertices[0][i].value)\n ypts.append(vertices[1][i].value)\n constraint = copy.deepcopy(args[1])\n \n \n \n \n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n \n ## the all important computation split (need to abstract this kind of thing)\n ##lhs = (np.sqrt(qxdot*qxdot + qydot*qydot)**3.) *constraint\n lhs = ( ( np.sqrt(qxdot**2 + qydot**2) )**3 )*constraint\n \n # check2 = qxdot*qyddot\n # if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n # t1 = (lhs - check2)/qydot\n \n #\n # qyddot\n #\n check2 = qydot*qxddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #if qxdot.contains(0.) 
and abs(qxdot.min.value)>1.e-6:\n # print 'qxdot = ',qxdot\n # print 'qxdot not invertable, implement other logic please'\n if abs(float(qxdot.inf))<1.e-6:\n qxdot.inf = 1.e-10\n print 'invert qxdot'\n print 'qxdot = ', qxdot\n \n #t1 = (lhs + qydot*qxddot)/(qxdot)\n t1 = (lhs + check2)/(qxdot)\n \n t1 = t1 & qyddot # go ahead and shrink t1 to qyddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n min_ans = (ypts[j]*float(self.localBasis[2,j])) + min_ans\n min_ans = t1 - min_ans\n if (abs(float(self.localBasis[2,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if new_ans[i].isempty == False: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n else:\n print 'warning, possible constraint violation, curvature 1'\n \n ## \n ## qxdot\n ##\n check2 = qydot*qxddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #if qyddot.contains(0.):\n # print 'qyddot = ',qyddot\n # print 'qyddot not invertable, implement other logic please'\n \n if qyddot.contains(0.) and qyddot.width()<1.e-6:\n qxdot.inf = 0.#1.e-10\n print 'invert qyddot'\n print 'qyddot = ',qyddot\n fix = (lhs + check2)*(1./qyddot)#*(qyddot**-1.)\n fix = fix & qxdot # go ahead and shrink fix to qxdot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n \n for i in range(len(xpts)): #contract on x[i]\n min_ans = 0.\n for j in range(len(xpts)): # add up all jth pieces of the dot product except i\n if j==i:\n pass\n else:\n \n min_ans = (xpts[j]*float(self.localBasis[1,j] ) ) + min_ans\n min_ans = fix - min_ans\n if (abs(float(self.localBasis[1,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n else:\n print 'warning, possible constraint violation, curvature 2'\n \n \n ## switch to the other side\n \n ##\n ## contract on qydot\n ##\n check2 = qxdot*qyddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n# if qxddot.contains(0.):\n# print 'qxddot = ',qxddot\n# print 'qxddot not invertable, implement other logic please'\n# qxddot.min.value = 0.\n if qxddot.contains(0.):\n qxddot.inf = 0.\n \n print 'invert qxddot'\n print 'qxddot = ',qxddot\n t1 = (lhs - check2)/(-qxddot)#*(-qxddot**-1)\n t1 = t1 & qydot\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n #print 't1 = ',t1\n #print 'ypts[{}] = {}'.format(i,ypts[i])\n #print 'localbasis[{},{}] = {}'.format(1,i,self.localBasis[1,j])\n min_ans = (ypts[j]*float(self.localBasis[1,j])) + min_ans\n min_ans = t1 - min_ans\n if (abs(float(self.localBasis[1,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if 
not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n else:\n print 'warning, possible constraint violation, curvature 3'\n \n ##contract on qxdot\n \n check2 = qxdot*qyddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #contract on qxddot\n# if qydot.contains(0.):\n# print 'qydot = ',qxddot\n# print 'qydot not invertable, implement other logic please'\n if qydot.contains(0.):\n qydot.inf = 0.\n print 'invert qydot'\n print 'qydot = ',qydot\n fix = (lhs - qxdot*qyddot)/(-qydot)#*(-qydot**-1)\n fix = fix & qxddot # go ahead and shrink t1 to quddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(xpts)):\n min_ans = 0.\n for j in range(len(xpts)):\n if j==i:\n pass\n else:\n min_ans = (xpts[j]*float(self.localBasis[2,j] ) ) + min_ans\n min_ans = fix - min_ans\n if (abs(float(self.localBasis[2,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n else:\n print 'warning, possible constraint violation, curvature 4'\n \n for i in range(nrange):\n vertices[0][i].value = xpts[i]\n vertices[1][i].value = ypts[i]\n return vertices", "def discriminant(self):\r\n return self.__b**2 - (4 * self.__a * self.__c)", "def test_clifford_1_qubit_generation(self):\n clifford_dicts = [\n {\"stabilizer\": [\"+Z\"], \"destabilizer\": [\"+X\"]},\n {\"stabilizer\": [\"+X\"], \"destabilizer\": [\"+Z\"]},\n {\"stabilizer\": [\"+Y\"], \"destabilizer\": [\"+X\"]},\n {\"stabilizer\": [\"+X\"], \"destabilizer\": [\"+Y\"]},\n {\"stabilizer\": [\"+Z\"], \"destabilizer\": [\"+Y\"]},\n {\"stabilizer\": [\"+Y\"], \"destabilizer\": [\"+Z\"]},\n {\"stabilizer\": [\"-Z\"], \"destabilizer\": [\"+X\"]},\n {\"stabilizer\": [\"+X\"], \"destabilizer\": [\"-Z\"]},\n {\"stabilizer\": [\"-Y\"], \"destabilizer\": [\"+X\"]},\n {\"stabilizer\": [\"+X\"], \"destabilizer\": [\"-Y\"]},\n {\"stabilizer\": [\"-Z\"], \"destabilizer\": [\"-Y\"]},\n {\"stabilizer\": [\"-Y\"], \"destabilizer\": [\"-Z\"]},\n {\"stabilizer\": [\"-Z\"], \"destabilizer\": [\"-X\"]},\n {\"stabilizer\": [\"-X\"], \"destabilizer\": [\"-Z\"]},\n {\"stabilizer\": [\"+Y\"], \"destabilizer\": [\"-X\"]},\n {\"stabilizer\": [\"-X\"], \"destabilizer\": [\"+Y\"]},\n {\"stabilizer\": [\"-Z\"], \"destabilizer\": [\"+Y\"]},\n {\"stabilizer\": [\"+Y\"], \"destabilizer\": [\"-Z\"]},\n {\"stabilizer\": [\"+Z\"], \"destabilizer\": [\"-X\"]},\n {\"stabilizer\": [\"-X\"], \"destabilizer\": [\"+Z\"]},\n {\"stabilizer\": [\"-Y\"], \"destabilizer\": [\"-X\"]},\n {\"stabilizer\": [\"-X\"], \"destabilizer\": [\"-Y\"]},\n {\"stabilizer\": [\"+Z\"], \"destabilizer\": [\"-Y\"]},\n {\"stabilizer\": [\"-Y\"], \"destabilizer\": [\"+Z\"]},\n ]\n cliffords = [Clifford.from_dict(i) for i in clifford_dicts]\n for n in range(24):\n clifford = CliffordUtils.clifford_1_qubit(n)\n self.assertEqual(clifford, cliffords[n])", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. 
A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def calculate_useful_stuff(self):\n\n self.pos = self.pair[1].position - self.pair[0].position\n self.vel = self.pair[1].velocity - self.pair[0].velocity\n\n self.r = self.pos.length()\n self.v = self.vel.length()\n self.inv_r = 1.0 / self.r\n\n self.rvec = self.pos * self.inv_r\n self.rdot = (self.rvec * self.vel).sum() * self.rvec\n self.rdot_mag = self.rdot.length()\n\n self.vth = self.vel - self.rdot\n self.vth_mag = self.vth.length()\n self.vth_vec = self.vth / self.vth_mag", "def matthewscc(self):\n if not self.total_examples:\n return 0.\n\n true_pos = float(self.true_positives)\n false_pos = float(self.false_positives)\n false_neg = float(self.false_negatives)\n true_neg = float(self.true_negatives)\n terms = [(true_pos + false_pos),\n (true_pos + false_neg),\n (true_neg + false_pos),\n (true_neg + false_neg)]\n denom = 1.\n for t in filter(lambda t: t != 0., terms):\n denom *= t\n return ((true_pos * true_neg) - (false_pos * false_neg)) / math.sqrt(denom)", "def solve(self):\n raise NotImplementedError(\"This method needs to be implemented.\")", "def _solve(self) -> CasADiArrayType:\n pass", "def solve(self):\n \n raise NotImplementedError(\"not implemented!\")", "def nits(self):", "def test_inverse_c(self):\n for q in self.all:\n self.assertTrue((q * q.inverse()).almost_equal(q.inverse()*q))", "def lawsonite():\n\n rho = 3090.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 214.; C[0,1] = 69.; C[0,2] = 82.; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 226.; C[1,2] = 65.; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 259.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 60.; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 65.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 17.\n\n return C, rho", "def solve(self):\n is_valid = self.verify_sub_matrixes()\n \n if not is_valid:\n raise ValueError((\n \"El determinante es igual a cero \"\n \"el método no puede continuar\"\n ))\n \n (lower, upper) = self.doolittle_factorization()\n\n lower_solution_vector = lower.solve_matrix(matrix=None, vector=self.vector.vector)\n 
lower_solution_vector.print_vector()\n upper_solution_vector = upper.solve_matrix(\n matrix=None, vector=lower_solution_vector.vector)\n upper_solution_vector.print_vector()\n\n comprobation = self.matrix.comprobation(upper_solution_vector.vector)\n return comprobation", "def _init_numeric(self):\n from scipy.special import erf\n covbasis = np.zeros((self.nmax, self.nmax))\n overbasis = np.zeros((self.nmax, self.nmax))\n #Get local references to these variables so that we don't need `self`\n #all over in the overbasis calculation below.\n alpha = self.alpha\n rb = self.rb\n \n for i in range(self.nmax):\n for j in range(self.nmax):\n covbasis[j,i] = np.exp(-alpha * (rb[i] - rb[j])**2)\n overbasis[j,i] = (np.exp(-alpha*(rb[i]**2+rb[j]**2))*np.sqrt(2.)* \n alpha**1.5*(rb[i] + rb[j]) + \n alpha*np.exp(-0.5*alpha*(rb[i] - rb[j])**2)*\n np.sqrt(np.pi)*\n (1. + alpha*(rb[i] + rb[j])**2)*\n (1.0 + erf(np.sqrt(alpha/2.0)*(rb[i]+rb[j]))))\n \n overbasis /= np.sqrt(128. * alpha**5)\n\n from numpy.linalg import cholesky\n choloverlap = cholesky(overbasis)\n\n for i in range(self.nmax):\n for j in range(i):\n choloverlap[j,i] = 0.0\n\n from numpy.linalg import solve\n self.transformbasis = solve(covbasis, choloverlap)", "def dolomite():\n\n rho = 2840.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 205.; C[0,1] = 71.; C[0,2] = 57.4; C[0,3] = -19.5; C[0,4] = 13.7; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 205.; C[1,2] = 57.4; C[1,3] = 19.5; C[1,4] = -13.7; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 113.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 39.8; C[3,4] = 0.; C[3,5] = -13.7\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 39.8; C[4,5] = -19.5\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 67.\n\n return C, rho", "def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 
0]\n\n # Mark circuit as solved\n self.been_solved = True", "def __det3x3__(a):\r\n # val = +a[0,0] * ( a[1,1] * a[2,2] - a[2,1] * a[1,2] )\r\n # val += -a[0,1] * ( a[1,0] * a[2,2] - a[2,0] * a[1,2] )\r\n # val += +a[0,2] * ( a[1,0] * a[2,1] - a[2,0] * a[1,1] )\r\n val = +a[0] * (a[4] * a[8] - a[7] * a[5])\r\n val += -a[1] * (a[3] * a[8] - a[6] * a[5])\r\n val += +a[2] * (a[3] * a[7] - a[6] * a[4])\r\n return val", "def C_Na_eq():\n global C_Na, C_Mg, C_dNTP\n return C_Na + 120*sqrt(C_Mg - C_dNTP)", "def null_coroot(self):\n assert(self.cartan_type().is_affine())\n coef = self.cartan_type().acheck()\n return sum(coef[k]*self.simple_coroots()[k] for k in coef.keys())", "def alg(c):\n return c[0]*G[0] + c[1]*G[1] + c[2]*G[2]", "def method2(self):\n cres=np.zeros(self.NL,dtype=float) # List of invariants\n # The U matrices from Fukui's method; storage...\n Ux_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n for il in range(self.NL):\n # ... and calculation of U matrices for each layer\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.LDM[il,ix ,iy ,:,:]\n mat2=self.LDM[il,(ix%self.kS.Nx)+1 ,iy ,:,:]\n mat3=self.LDM[il,ix ,(iy%self.kS.Ny)+1 ,:,:]\n \n Ux_loc[ix,iy]=np.dot(np.conj(mat1.T),mat2)[1,1]\n Uy_loc[ix,iy]=np.dot(np.conj(mat1.T),mat3)[1,1]\n \n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_loc[ix,iy]*Uy_loc[ix+1,iy]/Ux_loc[ix,iy+1]/Uy_loc[ix,iy])\n cres[il]+=(ftemp/2./pi/1j).real # Layer specific topological invariant\n \n return cres", "def _coset_representative(self, g, H):\n if H.order() == 1:\n return g\n # The base of self must be an extension of H.base.\n if not(self.base[:len(H.base)] == H.base):\n self._schreier_sims(base=H.base)\n orbits = H.basic_orbits[:]\n h_transversals = [list(_.values()) for _ in H.basic_transversals]\n transversals = [list(_.values()) for _ in self.basic_transversals]\n base = self.base\n base_ordering = _base_ordering(base, self.degree)\n def step(l, x):\n gamma = sorted(orbits[l], key = lambda y: base_ordering[y^x])[0]\n i = [base[l]^h for h in h_transversals[l]].index(gamma)\n x = h_transversals[l][i]*x\n if l < len(orbits)-1:\n for u in transversals[l]:\n if base[l]^u == base[l]^x:\n break\n x = step(l+1, x*u**-1)*u\n return x\n return step(0, g)", "def _solve_explicit(self, initial_conditions):\n coeff = self.a ** 2 * self.tau / self.h ** 2\n current_solution = initial_conditions\n next_solution = np.empty_like(current_solution)\n solutions = []\n\n for t in self.t_grid:\n next_solution[1:-1] = (\n current_solution[1:-1]\n + (current_solution[:-2] - 2 * current_solution[1:-1] + current_solution[2:]) * coeff\n ) + self.rhs(self.x_grid[1:-1], t) * self.tau\n\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n next_solution[0] = self.left_bc(t)\n elif self.left_bc_type == \"NEUMANN\":\n next_solution[0] = (\n 4 * next_solution[1]\n - next_solution[2]\n - 2 * self.h * self.left_bc(t)\n ) / 3.0\n\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n next_solution[-1] = self.right_bc(t)\n elif self.right_bc_type == \"NEUMANN\":\n next_solution[-1] = (\n 4 * next_solution[-2]\n - next_solution[-3]\n + 2 * self.h * self.right_bc(t)\n ) / 3.0\n if self.mode == \"VISUALIZATION\":\n solutions.append((t, next_solution.copy()))\n current_solution = next_solution\n if self.mode == \"TEST\":\n # print(\"Result: \", current_solution.tolist())\n # print(\"Right answer: \", self.anl_solution.tolist())\n 
self._norma(current_solution)\n elif self.mode == \"VISUALIZATION\":\n return solutions", "def make_oneq_cliffords():\n ixyz_list = [g().to_matrix() for g in (IGate, XGate, YGate, ZGate)]\n ih_list = [g().to_matrix() for g in (IGate, HGate)]\n irs_list = [\n IGate().to_matrix(),\n SdgGate().to_matrix() @ HGate().to_matrix(),\n HGate().to_matrix() @ SGate().to_matrix(),\n ]\n oneq_cliffords = [\n Operator(ixyz @ ih @ irs) for ixyz in ixyz_list for ih in ih_list for irs in irs_list\n ]\n return oneq_cliffords", "def conjugate(self, ???):", "def c(\n dp: np.ndarray,\n ddp: np.ndarray,\n ) -> np.ndarray:\n\n return \\\n np.abs(ddp[0, :]*dp[1, :] - dp[0, :]*ddp[1, :]) / \\\n (dp[0, :]**2 + dp[1, :]**2)**1.5", "def extended_euclidean(self):\n self.a = gmpy2.invert(self.e1, self.e2)\n self.b = (float(self.gcd(self.e1, self.e2)-(self.a*self.e1)))/float(self.e2)", "def corr(self):\n pass", "def eq510821ad2(db, fy, fcp):", "def __abs__(self):\n return Factor().__build( VarSet(self.v) , np.fabs(self.t) )", "def fRCrim(Swe,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rh,Cwv,Ckv,Alpha,Tout):\n#\n# 1. Compute and normalise volumetric components:\n#\t-----------------------------------------------\n\tVw=PHIe*Swe\n\tVh=PHIe*(1-Swe)\n\tVwe=(Vw-Cwv)/(1-Cwv)\n\tVwe=ImposeLimits(Vwe,0,1)\n\tVke=(Vk-Ckv)/(1-Ckv)\n\tVke=ImposeLimits(Vke,0,1)\n\tSum=abs(Vc1)+abs(Vc2)+abs(Vc3)+abs(Vke)+abs(Vwe)+abs(Vh)\n\tVc1=abs(Vc1)/Sum\n\tVc2=abs(Vc2)/Sum\n\tVc3=abs(Vc3)/Sum\n\tVk=abs(Vk)/Sum\n\tVw=abs(Vw)/Sum\n\tVh=abs(Vh)/Sum\n#\n#\t2. Determine conductivity of components:\n#\t----------------------------------------\n\tSigc1=1/Rc1\n\tSigc2=1/Rc2\n\tSigc3=1/Rc3\n\tSigk=1/Rk\n\tSigw=1/Rw\n\tSigh=1/Rh\n#\n#\t3. Compute Conductivity:\n#\t========================\n\tTrm1=Vc1*(Sigc1**(1/Alpha))\n\tTrm2=Vc2*(Sigc2**(1/Alpha))\n\tTrm3=Vc3*(Sigc3**(1/Alpha))\n\tTrm4=(Vk**2.2)*(Sigk**(1/Alpha)) # Factor of 2.2 included to get data to fit to Yang et al\n\tTrm5=Vw*(Sigw**(1/Alpha))\n\tTrm6=Vh*(Sigh**(1/Alpha))\n\tCrf=(Trm1+Trm2+Trm3+Trm4+Trm5+Trm6)**Alpha\n#\n#\n# 4. Output result:\n#\t-----------------\n\tif(Tout==0):\n\t\tFr=Crf\n\telse:\n\t\tFr=1/Crf\n\treturn Fr", "def epsilon_delta(self):", "def Theory_Algebraic(N,Kappa,d) :\n\n\t# Calculate the radius from the epxcted mean degree:\n\tr = (1.0 / ((np.pi) ** 0.5)) * ((((Kappa) / N) * scipy.special.gamma((d + 2.0) / 2.0)) ** (1.0 / d))\n\n\t#Compute the algebraic connectivity:\n\tMu2 = Kappa- N*(r**(d/2.0))*scipy.special.jv( (d/2.0) , 2*math.pi*r )\n\n\treturn Mu2", "def conj(self, o): \n return (o.inv()) * self * o", "def invredc(A, B, C, D, y, v):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[1] # the number of samples is the number of columns of y\n\n # calculate system's dimensions: number of states, number of inputs and number of outputs\n n = A.shape[0] # number of states\n # m=B.shape[1] # number of inputs, maybe it's not necessary\n p = C.shape[0] # number of outputs\n\n # A. 
Output Basis Change\n # here the output basis change and its important quantities and matrices are calculated\n\n # rank of the feedforward matrix:\n r = np.linalg.matrix_rank(D)\n\n # to calculate the S1 matrix, we have partitioned the matrix into [S1a;S2a]\n # firstly, we obtain S1a\n # since D0 must possess full row rank (rank(D0)=r), a simple way to do that is to use the scipy.linalg.orth function\n D0 = (scilin.orth(D.transpose())).transpose()\n # calculating S1a as a solution of the problem S1a*D=D0 using the pseudoinverse (Moore-Penrose inverse):\n S1at = scilin.pinv(D.transpose()) @ D0.transpose()\n S1a = S1at.transpose()\n # S1b is the null space (kernel) of D from the left\n S1b = (scilin.null_space(D.transpose())).transpose()\n # assembling the S1 matrix\n S1 = np.concatenate((S1a, S1b), axis=0) # axis=0 concatenate vertically (row wise)\n\n # the C2 matrix is obtained by a partition of S1*C, which can by also obtained with the use of S1b\n # calculating C2\n C2 = S1b @ C\n # rank of C2\n q = np.linalg.matrix_rank(C2)\n\n # calculating the matrix S2, which is very similar to S1, and it is also partitioned as S2=[S2a;S2b]\n # since C2bar has to possess full row rank (rank(C2)=q)\n C2tilde = (scilin.orth(C2.transpose())).transpose()\n # calculating S2a as a solution of the problem S2a*C2=C2bar using the pseudoinverse (Moore-Penrose inverse):\n S2at = scilin.pinv(C2.transpose()) @ C2tilde.transpose()\n S2a = S2at.transpose()\n # S2b is the null space (kernel) of C2 from the left\n S2b = (scilin.null_space(C2.transpose())).transpose()\n # assembling the S2 matrix\n S2 = np.concatenate((S2a, S2b), axis=0) # axis=0 concatenate vertically (row wise)\n\n # now that we have S1 and S2, we can assemble the S matrix\n # we defined the notation: S=Sa*S1, where Sa is partitioned as Sa=[I 0;0 S2]=[Sa1 Sa2]\n # partitions of Sa\n Sa11 = np.identity(r)\n Sa12 = np.zeros((r, p - r))\n Sa21 = np.zeros((p - r, r))\n Sa22 = S2\n # assembling the columns of Sa, Sa=[Sa1 Sa2]\n Sa1 = np.concatenate((Sa11, Sa21), axis=0) # concatenate vertically (row wise)\n Sa2 = np.concatenate((Sa12, Sa22), axis=0) # concatenate vertically (row wise)\n # finally, assembling the matrix Sa:\n Sa = np.concatenate((Sa1, Sa2), axis=1) # concatenate horizontally (column wise)\n # obtaining the S matrix by the multiplication\n S = Sa @ S1\n\n # doing the transformation of the output ytilde=Sy\n ytilde = S @ y\n # we'll not partition the output yet, first, we'll do the State-Space Basis Change\n\n # B. State-Space Basis Change\n # in this section we'll do the state-space basis change of the system\n\n # the first step is the calculation of the transformation matrix, as defined in the paper\n # we'll call T^{-1} as M, so C2tilde*M=[0 I]. And we'll partition M as M=[M1 M2]. C2tilde*M=[C2tilde*M1 C2tilde*M2]\n # since rank(C2tilde)=q, nullity(C2tilde)=n-q\n # M1 can be defined as a basis of the null space of C2tilde\n M1 = scilin.null_space(C2tilde)\n # and M2 is the solution of the equation C2tilde*M2=I. 
To calculate this solution, we'll use the pseudoinverse again\n M2 = scilin.pinv(C2tilde)\n # now, we assemble the M matrix with the concatenate function\n M = np.concatenate((M1, M2), axis=1) # concatenate horizontally (column wise)\n # finally, we calculate the T matrix by inverting M\n T = np.linalg.inv(M)\n\n # now, we proceed to the transformation of the state-space matrices\n # transformation of the system's dynamic matrix\n Atilde = T @ A @ M\n # transformation of the system's input matrix\n Btilde = T @ B\n # transformation of the system's output matrix\n Ctilde = C @ M\n # transformation of the system's feedforward matrix (it's the same)\n # Dtilde=D # actually, this step is not necessary\n # transformation of the additional system input v\n vtilde = T @ v\n\n # in the next step, we need to partition the new system's matrices and outputs\n\n # partition of the outputs\n # y1 has r lines and N columns\n y1 = ytilde[0:r, :]\n # y2 has q lines and N columns, and it starts at the r+1 line (which in python is the r line since the vector index starts at 0)\n y2 = ytilde[r : r + q, :]\n # y3 is irrelevant, then, it will be neglected\n\n # partitioning the system matrices\n # firstly, the system's dynamic matrix Atilde\n A11 = Atilde[0 : n - q, 0 : n - q]\n A12 = Atilde[0 : n - q, n - q : n]\n A21 = Atilde[n - q : n, 0 : n - q]\n A22 = Atilde[n - q : n, n - q : n]\n # the system's input matrix Btilde\n B1 = Btilde[0 : n - q, :]\n B2 = Btilde[n - q : n, :]\n # the system's output matrix Ctilde\n C11 = Ctilde[0:r, 0 : n - q]\n C12 = Ctilde[0:r, n - q : n]\n\n # partition the additional input vtilde\n v1 = vtilde[0 : n - q, :]\n v2 = vtilde[n - q : n, :]\n\n # C. Reduction of State-Space Dimension\n # now, we'll do the reduction of the state-space system\n\n # following the equations in the paper\n # calculating y1hat\n y1hat = y1 - C12 @ y2\n # we have to discard the last sample to make the dimensions of y1hat and y2hat match\n y1hat = y1hat[:, 0 : N - 1]\n\n # calculating y2hat\n # preallocating variables before the loop\n y2hat = np.zeros((q, N - 1))\n # running the loop\n for k in range(\n 0, N - 1\n ): # the loop has to run N-1 times, from 0 to N-2, because of y2[k+1] on the equation\n y2hat[:, k] = y2[:, k + 1] - A22 @ y2[:, k] - v2[:, k]\n\n # assembling the reduced system's output vector\n yhat = np.concatenate((y1hat, y2hat), axis=0)\n\n # calculating the additional input vhat\n vhat = v1 + A12 @ y2\n # discarding the last sample\n vhat = vhat[:, 0 : N - 1]\n\n # now, we'll assemble the reduced state-space system\n # reduced system's dynamic matrix\n Ahat = A11\n # reduced system's input matrix\n Bhat = B1\n # reduced system's output matrix\n Chat = np.concatenate((C11, A21), axis=0) # concatenate vertically (row wise)\n # reduced system's feedforward matrix\n Dhat = np.concatenate((D0, B2), axis=0) # concatenate vertically (row wise)\n # calculating rhat, the new rank of the feedforward matrix Dhat (an important quantity of the algorithm)\n rhat = np.linalg.matrix_rank(Dhat)\n\n # calculating the new dimension of the reduced system\n # reduced system's state vector dimension\n nhat = n - q\n # reduced system's output vector dimension\n phat = r + q\n\n return Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat", "def __abs__(self):\n return self.square() ** 0.5", "def inductorenergy(L, I):\n return 1 / 2 * L * I ** 2", "def c1(adp1, adp2):\n\n def get_axis(adp):\n \"\"\"\n Returns ADP as its three principle axis representation.\n :param adp: List/Array type of length 6.\n 
:returns: List of three arrays of length 3.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n w, v = np.linalg.eig(adp)\n return [np.array((w[j] * v[:, j]).flatten().tolist()[0]) for j \\\n in xrange(3)]\n\n adp1_axis = get_axis(adp1)\n adp2_axis = get_axis(adp2)\n\n val = 0\n for i in xrange(3):\n addval = abs(norm(adp1_axis[i] - adp2_axis[i]))\n addval = addval * abs((1 - abs(np.dot(adp1_axis[i], adp2_axis[i]))))\n val += addval\n return val", "def Ncen(self, m):\n pass", "def _calc_C(self, lambdify=True):\n\n C = None\n C_func = None\n # check to see if we have our term saved in file\n C, C_func = self._load_from_file('C', lambdify)\n\n if C is None and C_func is None:\n # if no saved file was loaded, generate function\n print('Generating centrifugal and Coriolis compensation function')\n\n # first get the inertia matrix\n M = self._calc_M(lambdify=False)\n\n # C_{kj} = sum_i c_{ijk}(q) \\dot{q}_i\n # c_{ijk} = 1/2 * sum_i (\\frac{\\partial M_{kj}}{\\partial q_j} +\n # \\frac{\\partial M_{ki}}{\\partial q_j} - \\frac{\\partial M_{ij}}\n # {\\partial q_k})\n C = sp.zeros(self.N_JOINTS, self.N_JOINTS)\n for kk in range(self.N_JOINTS):\n for jj in range(self.N_JOINTS):\n for ii in range(self.N_JOINTS):\n dMkjdqi = M[kk, jj].diff(self.q[ii])\n dMkidqj = M[kk, ii].diff(self.q[jj])\n dMijdqk = M[ii, jj].diff(self.q[kk])\n C[kk, jj] += .5 * (dMkjdqi + dMkidqj - dMijdqk) * self.dq[ii]\n C[kk, jj] = C[kk, jj]\n C = sp.Matrix(C)\n\n # save to file\n abr_control.utils.os_utils.makedirs(\n '%s/C' % self.config_folder)\n cloudpickle.dump(C, open(\n '%s/C/C' % self.config_folder, 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return C\n\n if C_func is None:\n C_func = self._generate_and_save_function(\n filename='C', expression=C,\n parameters=self.q+self.dq)\n return C_func", "def _r_inv(self):\n raise NotImplementedError", "def __rtruediv__(self, other):\n value = -1 / (self.val * self.val)\n total = {self.var: other * value}\n return AutoDiffReverse(other / self.val, None, total)", "def norm(self):", "def usolve(self, ub):\n raise NotImplementedError", "def test_solvers_bc():\n tol = 3E-12 # Appropriate tolerance for these tests (P2, 20x20 mesh)\n import sympy as sym\n x, y = sym.symbols('x[0], x[1]')\n u = 1 + x**2 + 2*y**2\n f = -sym.diff(u, x, 2) - sym.diff(u, y, 2)\n f = sym.simplify(f)\n u_00 = u.subs(x, 0) # x=0 boundary\n u_01 = u.subs(x, 1) # x=1 boundary\n g = -sym.diff(u, y).subs(y, 1) # x=1 boundary\n r = 1000 # arbitrary function can go here\n s = u\n\n # Turn to C/C++ code for UFL expressions\n f = sym.printing.ccode(f)\n u_00 = sym.printing.ccode(u_00)\n u_01 = sym.printing.ccode(u_01)\n g = sym.printing.ccode(g)\n r = sym.printing.ccode(r)\n s = sym.printing.ccode(s)\n print('Test problem (C/C++):\\nu = %s\\nf = %s' % (u, f))\n print('u_00: %s\\nu_01: %s\\ng = %s\\nr = %s\\ns = %s' %\n (u_00, u_01, g, r, s))\n\n # Turn into FEniCS objects\n u_00 = Expression(u_00)\n u_01 = Expression(u_01)\n f = Expression(f)\n g = Expression(g)\n r = Expression(r)\n s = Expression(s)\n u_exact = Expression(sym.printing.ccode(u))\n\n # Define boundary conditions\n boundary_conditions = {0: {'Dirichlet': u_00},\n 1: {'Dirichlet': u_01},\n 2: {'Robin': (r, s)},\n 3: {'Neumann': g}}\n\n for Nx, Ny in [(3,3), (3,5), (5,3), (20,20)]:\n for degree in 1, 2, 3:\n for linear_solver in ['direct']:\n print('solving on 2(%dx%dx) mesh with P%d elements'\n 
% (Nx, Ny, degree)),\n print(' %s solver, %s function' %\n (linear_solver, solver_func.__name__))\n kappa = Constant(1)\n u, kappa = solver_bc(\n kappa, f, boundary_conditions, Nx, Ny, degree,\n linear_solver=linear_solver,\n abs_tol=0.1*tol,\n rel_tol=0.1*tol)\n # Make a finite element function of the exact u_D\n V = u.function_space()\n u_e_Function = interpolate(u_exact, V) # exact solution\n # Check that dof arrays are equal\n u_e_array = u_e_Function.vector().array() # dof values\n max_error = (u_e_array - u.vector().array()).max()\n msg = 'max error: %g for 2(%dx%d) mesh, degree=%d,'\\\n ' %s solver, %s' % \\\n (max_error, Nx, Ny, degree, linear_solver,\n solver_func.__name__)\n print(msg)\n assert max_error < tol, msg", "def blueschist_mafic():\n\n rho = 3190.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 190.79; C[0,1] = 62.28; C[0,2] = 52.94; C[0,3] = -0.44; C[0,4] = 4.68; C[0,5] = 0.6\n C[1,0] = C[0,1]; C[1,1] = 218.38; C[1,2] = 53.1; C[1,3] = -0.87; C[1,4] = 1.57; C[1,5] = 0.28\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 158.04; C[2,3] = -0.44; C[2,4] = 2.66; C[2,5] = -0.35\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 60.86; C[3,4] = -0.29; C[3,5] = 1.86\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 58.94; C[4,5] = -0.2\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 69.63\n\n return C, rho", "def test_2_layer():\r\n # angular frequency in radians * THz\r\n w = 100 * nu.THz\r\n # Relative permittivity of metal and dielectric\r\n em = -4.56 + 0.12j\r\n ed = 1.23 + 0.01j\r\n ex_list = ez_list = [ed, em]\r\n # Relative permeabilities\r\n mu_list = [1,1]\r\n # Dictionary of input parameters\r\n input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,\r\n 'ez_list': ez_list, 'mu_list': mu_list}\r\n \r\n # Calculate the theoretical kx\r\n theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))\r\n if theo_kx.imag < 0:\r\n theo_kx *= -1\r\n print('Theoretical kx:',\r\n '(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))\r\n \r\n # If I use the theoretical kx value, the mode should be correct and\r\n # all my tests should pass.\r\n params = deepcopy(input_params)\r\n params['kx'] = theo_kx\r\n params = find_all_params_from_kx(params)\r\n kzd, kzm = params['kz_list']\r\n # check that kz_list is correct\r\n assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))\r\n assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))\r\n # check that layer_bottom_list is correct\r\n assert params['layer_bottom_list'][0] == -inf\r\n assert params['layer_bottom_list'][1] == 0\r\n # Check that the boundary condition matrix agrees with hand-calculation\r\n bc_mat = bc_matrix(params)\r\n # ...top-left is Ex0down / H0down\r\n assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))\r\n # ...top-right is -Ex1up / H1up\r\n assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))\r\n # ...bottom-left is eps0 * Ez0down / H0down\r\n assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))\r\n # ...bottom-right is -eps1 * Ez1up / H1up\r\n assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))\r\n # Check that one of the eigenvalues is almost zero (compared to the size\r\n # of the matrix elements).\r\n eigenvalues = np.linalg.eig(bc_mat)[0]\r\n assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6\r\n # Check that the mode passes all tests.\r\n assert check_mode(params, thorough=True) is True\r\n # 
Check that I can scale the fields and it still passes all tests.\r\n params_scaled = rescale_fields(1.23+4.56j, params)\r\n assert check_mode(params_scaled, thorough=True) is True\r\n \r\n # Now try my kx-finding algorithm, to see if it finds the right value.\r\n kx_list = find_kx(input_params)\r\n print('kx_list:',\r\n ['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n kx = kx_list[0]\r\n assert_floats_are_equal(theo_kx, kx)\r\n \r\n plot_mode(params)\r\n \r\n print('If you see this message, all the tests succeeded!!')", "def _solve_implicit(self, initial_conditions):\n coeff = self.a ** 2 * self.tau / self.h ** 2\n l_and_u = (1, 1)\n ab = np.empty((3, self.n_x))\n # main diagonal\n ab[1] = 1 + 2.0 * coeff\n # upper and lower diagonals\n ab[0] = ab[2] = -coeff\n\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n ab[0][1] = 0 # upper diagonal\n ab[1][0] = 1 # main diagonal\n elif self.left_bc_type == \"NEUMANN\":\n ab[0][1] = 1 # upper diagonal\n ab[1][0] = -1 # main diagonal\n\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n ab[1][-1] = 1 # main diagonal\n ab[2][-2] = 0 # lower diagonal\n elif self.right_bc_type == \"NEUMANN\":\n ab[1][-1] = 1 # main diagonal\n ab[2][-2] = -1 # lower diagonal\n\n current_solution = initial_conditions\n solutions = []\n\n for t in self.t_grid:\n b = current_solution + self.tau * self.rhs(self.x_grid, t)\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n b[0] = self.left_bc(t)\n elif self.left_bc_type == \"NEUMANN\":\n b[0] = self.h * self.left_bc(t)\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n b[-1] = self.right_bc(t)\n elif self.right_bc_type == \"NEUMANN\":\n b[-1] = self.h * self.right_bc(t)\n\n next_solution = solve_banded(l_and_u, ab, b)\n if self.mode == \"VISUALIZATION\":\n solutions.append((t, next_solution.copy()))\n current_solution = next_solution\n if self.mode == \"TEST\":\n # print(\"Result: \", current_solution.tolist())\n # print(\"Right answer: \", self.anl_solution.tolist())\n self._norma(current_solution)\n elif self.mode == \"VISUALIZATION\":\n return solutions", "def olivine():\n\n rho = 3355.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 320.5; C[0,1] = 68.15; C[0,2] = 71.6; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 196.5; C[1,2] = 76.8; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 233.5; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 64.; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 77.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 78.7\n\n return C, rho", "def updateBD(self):\r\n # itereigenupdated is always up-to-date in the diagonal case\r\n # just double check here\r\n if self.itereigenupdated == self.countiter:\r\n return\r\n\r\n if self.sp.neg.cmuexp: # cave:\r\n self.update_exponential(self.Zneg, -self.sp.neg.cmuexp)\r\n # self.C += self.Zpos # pos update after Zneg would be the correct update, overall:\r\n # self.C = self.Zpos + Cs * Mh.expms(-self.sp.neg.cmuexp*Csi*self.Zneg*Csi) * Cs\r\n self.Zneg = np.zeros((self.N, self.N))\r\n\r\n if self.sigma_vec is not 1 and not np.all(self.sigma_vec == 1):\r\n self.C = dot(dot(np.diag(self.sigma_vec), self.C), np.diag(self.sigma_vec))\r\n self.sigma_vec[:] = 1\r\n\r\n if self.opts['CMA_const_trace'] in (True, 1, 2): # normalize trace of C\r\n if self.opts['CMA_const_trace'] == 
2:\r\n s = np.exp(np.mean(np.log(self.dC)))\r\n else:\r\n s = np.mean(self.dC)\r\n self.C /= s\r\n self.dC /= s\r\n self.C = (self.C + self.C.T) / 2\r\n # self.C = np.triu(self.C) + np.triu(self.C,1).T # should work as well\r\n # self.D, self.B = eigh(self.C) # hermitian, ie symmetric C is assumed\r\n\r\n if type(self.opts['CMA_eigenmethod']) == type(1):\r\n print('WARNING: option CMA_eigenmethod should be a function, not an integer')\r\n if self.opts['CMA_eigenmethod'] == -1:\r\n # pygsl\r\n # easy to install (well, in Windows install gsl binaries first,\r\n # set system path to respective libgsl-0.dll (or cp the dll to\r\n # python\\DLLS ?), in unzipped pygsl edit\r\n # gsl_dist/gsl_site_example.py into gsl_dist/gsl_site.py\r\n # and run \"python setup.py build\" and \"python setup.py install\"\r\n # in MINGW32)\r\n if 1 < 3: # import pygsl on the fly\r\n try:\r\n import pygsl.eigen.eigenvectors # TODO efficient enough?\r\n except ImportError:\r\n print('WARNING: could not find pygsl.eigen module, either install pygsl \\n' +\r\n ' or set option CMA_eigenmethod=1 (is much slower), option set to 1')\r\n self.opts['CMA_eigenmethod'] = 0 # use 0 if 1 is too slow\r\n\r\n self.D, self.B = pygsl.eigen.eigenvectors(self.C)\r\n\r\n elif self.opts['CMA_eigenmethod'] == 0:\r\n # TODO: thoroughly test np.linalg.eigh\r\n # numpy.linalg.eig crashes in 200-D\r\n # and EVecs with same EVals are not orthogonal\r\n self.D, self.B = np.linalg.eigh(self.C) # self.B[i] is a row and not an eigenvector\r\n else: # is overall two;ten times slower in 10;20-D\r\n self.D, self.B = Misc.eig(self.C) # def eig, see below\r\n else:\r\n self.D, self.B = self.opts['CMA_eigenmethod'](self.C)\r\n\r\n\r\n # assert(sum(self.D-DD) < 1e-6)\r\n # assert(sum(sum(np.dot(BB, BB.T)-np.eye(self.N))) < 1e-6)\r\n # assert(sum(sum(np.dot(BB * DD, BB.T) - self.C)) < 1e-6)\r\n idx = np.argsort(self.D)\r\n self.D = self.D[idx]\r\n self.B = self.B[:,idx] # self.B[i] is a row, columns self.B[:,i] are eigenvectors\r\n # assert(all(self.B[self.countiter % self.N] == self.B[self.countiter % self.N,:]))\r\n\r\n # qqqqqqqqqq\r\n if 11 < 3: # limit condition number to 1e13\r\n climit = 1e13 # cave: conditioncov termination is 1e14\r\n if self.D[-1] / self.D[0] > climit:\r\n self.D += self.D[-1] / climit\r\n for i in xrange(self.N):\r\n self.C[i][i] += self.D[-1] / climit\r\n\r\n if 11 < 3 and any(abs(sum(self.B[:,0:self.N-1] * self.B[:,1:], 0)) > 1e-6):\r\n print('B is not orthogonal')\r\n print(self.D)\r\n print(sum(self.B[:,0:self.N-1] * self.B[:,1:], 0))\r\n else:\r\n # is O(N^3)\r\n # assert(sum(abs(self.C - np.dot(self.D * self.B, self.B.T))) < N**2*1e-11)\r\n pass\r\n self.D **= 0.5\r\n self.itereigenupdated = self.countiter", "def __abs__(self):\r\n return math.sqrt(self*self)", "def __truediv__(self, other):\n try:\n value = -1 / (other.val * other.val)\n total = {self.var: 1 / other.val, other.var: value * self.val}\n return AutoDiffReverse(self.val / other.val, None, total)\n except AttributeError:\n total = {self.var: 1 / other}\n return AutoDiffReverse(self.val / other, None, total)", "def associated_coroot(self):", "def lizardite():\n\n rho = 2610.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 245.; C[0,1] = 50.; C[0,2] = 31.; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 245.; C[1,2] = 31.; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 23.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 11.6; C[3,4] = 0.; C[3,5] = 0.\n 
C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 11.6; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 97.5\n\n return C, rho", "def contraction_max_algos():\n return cutensor.contractionMaxAlgos()" ]
[ "0.65014106", "0.6379623", "0.63238484", "0.6098798", "0.5973083", "0.59521735", "0.5935254", "0.590939", "0.58923155", "0.58820564", "0.58624995", "0.5835705", "0.5805436", "0.58031327", "0.5798486", "0.5798486", "0.5775567", "0.5768856", "0.57159203", "0.57043415", "0.5688966", "0.5688351", "0.566874", "0.5655472", "0.56505555", "0.5609141", "0.5603492", "0.5597988", "0.55907845", "0.5573569", "0.5570107", "0.5563961", "0.55571586", "0.5554974", "0.5553669", "0.5531841", "0.5527357", "0.5520132", "0.55185604", "0.55172575", "0.5497346", "0.5478706", "0.54786664", "0.5471083", "0.54668033", "0.546451", "0.5456466", "0.54550314", "0.5448982", "0.5430029", "0.5426402", "0.5422368", "0.5421293", "0.54173654", "0.54162526", "0.54105115", "0.54034156", "0.53942907", "0.53936696", "0.5390781", "0.53813493", "0.53752816", "0.53747004", "0.53746146", "0.53739274", "0.5372719", "0.5370021", "0.53606737", "0.53604877", "0.5352616", "0.53468156", "0.53399783", "0.53324854", "0.5330611", "0.5327969", "0.532702", "0.53248394", "0.5324141", "0.5320963", "0.53203857", "0.53189087", "0.53184056", "0.5297844", "0.5295899", "0.5292856", "0.5290774", "0.5277163", "0.5270442", "0.52651954", "0.52606666", "0.52555466", "0.52551025", "0.5253237", "0.52514935", "0.52506757", "0.5249951", "0.5249826", "0.5249504", "0.52431977", "0.52422416", "0.523976" ]
0.0
-1
r""" Swaps the order of the tensor multiplications
def swap(self, adjacent_transposition):
    result = Tensor()
    for key_self in self.keys():
        # ensure that the swap can be made with the available slots
        if max(adjacent_transposition) < len(key_self):
            prefix = Tensor({Tensor._merge_keys(*key_self[0 : min(adjacent_transposition)]): self[key_self]})
            root = type(self)._clifford_swap(
                *key_self[min(adjacent_transposition) : max(adjacent_transposition) + 1]
            )
            postfix = Tensor({Tensor._merge_keys(*key_self[max(adjacent_transposition) + 1 :]): 1})
            result = result + prefix * root * postfix
        else:
            result = result + Tensor({key_self: self[key_self]})
    self.clear()
    self.update(result)
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mul_inplace(a, b):", "def transpose(self):\n order = list(self.order)\n order[-2], order[-1] = order[-1], order[-2]\n self.order = order", "def SwapSides(self):\n for c in self.reactants:\n c.coeff = -c.coeff", "def __matmul__(self, tensor):\n return self.matmul(tensor)", "def _eval_transpose(self):\n coeff, matrices = self.as_coeff_matrices()\n return MatMul(\n coeff, *[transpose(arg) for arg in matrices[::-1]]).doit()", "def _multi_matmul(arrays, order, i, j, constant=False) -> Tensor:\n if i == j:\n return arrays[i]\n else:\n return matmul(\n _multi_matmul(arrays, order, i, order[i, j], constant),\n _multi_matmul(arrays, order, order[i, j] + 1, j, constant),\n constant,\n )", "def MultTranspose(self, *args):\n return _hypre.HypreParMatrix_MultTranspose(self, *args)", "def swap_xy(tensor: tf.Tensor) -> tf.Tensor:\n return moveaxis(tensor, -2, -1)", "def flipud(n):\n times = lambda x: jnp.flipud(x)\n trans = lambda x: jnp.flipud(x)\n return Operator(times=times, trans=trans, shape=(n,n))", "def jmatswap(ind: int):\n return _jmswap[ind - 1]", "def __mul__(self, tensor):\n return self.mul(tensor)", "def tensor_mult(a, # n_1 x n_2 x ... x n_d tensor\n b, # m_{1} x m_{2} x ... x m_{l} tensor\n a_dims, # list of dimensions of a to broadcast multiply\n b_dims, # list of dimensions of b to broadcast multiply\n):\n \n assert len(a_dims) == len(b_dims), \"a_dims and b_dims should have the same length!\"\n assert np.all([a.shape[a_dims[i]] == b.shape[b_dims[i]] for i in range(len(a_dims))]), \"a_dims %s and b_dims%s dimensions do not match!\" %(a_dims, b_dims)\n\n d_a = a.ndim\n d_b = b.ndim\n #bring the relevant dimensions to the front\n missing_a = [i for i in range(d_a) if i not in a_dims]\n new_order_a = a_dims + missing_a\n a_t = np.transpose(a, tuple(new_order_a))\n missing_b = [i for i in range(d_b) if i not in b_dims]\n new_order_b = b_dims + missing_b\n b_t = np.transpose(b, tuple(new_order_b))\n\n #expand the tensors to make the shapes compatible\n a_t = np.reshape(a_t, list(a_t.shape)+len(missing_b)*[1])\n b_t = np.reshape(b_t, [b.shape[i] for i in b_dims]+len(missing_a)*[1]+[b.shape[i] for i in missing_b])\n\n #multiply\n c_t = a_t * b_t\n\n #reshape the results: a_dims ; missing_a ; missing_b -> original shape of a ; missing_b\n a_t_index = np.unique(new_order_a, return_index=True)[1].tolist()\n b_t_index = np.arange(d_a, d_a+d_b-len(a_dims)).tolist()\n c = np.transpose(c_t, a_t_index+b_t_index)\n return c", "def mul(self):\n a = self.pop()\n b = self.pop()\n c= a*b\n self.push(c)", "def _clifford_swap(cls, slot_i, slot_j) -> Tensor:\n\n return Tensor(\n {\n Tensor._merge_keys((slot_j,), (slot_i,)): -1,\n Tensor._merge_keys(): 2 * cls.symmetric_bilinear_form(slot_i, slot_j),\n }\n )", "def transpose():", "def _rewrite_multiply(self, node: saldag.Multiply):\n\n out_rel_cols = node.out_rel.columns\n operands = node.operands\n target_col = node.target_col\n\n # Update target column collusion set\n target_col_out = out_rel_cols[target_col.idx]\n\n target_col_out.coll_sets |= utils.coll_sets_from_columns(operands)\n\n # The other columns weren't modified so the collusion sets\n # simply carry over\n for in_col, out_col in zip(node.get_in_rel().columns, out_rel_cols):\n if in_col != target_col:\n out_col.coll_sets |= copy.deepcopy(in_col.coll_sets)", "def transpose(self):\n return self.conjugate()", "def transpose(m):\n\n pass", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = 
torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul", "def __imatmul__(self, tensor):\n # Note: Matching PyTorch convention, which is not in-place here.\n return self.matmul(tensor)", "def tensor_outer_product(tensor1, tensor2):\n shape_1 = tensor1.shape\n shape_2 = tensor2.shape\n s1 = len(shape_1)\n s2 = len(shape_2)\n \n shape_1 = shape_1 + (1, )*s2\n shape_2 = (1, )*s1 + shape_2\n return np.reshape(tensor1, shape_1) * np.reshape(tensor2, shape_2)", "def mult_operation(self):\n n1 = self.memory[self.memory[self._cursor + 1]]\n n2 = self.memory[self.memory[self._cursor + 2]]\n position = self.memory[self._cursor + 3]\n self.memory[position] = n1 * n2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1} * {n2} = {n1 * n2}')\n return", "def _kronecker_product(mat1: tf.Tensor, mat2: tf.Tensor) -> tf.Tensor:\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def symmetrize(self):\n if self.is_symmetric:\n return self\n else:\n return self.append(self.reverse()).squash().scale(0.5)", "def symmetrize(n):\n times = lambda x: jnp.concatenate((jnp.flipud(x), x))\n trans = lambda x: x[n:] + x[n-1::-1]\n return Operator(times=times, trans=trans, shape=(2*n,n))", "def transpose(self) -> None:\n ...", "def _transpose_shift(E):\n bsz, n_head, max_len, _ = E.size()\n zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))\n E = layers.reshape(x=layers.concat([E, zero_pad], axis=-1),\n shape=(bsz, n_head, -1, max_len))\n indice = layers.arange(start=0, end=max_len, dtype=int)\n E = layers.index_select(input=E, index=indice, dim=-2)\n E = layers.transpose(E, perm=[0, 1, 3, 2])\n return E", "def _apply_swap(self, state, axes, **kwargs):\n all_axes = list(range(len(state.shape)))\n all_axes[axes[0]] = axes[1]\n all_axes[axes[1]] = axes[0]\n return self._transpose(state, all_axes)", "def SqrtSwap():\n\n return Operator(np.array([[[[ 1.0, 0.0],\n [ 0.0, 0.5 * (1 + 1j)]],\n [[ 0.0, 0.0],\n [ 0.5 * (1 - 1j), 0.0]]],\n [[[ 0.0, 0.5 * (1 - 1j)],\n [ 0.0, 0.0]],\n [[ 0.5 * (1 + 1j), 0.0],\n [ 0.0, 1.0]]]]))", "def shortcut(self, states):\n l = len(states)\n t_ = tf.matmul(tf.concat(0, states), self._U_a)\n return tf.split(0, l, t_), l", "def transforms_multiply(t0s, t1s):\r\n \r\n return ut.matrix_multiply(t0s, t1s)", "def reorderEigenspaces(self, *order):\n order = self._reorder(order)\n if self._has(\"m\"):\n self._.m = tuple(self._.m[i] for i in order)\n if self._has(\"P\"):\n self._.P = Matrix(SR, [self._.P[i] for i in order])\n if self._has(\"Q\"):\n self._.Q = Matrix(SR, [[r[j] for j in order] for r in self._.Q])\n if self._has(\"q\"):\n self._.q.reorder(order)\n if self._has(\"qPolynomial_ordering\") and self._.qPolynomial_ordering:\n self._.qPolynomial_ordering = sorted(\n [tuple(order.index(i) for i in o)\n for o in self._.qPolynomial_ordering])", "def tensor_resort(inputs, tensor_order):\n pass", "def _cswap(i, j, S):\n N = _rswap(i, j, S.transpose()).transpose()\n return N", "def __imul__(self, tensor):\n return self.mul_(tensor)", "def compress(self, tensor):", "def MatMulOrder(D):\r\n\tnum = len(D)-1 # number of matrix in the chain\r\n\tprint(f\"There are {num} matrix to multiply\")\r\n\tM = [[0 for _ in range(num)] for _ in range(num)]\r\n\tP = [[0 for _ in range(num)] for _ in range(num)]\r\n\r\n\t# i要从大到小\r\n\t# i 
== j时, M[i][j]=0,所以不用更新\r\n\t# i-th矩阵到j-th矩阵的乘的最优值初始化为inf\r\n\tfor i in range(num-2, -1, -1):\r\n\t\tfor j in range(i+1, num):\r\n\t\t\tM[i][j] = 100000000\r\n\t\t\tfor k in range(i, j):\r\n\t\t\t\tnew = M[i][k] + M[k+1][j] + D[i]*D[k+1]*D[j+1]\r\n\t\t\t\tif new < M[i][j]:\r\n\t\t\t\t\tM[i][j] = new \r\n\t\t\t\t\tP[i][j] = k\r\n\treturn M, P", "def matrix_mult(m1, m2):\n pass", "def multiplies(x, y):\n x[:] *= y[:]\n return x", "def reverse_matrix(self):\n return SWAP.matrix @ self.matrix @ SWAP.matrix", "def matrix_sym_op(x):\n return (x + tf.transpose(x))/2", "def T(self):\n return Op('transpose', self)", "def swap(self, *args):\n return _osgAnimation.vectorMatrixKeyframe_swap(self, *args)", "def ttm(t, m, k):\n\n dim_list = [] # initialize a list to save dimension index to transpose the tensor reshapped from 2D matrix\n shape_list = [] # initialize a list to save the dimensions to reshape 2D matrix back to tensor\n total_dim = len(t.shape)\n for i in range(total_dim):\n dim_list.append((k - i) % total_dim)\n shape_list.append(t.shape[(k - i) % total_dim])\n dim_order = tuple(dim_list)\n shape_list[0] = m.shape[0]\n\n t_unfold = unfold_axis(t, k)\n t_mul = np.matmul(m, t_unfold)\n t_mul = np.reshape(t_mul,tuple(shape_list))\n t_mul = np.transpose(t_mul, dim_order)\n\n return t_mul", "def __rmul__(self,that):\n return self.__opExpand2(that, np.multiply)", "def _untransform(self, X: Tensor) -> Tensor:\n X_new = X.clone()\n X_new[..., self.indices] = 10.0 ** X_new[..., self.indices]\n return X_new", "def transpose(tensor):\n raise NotImplementedError", "def tf_kron(a: tf.Tensor,\n b: tf.Tensor) -> tf.Tensor:\n assert len(a.shape) == 2, \"a, should be 2x2 tensor\"\n assert len(b.shape) == 2, \"b, should be 2x2 tensor\"\n a_shape = list(b.shape) \n b_shape = list(b.shape)\n return tf.reshape(tf.reshape(a,[a_shape[0],1,a_shape[1],1])*tf.reshape(b,[1,b_shape[0],1,b_shape[1]]),[a_shape[0]*b_shape[0],a_shape[1]*b_shape[1]])", "def recursive_multiply(a, b):\n if len(a) == 2:\n return naive_multiply(a, b)\n\n a11 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a11):\n a11[index] = row[0:int(len(row) / 2)]\n\n a12 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a12):\n a12[index] = row[int(len(a) / 2):len(a)]\n\n a21 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a21):\n a21[index] = row[0:int(len(row) / 2)]\n\n a22 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a22):\n a22[index] = row[int(len(a) / 2):len(a)]\n\n b11 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b11):\n b11[index] = row[0:int(len(row) / 2)]\n\n b12 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b12):\n b12[index] = row[int(len(b) / 2):len(b)]\n\n b21 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b21):\n b21[index] = row[0:int(len(row) / 2)]\n\n b22 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b22):\n b22[index] = row[int(len(b) / 2):len(b)]\n\n c11 = matrix_add(recursive_multiply(a11, b11), recursive_multiply(a12, b21)) # C11 = A11*B11 + A12*B21\n c12 = matrix_add(recursive_multiply(a11, b12), recursive_multiply(a12, b22)) # C12 = A11*B12 + A12*B22\n c21 = matrix_add(recursive_multiply(a21, b11), recursive_multiply(a22, b21)) # C21 = A21*B11 + A22*B21\n c22 = matrix_add(recursive_multiply(a21, b12), recursive_multiply(a22, b22)) # C22 = A21*B12 + A22*B22\n\n # Append c12 to c11\n for row_index, row in enumerate(c11):\n for col_index, col in enumerate(c12):\n row.append(c12[row_index][col_index])\n\n # Append c22 to c21\n for row_index, row in enumerate(c21):\n 
for col_index, col in enumerate(c12):\n row.append(c22[row_index][col_index])\n\n # Append c21 to c11\n for i in c21:\n c11.append(i)\n\n return c11", "def transpose(ts: Tensor) -> Tensor:\n assert len(ts.shape) == 2\n return permute(ts, (1, 0))", "def transpose(self):\n pass", "def de_mult(self,z):\n if isinstance(z,np.ndarray) and z.size>1:\n assert np.all(np.diff(z)>0.)\n return (z+1.)**(3.*(1.+self.w))", "def permute(ts: Tensor, axes) -> Tensor:\n permute_op = PermuteOp(axes)\n return permute_op(ts, None)", "def __mul__(self, other):\r\n T = type(other)\r\n # mat4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return mat4(map(lambda x,other=other: x*other, self.mlist))\r\n # mat4*vec3\r\n if isinstance(other, _vec3):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n w = float(m41*other.x + m42*other.y + m43*other.z + m44)\r\n return _vec3(m11*other.x + m12*other.y + m13*other.z + m14, \r\n m21*other.x + m22*other.y + m23*other.z + m24, \r\n m31*other.x + m32*other.y + m33*other.z + m34)/w\r\n # mat4*vec4\r\n if isinstance(other, _vec4):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n return _vec4(m11*other.x + m12*other.y + m13*other.z + m14*other.w, \r\n m21*other.x + m22*other.y + m23*other.z + m24*other.w, \r\n m31*other.x + m32*other.y + m33*other.z + m34*other.w,\r\n m41*other.x + m42*other.y + m43*other.z + m44*other.w)\r\n # mat4*mat4\r\n if isinstance(other, mat4):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n n11,n12,n13,n14,n21,n22,n23,n24,n31,n32,n33,n34,n41,n42,n43,n44 = other.mlist\r\n return mat4( m11*n11+m12*n21+m13*n31+m14*n41,\r\n m11*n12+m12*n22+m13*n32+m14*n42,\r\n m11*n13+m12*n23+m13*n33+m14*n43,\r\n m11*n14+m12*n24+m13*n34+m14*n44,\r\n\r\n m21*n11+m22*n21+m23*n31+m24*n41,\r\n m21*n12+m22*n22+m23*n32+m24*n42,\r\n m21*n13+m22*n23+m23*n33+m24*n43,\r\n m21*n14+m22*n24+m23*n34+m24*n44,\r\n\r\n m31*n11+m32*n21+m33*n31+m34*n41,\r\n m31*n12+m32*n22+m33*n32+m34*n42,\r\n m31*n13+m32*n23+m33*n33+m34*n43,\r\n m31*n14+m32*n24+m33*n34+m34*n44,\r\n\r\n m41*n11+m42*n21+m43*n31+m44*n41,\r\n m41*n12+m42*n22+m43*n32+m44*n42,\r\n m41*n13+m42*n23+m43*n33+m44*n43,\r\n m41*n14+m42*n24+m43*n34+m44*n44)\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"", "def swap_qubits(self, axis1, axis2):\n\n self._t = np.swapaxes(self._t, 2*axis1, 2*axis2)\n self._t = np.swapaxes(self._t, 2*axis1 + 1, 2*axis2 + 1)\n return self", "def permute(self, ordering: np.ndarray, *, axis: int) -> None:\n\t\tif axis == 0:\n\t\t\tself.values = self.values[ordering, :]\n\t\telif axis == 1:\n\t\t\tself.values = self.values[:, ordering]\n\t\telse:\n\t\t\traise ValueError(\"axis must be 0 or 1\")", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)", "def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def __mul__(self,that):\n return self.__opExpand2(that, np.multiply)", "def mmultiply(self, matrix):\n try:\n result_matrix = [[0 for row in range(len(self.matrix))] for col in range(len(matrix[0]))]\n for i in range(len(self.matrix)):\n for j in range(len(matrix[0])):\n for k in 
range(len(matrix)):\n result_matrix[i][j] += self.matrix[i][k] * matrix[k][j]\n self.matrix = result_matrix\n except IndexError:\n pass\n pass", "def chain_matmul_square(As):\n\n As_matmul = As\n while As_matmul.shape[0] > 1:\n if As_matmul.shape[0] % 2:\n A_last = As_matmul[-1:]\n else:\n A_last = None\n \n As_matmul = torch.matmul(As_matmul[0:-1:2], As_matmul[1::2])\n if A_last is not None:\n As_matmul = torch.cat([As_matmul, A_last], dim=0)\n \n return As_matmul.squeeze(0)", "def forward_substitution(self):\r\n for col in range(0, self.SIZE):\r\n self.check_solvability(self.matrix[col][col], self.result[col])\r\n self.result[col] = self.divide(self.result[col], self.matrix[col][col])\r\n for row in range(col + 1, self.SIZE):\r\n self.result[row] -= (self.result[col] * self.matrix[row][col])\r\n return self.result", "def axis_element_wise_multiplication(t1, t2, which_axis):\n # assert len(K.int_shape(t1)) == len(K.int_shape(t2)) + 1, \"rank(t1) should be rank(t2) + 1\"\n slices = tf.unstack(t1, axis=which_axis)\n # assert K.int_shape(slices[0]) == K.int_shape(t2), \"Slices of t1 were not the same shape as t2\"\n multiplies = []\n for s in slices:\n multiplies.append(t2 * s)\n return tf.stack(multiplies, axis=2)", "def multiply(t):\n return mul(*t)", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def _untransform(self, X: Tensor) -> Tensor:\n pass # pragma: no cover", "def __mul__(self, othertr):\n res = self.dot(othertr)\n return res", "def multiply(self, layer):\n pass", "def contract(tensor):\n temp = np.einsum('ikma, jlan', tensor, tensor)\n M = np.zeros((tensor.shape[0]**2, tensor.shape[1]**2, tensor.shape[2], tensor.shape[3]))\n for i,j,k,l,m,n in it.product(*[range(x) for x in temp.shape]):\n M[i + tensor.shape[0]*j, k + tensor.shape[1]*l, m, n] = temp[i,j,k,l,m,n]\n return M", "def test_tensor_math_ops(free_alg):\n\n dr = free_alg\n p = dr.names\n r = p.R\n v = p.v\n w = Vec('w')\n x = IndexedBase('x')\n i, j, k = p.R_dumms[:3]\n a = sympify('a')\n\n v1 = dr.sum((i, r), x[i] * v[i])\n w1 = dr.sum((i, r), x[i] * w[i])\n assert v1.n_terms == 1\n assert w1.n_terms == 1\n\n v1_neg = -v1\n assert v1_neg == dr.sum((i, r), -x[i] * v[i])\n\n v1_1 = v1 + 2\n assert v1_1.n_terms == 2\n assert v1_1 == 2 + v1\n\n w1_1 = w1 + a\n assert w1_1.n_terms == 2\n assert w1_1 == a + w1\n\n prod = v1_1 * w1_1\n # Test scalar multiplication here as well.\n expected = (\n 2 * a + a * v1 + 2 * w1 +\n dr.sum((i, r), (j, r), x[i] * x[j] * v[i] * w[j])\n )\n assert prod.simplify() == expected.simplify()\n\n # Test the commutator operation.\n comm_v1v1 = v1 | v1\n assert comm_v1v1.simplify() == 0\n # Here the tensor subtraction can also be tested.\n comm_v1w1 = v1 | w1\n expected = (\n dr.sum((i, r), (j, r), x[i] * x[j] * v[i] * w[j]) -\n dr.sum((i, r), (j, r), x[j] * x[i] * w[i] * v[j])\n )\n assert comm_v1w1.simplify() == expected.simplify()\n\n alpha = symbols('alpha')\n assert alpha not in v1.free_vars\n tensor = v1 / alpha\n assert tensor.n_terms == 1\n terms = tensor.local_terms\n assert len(terms) == 1\n term = terms[0]\n assert term.sums == ((i, r),)\n assert term.amp == x[i] / alpha\n assert term.vecs == (v[i],)\n assert alpha in tensor.free_vars", "def shift_observable(self,M):\n u = np.array([[1]])\n for i in range(0,minsite):\n M[i] = np.tensordot(u, M[i],axes=(-1,1)).transpose(1,0,2)\n l,u = self.left_cannonical(M[i])\n M[i] = l", "def kronecker_product(mat1, mat2):\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = 
mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def product(self):\n return self.right[self.i:] + self.left[:self.i], self.left[self.i:] + self.right[:self.i]", "def _unroll_block_matrix(mat1: tf.Tensor) -> tf.Tensor:\n n_dim, m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [n_dim, m1, 1, n1])\n mat2 = tf.eye(n_dim, dtype=tf.float64)\n mat2_rsh = tf.reshape(mat2, [n_dim, 1, n_dim, 1])\n return tf.reshape(mat1_rsh * mat2_rsh, [n_dim * m1, n_dim * n1])", "def _multi_matmul_chain_order(arrays):\n n = len(arrays)\n # p stores the dimensions of the matrices\n # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]\n # Using -2 to generalize for shapes that are more than 2 dimmensions\n p = [a.shape[-2] for a in arrays] + [arrays[-1].shape[-1]]\n # m is a matrix of costs of the subproblems\n # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}\n m = np.zeros((n, n), dtype=np.double)\n # s is the actual ordering\n # s[i, j] is the value of k at which we split the product A_i..A_j\n s = np.empty((n, n), dtype=np.intp)\n\n for l in range(1, n):\n for i in range(n - l):\n j = i + l\n m[i, j] = np.inf\n for k in range(i, j):\n q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1]\n if q < m[i, j]:\n m[i, j] = q\n s[i, j] = k # Note that Cormen uses 1-based index\n return s", "def lazy_matrix_mul(m_a, m_b):\n return (np.matmul(m_a, m_b))", "def symmetrize(a):\n return a + a.T - np.diag(a.diagonal());", "def _swap(self, i, j, k):\n\t\tif self.verbose:\n\t\t\tprint(i, k)\n\t\t\tprint(i, j)\n\t\t\tprint(j, k)\n\t\tself.arrangement[i],self.arrangement[k] = self.arrangement[k],self.arrangement[i]\n\t\tself.arrangement[i],self.arrangement[j] = self.arrangement[j],self.arrangement[i]\n\t\tself.arrangement[j],self.arrangement[k] = self.arrangement[k],self.arrangement[j]", "def product_on_basis(self, t1, t2):\n return tensor( (module.monomial(x1)*module.monomial(x2) for (module, x1, x2) in zip(self._sets, t1, t2)) ) #.", "def test_tensors_can_be_rewritten(free_alg):\n\n dr = free_alg\n p = dr.names\n v = Vec('v')\n a, b = p.R_dumms[:2]\n\n x = IndexedBase('x')\n o = IndexedBase('o')\n y = IndexedBase('y')\n z = IndexedBase('z')\n\n tensor = dr.einst(\n x[a] * v[a] + o[a, b] * y[b] * v[a] + z[b] * v[b] # Terms to rewrite.\n + z[a, b] * v[a] * v[b] # Terms to keep.\n )\n\n w = Wild('w')\n r = IndexedBase('r')\n rewritten, defs = tensor.rewrite(v[w], r[w])\n\n assert rewritten == dr.einst(\n z[a, b] * v[a] * v[b] + r[a] * v[a] + r[b] * v[b]\n )\n assert len(defs) == 2\n assert r[a] in defs\n assert defs[r[a]] == dr.einst(x[a] + o[a, b] * y[b])\n assert r[b] in defs\n assert defs[r[b]] == dr.sum(z[b])", "def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)", "def reorder(self, order):\n order = np.array(order)\n self.matrix = self.matrix[order, :]\n self.matrix = self.matrix[:, order]\n self.order = self.order[order]\n self.calculate_fitness()\n return self.matrix", "def __rmul__(self, other):#标量乘法\n if isinstance(other, numbers.Number):\n pass\n # \n # TODO - your code here\n #\n result = [];\n row_result = [];\n \n for row in self.g:\n row_result = [m*other for m in row];\n result.append(row_result);\n return Matrix(result);", "def transpose(self):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n return mat4(m11,m21,m31,m41,\r\n m12,m22,m32,m42,\r\n m13,m23,m33,m43,\r\n m14,m24,m34,m44)", "def __mul__(self, 
other):\n return Matrix3(\n self.i * other,\n self.j * other,\n self.k * other,\n )", "def test_tensor_network_flip(self):\n circuit = jet.Circuit(num_wires=1)\n circuit.append_gate(jet.PauliX(), wire_ids=[0])\n tn = circuit.tensor_network()\n\n tensor = tn.contract()\n assert tensor.indices == [\"0-1\"]\n assert tensor.shape == [2]\n assert tensor.data == pytest.approx([0, 1])", "def make_mult_op0(k: int):\n op = make_mult_op(k)\n i = np.concatenate((np.arange(0, k), np.arange(k + 1, 2 * k + 1)))\n j = i[:, np.newaxis]\n\n def op0(v: V) -> M:\n \"\"\"Multiplication operator function.\n\n :v: Vector of shape (2 * 4 + 1,).\n :returns: Toeplitz matrix m of shape (2 * k, 2 * k).\n\n \"\"\"\n m = op(v)\n m0: M = m[i, j]\n return m0\n return op0", "def test_flip_vectors(self):\r\n m_matrix = array([[1.0, 0.0, 1.0], [2.0, 4.0, 4.0]])\r\n jn_matrix = array([[1.2, 0.1, -1.2], [2.5, 4.0, -4.5]])\r\n new_matrix = _flip_vectors(jn_matrix, m_matrix)\r\n assert_almost_equal(new_matrix, array([[1.2, 0.1, 1.2], [2.5, 4.0, 4.5]]))", "def _matmul_broadcast(x, y, name):\n with tf.variable_scope(name) as scope:\n return tf.reduce_sum(\n tf.nn.dropout(x[..., tf.newaxis] * y[..., tf.newaxis, :, :],1), axis=-2\n )", "def test_flip_vectors(self):\n m_matrix = array([[1.0, 0.0, 1.0], [2.0, 4.0, 4.0]])\n jn_matrix = array([[1.2, 0.1, -1.2], [2.5, 4.0, -4.5]])\n new_matrix = _flip_vectors(jn_matrix, m_matrix)\n assert_almost_equal(new_matrix, array([[1.2, 0.1, 1.2], [2.5, 4.0, 4.5]]))", "def mul(Z,X,Y):", "def __mul__(left, right):\n \n if isinstance(left, Plucker) and isinstance(right, Plucker):\n # reciprocal product\n return np.dot(left.uw, right.v) + np.dot(right.uw, left.v)\n elif isinstance(left, Plucker) and arg.ismatrix(right, (4,None)):\n return left.skew @ right; # postmultiply by 4xN", "def matrixMul(self, matrix, matrix2):\n matrix0 = matrix[:]\n matrix[0] = matrix0[0] * matrix2[0] + matrix0[2]*matrix2[1] # + matrix0[4]*0\n matrix[1] = matrix0[1] * matrix2[0] + matrix0[3]*matrix2[1] # + matrix0[5]*0\n matrix[2] = matrix0[0] * matrix2[2] + matrix0[2]*matrix2[3] # + matrix0[4]*0\n matrix[3] = matrix0[1] * matrix2[2] + matrix0[3]*matrix2[3] # + matrix0[5]*0\n matrix[4] = matrix0[0] * matrix2[4] + matrix0[2]*matrix2[5] + matrix0[4]\n matrix[5] = matrix0[1] * matrix2[4] + matrix0[3]*matrix2[5] + matrix0[5]", "def prod(tensor, axis=None):\n raise NotImplementedError", "def _swap_axis(input_tensor, dim_index, last_index, name=None):\n return array_ops.transpose(\n input_tensor,\n array_ops.concat([\n math_ops.range(dim_index), [last_index],\n math_ops.range(dim_index + 1, last_index), [dim_index]\n ], 0),\n name=name)", "def mul_dense(x, y): # pragma: no cover\n return x * y", "def swap3(d, n, v, i, j, k):\n for a in range(n):\n for m in range(d):\n if m == i or m == j or m == k:\n continue\n x = a*d*d + d*i + m\n y = a*d*d + d*j + m\n z = a*d*d + d*k + m\n v[x],v[y],v[z] = v[z],v[x],v[y]\n\n x = a*d*d + d*m + i\n y = a*d*d + d*m + j\n z = a*d*d + d*m + k\n v[x],v[y],v[z] = v[z],v[x],v[y]\n\n x = a*d*d + d*i + i\n y = a*d*d + d*j + j\n z = a*d*d + d*k + k\n v[x],v[y],v[z] = v[z],v[x],v[y]\n\n x = a*d*d + d*i + j\n y = a*d*d + d*j + k\n z = a*d*d + d*k + i\n v[x],v[y],v[z] = v[z],v[x],v[y]\n\n x = a*d*d + d*i + k\n y = a*d*d + d*j + i\n z = a*d*d + d*k + j\n v[x],v[y],v[z] = v[z],v[x],v[y]", "def build_symmetry_operations(symmetry: List[Any]) -> None:\n dim = len(symmetry[0][0])\n unit = numpy.identity(dim, dtype=int)\n for permutation in symmetry:\n perm = unit[:, numpy.argsort(permutation[0])]\n 
permutation[0] = perm", "def permute(variable, output_order=('t', 'z', 'zb', 'y', 'x')):\n input_dimensions = variable.dimensions\n\n # filter out irrelevant dimensions\n dimensions = [x for x in output_order if x in input_dimensions]\n\n # create the mapping\n mapping = [dimensions.index(x) for x in input_dimensions]\n\n if mapping:\n return np.transpose(variable[:], mapping)\n else:\n return variable[:] # so that it does not break processing \"mapping\"" ]
[ "0.6521079", "0.62019324", "0.6163201", "0.6071542", "0.60506517", "0.5975764", "0.59555125", "0.59367365", "0.59306496", "0.5920101", "0.59189886", "0.58538663", "0.5833324", "0.5821733", "0.5819354", "0.580724", "0.5802979", "0.5783564", "0.5771913", "0.5763691", "0.57438606", "0.5732729", "0.5717387", "0.5692661", "0.5671847", "0.5662415", "0.56556594", "0.5655534", "0.5645948", "0.564113", "0.56292206", "0.56146884", "0.560915", "0.5608644", "0.55944586", "0.5587494", "0.5556741", "0.55460656", "0.55404395", "0.55393064", "0.5538805", "0.5537251", "0.5506557", "0.5504713", "0.55037403", "0.5500213", "0.5497053", "0.5485069", "0.5468826", "0.5461367", "0.5451236", "0.54495263", "0.5438175", "0.54267573", "0.5423746", "0.5412524", "0.54123896", "0.54040635", "0.53829116", "0.5377371", "0.5372326", "0.53652585", "0.5363752", "0.5359804", "0.53587943", "0.535842", "0.53569883", "0.53409415", "0.5338667", "0.53330284", "0.53329235", "0.5327938", "0.53094774", "0.5304432", "0.5301865", "0.5297046", "0.5288386", "0.5287278", "0.5285879", "0.5280294", "0.5279608", "0.52717817", "0.52707934", "0.52683586", "0.52576524", "0.5253188", "0.5249182", "0.52459973", "0.5245608", "0.52402836", "0.5237659", "0.52374154", "0.523732", "0.5228228", "0.5226155", "0.52201974", "0.5218717", "0.52180225", "0.52179754", "0.5215988" ]
0.541512
55
For each entry in the connection matrix, examine the points that are supposed to be connected and, if they don't match, change the connection matrix to reflect the actual connection order.
def whereConnect(self, refsec, fixsec):
    ref10 = self.sections[refsec][0]
    ref11 = self.sections[refsec][-1]
    fix00 = self.sections[fixsec][0]
    fix01 = self.sections[fixsec][-1]
    if ref10 == fix00:
        return 0  # use the 0th node's rad of ref sec for new rad
    elif ref10 == fix01:
        return 0
    elif ref11 == fix00:
        return 1
    elif ref11 == fix01:
        return 1
    else:
        print('No valid connection found between sections %i and %i' % (refsec, fixsec))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_connection_between_nodes(self):\n\n for i, node in enumerate(self.list_empty_nodes):\n line = node.labyrinth_position[0]\n column = node.labyrinth_position[1]\n\n for j in range(i+1, len(self.list_empty_nodes)):\n line_j = self.list_empty_nodes[j].labyrinth_position[0]\n column_j = self.list_empty_nodes[j].labyrinth_position[1]\n \n if i != j and ((line == line_j and column == column_j - 1) \\\n or (line == line_j and column == column_j + 1) \\\n or (column == column_j and line == line_j - 1) \\\n or (column == column_j and line == line_j + 1)) \\\n and (not node in self.list_empty_nodes[j].connected_to) \\\n and (not self.list_empty_nodes[j] in node.connected_to):\n node.connected_to.append(self.list_empty_nodes[j])\n self.list_empty_nodes[j].connected_to.append(node)", "def connectivity_matrix(self):\n # TODO: make this more memory efficient by ordering i,j in code when needed.\n temp = []\n for i in range(self.n_atom):\n for j in range(i+1, self.n_atom):\n if self.bond(i, j):\n temp.append([i+1, j+1])\n self.connect = np.asarray(temp)", "def _refine_matrix_with_additional_connections(self):\n new_graph = self.graph.copy()\n for node in tqdm.tqdm(self.graph.nodes(), disable=not self.verbose):\n if self.graph.node[node][\"type\"] == \"hashtag\":\n for neighbour in self.graph.neighbors(node):\n if self.graph.node[neighbour][\"type\"] == \"username\":\n for other_node in self.graph.neighbors(neighbour):\n if self.graph.node[other_node][\"type\"] == \"hashtag\" \\\n and not self.graph.has_edge(node, other_node) \\\n and not node == other_node:\n new_graph.add_edge(node, other_node)\n self.graph = new_graph", "def _connect_neighbours(self):\n for prev in self.unvisited:\n for next in self.unvisited:\n if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):\n self.graph.addEdge((prev, next))\n self.visited.add(prev)\n self.visited.add(next)\n if self._find_intersection():\n self.intersection.append(prev)\n self.intersection.append(next)", "def calcConn(self,conMat):\n for i in range(len(conMat)):\n for j in range(i,len(conMat)):\n if conMat[i,j]==1:\n self.drawLine(i,j)", "def handle_diagonals_crossing_connections(self, conn: Connection):\n if not self.is_connection_diagonal(conn):\n return False\n j1 = self.get_junc_from_node(conn.me)\n j2 = self.get_junc_from_node(conn.other)\n # check if top-left to bottom-right diagonal of top-right to bottom-left diagonal\n indices_diff = (j1.indices.row - j2.indices.row, j1.indices.col - j2.indices.col)\n if indices_diff[0] == indices_diff[1]:\n # top-left to bottom-right\n top_left = j1 if indices_diff[0] == -1 else j2 # else diff is 1\n top_right = self.get_junc((top_left.indices.row, top_left.indices.col + 1))\n bottom_left = self.get_junc((top_left.indices.row + 1, top_left.indices.col))\n if self.are_juncs_connected(top_right, bottom_left):\n # print(conn, top_right, bottom_left, sep=\"\\n\")\n # we should remove the connection.\n if bottom_left.right.node_id in top_right.down.get_connections_ids():\n top_right.down.remove_connection_by_id(bottom_left.right.node_id)\n if bottom_left.up.node_id in top_right.left.get_connections_ids():\n top_right.left.remove_connection_by_id(bottom_left.up.node_id)\n else:\n # top-right to bottom-left\n top_right = j1 if indices_diff[0] == -1 else j2 # else diff is 1\n top_left = self.get_junc((top_right.indices.row, top_right.indices.col - 1))\n bottom_right = self.get_junc((top_right.indices.row + 1, top_right.indices.col))\n if 
self.are_juncs_connected(top_left, bottom_right):\n # print(conn, top_left, bottom_right, sep=\"\\n\")\n # we should remove the connection.\n if bottom_right.left.node_id in top_left.down.get_connections_ids():\n top_left.down.remove_connection_by_id(bottom_right.left.node_id)\n if bottom_right.up.node_id in top_left.right.get_connections_ids():\n top_left.right.remove_connection_by_id(bottom_right.up.node_id)", "def alter_connection_order(connections, order, chip):\n # Sort the connections by distance between gates from shortest to longest\n if order >= 2:\n length_order = {}\n\n for connect in connections:\n reorder = connect.strip(\"\\n\").split(\",\")\n source_coords = [chip.gates[reorder[0]][\"x\"], chip.gates[reorder[0]][\"y\"], 0]\n target_coords = [chip.gates[reorder[1]][\"x\"], chip.gates[reorder[1]][\"y\"], 0]\n gate_dif = abs(source_coords[0] - target_coords[0]) + abs(source_coords[1] - target_coords[1])\n\n # Check if there are gates with the same distance\n while gate_dif in length_order:\n gate_dif += .1\n\n length_order[gate_dif] = connect\n\n sort = sorted(length_order)\n connections = [length_order[key] for key in sort]\n\n # Reverse the connections order\n if order == 1 or order == 3:\n connections = connections[::-1]\n\n return connections", "def check_if_connected(self):\n def matrix_to_list():\n \"\"\"Creates a adjacency list from matrix.\n\n Returns:\n AdjList: List of successors for each vertex.\n \"\"\"\n graph = {\n node: [\n neighbour for neighbour in range(self.rank) if self.matrix[node, neighbour] != inf\n ] for node in range(self.rank)\n }\n return graph\n\n def dfs(visited: list, graph: AdjList, node: int):\n \"\"\"DFS algorithm for traversing.\"\"\"\n if node not in visited:\n visited.append(node)\n for neighbour in graph[node]:\n dfs(visited, graph, neighbour)\n\n # check_if_connected body.\n adj_list = matrix_to_list()\n visited = list()\n dfs(visited, adj_list, 0)\n if len(visited) != self.rank:\n raise ValueError('Graph not connected!')", "def add_pconn(self):\n self.use_pconn= True\n self.pconn = []\n for i,c in enumerate(self.conn):\n atoms_pconn = []\n atoms_image = []\n for ji, j in enumerate(c):\n # If an atom or vertex is connected to another one multiple times (in an image), this\n # will be visible in the self.conn attribute, where the same neighbour will be listed\n # multiple times.\n # Sometimes, the distances are a bit different from each other, and in this case, we\n # have to increase the threshold, until the get_distvec function will find all imgis.\n n_conns = c.count(j)\n t = 0.01\n while True:\n d,r,imgi = self.get_distvec(i,j,thresh=t)\n t += 0.01\n if n_conns == len(imgi):\n break\n if len(imgi) == 1:\n # only one neighbor .. all is fine\n atoms_pconn.append(images[imgi[0]])\n atoms_image.append(imgi[0])\n else:\n # we need to assign an image to each connection\n # if an atom is connected to another atom twice this means it must be another\n # image\n for ii in imgi:\n # test if this image is not used for this atom .. 
then we can use it\n if atoms_image.count(ii)==0:\n atoms_image.append(ii)\n atoms_pconn.append(images[ii])\n else:\n # ok, we have this image already\n use_it = True\n #print(c, \"=>\", j)\n #print(atoms_image)\n for k, iii in enumerate(atoms_image):\n #print('k',k)\n if (iii == ii) and (c[k] == j): use_it=False\n if use_it:\n atoms_image.append(ii)\n atoms_pconn.append(images[ii])\n self.pconn.append(atoms_pconn)\n #if len(atoms_pconn) != len(c): print(\"AOSUHDAPUFHPOUFHPWOUFHPOUDHSPUODHASIUDHAUSIDHSD\")\n return\n\n # 'na',lower(label),xyz,i,j)", "def connect(self, selected):\n # Mise a jour de la matrice laplacienne\n self.LaplacianMatrix[selected[0],selected[1]] = -1\n self.LaplacianMatrix[selected[1],selected[0]] = -1\n for i in selected[0] : self.LaplacianMatrix[i,i] += 1 \n for i in selected[1] : self.LaplacianMatrix[i,i] += 1 \n # Mise a jour de la liste d'adjacence\n self.addEdge(selected[0],selected[1])\n self.addEdge(selected[1],selected[0])", "def connect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n app.edges[src_id].add(trg_id)\n self.mark_as_unsaved()\n self.update()", "def rotateconnections(self,cells,cellkey):\n cell = cells[cellkey]\n for c in cell.conns:\n if c == 'u':\n key = str(cell.xpos) + str(cell.ypos - 1)\n elif c == 'd':\n key = str(cell.xpos) + str(cell.ypos + 1)\n elif c == 'l':\n key = str(cell.xpos-1) + str(cell.ypos)\n elif c == 'r':\n key = str(cell.xpos +1) + str(cell.ypos) \n self.rotatecell(cells[key])", "def convert_connections(self, connections):\n model = self.model\n for conn in getattr(model, u'connection', []):\n comp1 = model.get_component_by_name(conn.map_components.component_1)\n comp2 = model.get_component_by_name(conn.map_components.component_2)\n for mapping in conn.map_variables:\n var1 = model.get_variable_by_name(comp1.name, mapping.variable_1)\n var2 = model.get_variable_by_name(comp2.name, mapping.variable_2)\n if frozenset([var1, var2]) in connections:\n self.convert_mapping(mapping, comp1, comp2, var1, var2)", "def test_connections_updated(self):\n assert self.agent_config.connections == {self.new_connection_id}", "def set_conn_matrix(self, vector):\n\n if len(vector) != self.get_conn_matrix_len():\n self.logger.error(\"Vector size (\" + str(len(vector)) + \") should match with connection matrix \" +\n \"size (\" + str(self.get_conn_matrix_len()) +\n \"). 
Please use the self.get_matrix_len method to determine it!\")\n else:\n for line in sorted(self.connection_matrix):\n for j in range(len(self.connection_matrix[line])):\n self.connection_matrix[line][j] = vector[j]\n\n self.logger.debug(\"Connection matrix updated: \" + str(self.connection_matrix))", "def config_connection_matrix(self):\n for leg in self.legs.values():\n for m in leg[\"muscles\"]:\n if \"brain_sig\" and \"name\" in m:\n self.connection_matrix[m[\"name\"]] = [0] * self.brain[\"n_osc\"]\n self.connection_matrix[m[\"name\"]][m[\"brain_sig\"] - 1] = 1.", "def test_neighbors(self):\n self.setup()\n graph_rep = nglpy.Graph(self.points, self.graph, self.max_neighbors,\n self.beta)\n expected_graph = {0: (1, ), 1: (0, 2, 4), 2: (1, 3), 3: (2, ),\n 4: (1, )}\n\n for i in range(len(self.points)):\n expected = list(expected_graph[i])\n actual = sorted(graph_rep.neighbors(i))\n msg = '\\nNode {} Connectivity:'.format(i)\n msg += '\\n\\texpected: {}\\n\\tactual: {} '.format(expected, actual)\n self.assertEqual(expected, actual, msg)\n\n self.assertEqual(graph_rep.neighbors(), expected_graph)", "def init_board(self, size):\n # One entry for every node, if diamond all will be filled with pieces, if triange half of matrix including \n # diagonal from top left to bottom right will be filled\n self.board = [[False for i in range(size)] for j in range(size)] \n\n # One entry for every node pair (i, j), where cM(i, j) = direction enum if there is a connection from i to j. \n # (i, i) does not have a connection\n self.connection_matrix = [[False for i in range(size*size)] for j in range(size*size)]\n if self.shape == ShapeType.DIAMOND:\n for node_i in range(size*size):\n top_boundry = node_i < size # Check if node is on top of board\n left_boundry = node_i % size == 0 # Check if node is in leftmost column in board\n right_boundry = (node_i + 1) % size == 0 # Check if node is in rightmost column in board\n bottom_boundry = node_i > size*size-1-size # Check if node is in bottommost coulmn in board\n \n # See docs/Diamond_Connection_Matrix.png for visualization\n if not top_boundry:\n self.connection_matrix[node_i][node_i-size] = DirectionType.UP_RIGHT\n if not top_boundry and not right_boundry:\n self.connection_matrix[node_i][node_i-size+1] = DirectionType.RIGHT\n if not right_boundry:\n self.connection_matrix[node_i][node_i+1] = DirectionType.DOWN_RIGHT\n if not bottom_boundry:\n self.connection_matrix[node_i][node_i+size] = DirectionType.DOWN_LEFT\n if not bottom_boundry and not left_boundry:\n self.connection_matrix[node_i][node_i+size-1] = DirectionType.LEFT\n if not left_boundry:\n self.connection_matrix[node_i][node_i-1] = DirectionType.UP_LEFT\n \n elif self.shape == ShapeType.TRIANGLE:\n for node_i in range(size*size):\n # check if node_i is in the empty triangle. 
\n # No proof for this but some sketching suggested the formula, and the formula worked with empirical testing\n # for many different sizes\n # == gives on diagonal to the right of main diagonal through matrix, greater gives the numbers on the rest of the row\n # basic intuition: size-node_i//size-1 gives how many of the nodes on a row in the board matrix are empty, \n # and the rest checks if the node_i is in such an area\n triangle_check = node_i%size >= size - (size - node_i//size - 1) \n if triangle_check: # If it is in the empty side there should be no connections so skip ahead\n continue\n\n top_boundry = node_i < size # Checks if node is on top of board\n left_boundry = node_i % size == 0 # Check if node is in leftmost column in board\n right_boundry = (node_i + 1) % size == 0 # Check if node is in rightmost column in board\n bottom_boundry = node_i > size*size-1-size # Check if node is in bottommost coulmn in board\n diagonal_boundry = node_i%(size+1) == 0 # Check if node is on diagonal in board\n\n # See docs/Triangle_Connection_Matrix.png for visualization\n if not top_boundry and not diagonal_boundry:\n self.connection_matrix[node_i][node_i-size] = DirectionType.UP_RIGHT\n if not right_boundry and not diagonal_boundry:\n self.connection_matrix[node_i][node_i+1] = DirectionType.RIGHT\n if not right_boundry and not bottom_boundry:\n self.connection_matrix[node_i][node_i+size+1] = DirectionType.DOWN_RIGHT\n if not bottom_boundry:\n self.connection_matrix[node_i][node_i+size] = DirectionType.DOWN_LEFT\n if not left_boundry:\n self.connection_matrix[node_i][node_i-1] = DirectionType.LEFT\n if not left_boundry and not top_boundry:\n self.connection_matrix[node_i][node_i-size-1] = DirectionType.UP_LEFT", "def test_connections_updated(self):\n assert self.connection_config.connections == {self.new_connection_id}", "def check_connected(self, update=True):\n # update if needed\n if update:\n\n self.update_neighbors()\n\n # go through each node checking that each degree id greater than 0\n for node in self.nodes:\n\n # only one node needs to be disconnected to fail\n if len(self.nodes[node].neighbors) < 1:\n return False\n\n return True", "def update_neighbors(self):\n neighbors = []\n for i in range(-1, 2):\n for j in range(-1, 2):\n if (i, j) == (0, 0):\n continue\n try:\n y, x = self.loc[0]+i, self.loc[1]+j\n neighbor = self.board.array[y, x]\n if neighbor > 0:\n neighbors.append(neighbor)\n except:\n continue\n \n self.neighbors = neighbors", "def findSommetsConnexeTo(self, origine, notVisited):\r\n notVisited.remove(origine) # on retire le sommet des non visités\r\n # print(self.adjMatrix)\r\n for voisin, weight in enumerate(self.adjMatrix[origine]): # Pour chaque voisin de ce point\r\n if weight !=0 and voisin in notVisited: # On y est connecté et on ne l'a pas encore vu\r\n self.findSommetsConnexeTo(voisin, notVisited) # On répète le processus pour ce point\r", "def oneRoadConnect(data, x1, y1, x2, y2):\n flag = XRoadConnect(data, x1, y1, x2, y2) or YRoadConnect(data, x1, y1, x2, y2)\n if not data[y1][x1] == data[y2][x2]:\n flag = False\n if data[y1][x1] == 0 and data[y2][x2] == 0:\n flag = False\n if flag:\n data[y1][x1] = data[y2][x2] = 0\n print(data)\n print(1)\n return flag, [[x1, y1], [x2, y2]]", "def bi_djikstre(connection_mat):\n n = connection_mat.shape[0]\n \n dist_f, prev_f = {}, {}\n Q_f = list(range(n))\n \n dist_b, prev_b = {}, {}\n Q_b = list(range(n))\n \n for i in Q_f:\n dist_f[i] = np.inf\n dist_f[n-2] = 0.0\n \n for i in Q_b:\n dist_b[i] = np.inf\n dist_b[n-1] = 
0.0\n \n done_f = []\n done_b = []\n \n while not (set(done_b) & set(done_f)):\n \n for di, dist, prev, Q, done, connections, end in zip(['A', 'B'],[dist_b, dist_f], [prev_b, prev_f], [Q_b, Q_f], [done_b, done_f], [connection_mat.transpose(), connection_mat], [' ','\\n']):\n\n min_dist = min([dist[key] for key in Q])\n u = [key for key in Q if dist[key] == min_dist][0]\n# print(u, di, end=end)\n\n for v in np.nonzero(connections[:, u])[0]:\n# print(np.nonzero(connections[:, u])[0])\n alt = dist[u]+connections[v, u]\n# print(dist)\n# print(dist[u], alt)\n\n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n# print('added to prev', di, prev)\n# print('added to dist', di, dist)\n \n done.append(u)\n Q.remove(u)\n \n meeting_point = list(set(done_b) & set(done_f))[0]\n \n# print('Meeting point:', meeting_point)\n\n path_b=[]\n path_f=[]\n\n# path_f.append(u)\n \n u = meeting_point\n \n while u != n-1:\n# print(u)\n u = prev_b[u]\n path_b.append(u)\n \n u = meeting_point\n\n while u != n-2:\n# print(u)\n u = prev_f[u]\n path_f.append(u)\n \n full_path =path_b[::-1]\n full_path.append(meeting_point)\n full_path.extend(path_f)\n \n return full_path", "def _fix_connectivity(X, connectivity, affinity):\n n_samples = X.shape[0]\n if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples:\n raise ValueError(\n \"Wrong shape for connectivity matrix: %s when X is %s\"\n % (connectivity.shape, X.shape)\n )\n\n # Make the connectivity matrix symmetric:\n connectivity = connectivity + connectivity.T\n\n # Convert connectivity matrix to LIL\n if not sparse.issparse(connectivity):\n connectivity = sparse.lil_matrix(connectivity)\n\n # `connectivity` is a sparse matrix at this point\n if connectivity.format != \"lil\":\n connectivity = connectivity.tolil()\n\n # Compute the number of nodes\n n_connected_components, labels = connected_components(connectivity)\n\n if n_connected_components > 1:\n warnings.warn(\n \"the number of connected components of the \"\n \"connectivity matrix is %d > 1. 
Completing it to avoid \"\n \"stopping the tree early.\" % n_connected_components,\n stacklevel=2,\n )\n # XXX: Can we do without completing the matrix?\n connectivity = _fix_connected_components(\n X=X,\n graph=connectivity,\n n_connected_components=n_connected_components,\n component_labels=labels,\n metric=affinity,\n mode=\"connectivity\",\n )\n\n return connectivity, n_connected_components", "def is_connected(src, dst):\n for precursor in dst.precursor_nodes:\n if src == precursor.split(\":\")[0]:\n return 1\n return 0", "def test_reconnect_same(self):\n line, head = self._get_line()\n self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n assert cinfo is not None\n item = cinfo.connected\n port = cinfo.port\n constraint = cinfo.constraint\n\n assert item == self.box1\n assert item != self.box2\n\n # connect to box1 again, handle's connected item and port should be\n # the same but connection constraint will differ\n connected = self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n assert cinfo is not None\n self.assertEqual(self.box1, cinfo.connected)\n self.assertEqual(self.box1.ports()[0], cinfo.port)\n self.assertNotEqual(constraint, cinfo.constraint)", "def test_connections_updated(self):\n assert self.skill_config.connections == {self.new_connection_id}", "def topology_complete(self):\n\t\tfor i in range(len(self.sites) - 1):\n\t\t\tfor j in range(i + 1, len(self.sites)):\n\t\t\t\tself.sites[i].neighbors.append(self.sites[j])\n\t\t\t\tself.sites[j].neighbors.append(self.sites[i])", "def order_links_end_points(in_file,links_columns,links_columns_all_details,out_file):\n\n df = pd.read_csv(in_file)#.iloc[:,1:]\n # links_columns = [41,45,51,55]\n links_node_swapped_columns = links_columns[math.floor(len(links_columns)/2):] + links_columns[0:math.floor(len(links_columns)/2)]\n\n\n # links_columns_all_details = list(np.arange(41,61))\n links_node_swapped_columns_all_details = links_columns_all_details[math.floor(len(links_columns_all_details)/2):] + links_columns_all_details[0:math.floor(len(links_columns_all_details)/2)]\n\n\n for ix1, (Index, row1) in tqdm(enumerate(df.iterrows())):\n for ix2, (Index, row2) in enumerate(df[ix1+1:].iterrows()):\n\n\n if (row1[links_columns].as_matrix() == row2[links_node_swapped_columns].as_matrix()).all():\n # print('swapping',ix1,ix1 + 1 +ix2)\n # import ipdb; ipdb.set_trace()\n # print('Row2',row2)\n temp = []\n for i in range(len(links_columns_all_details)):\n\n if i < math.floor(len(links_columns_all_details)/2):\n temp.append(df.iat[ix1 + 1 + ix2, links_columns_all_details[i]])\n df.iat[ix1 + 1 + ix2, links_columns_all_details[i]] = df.iat[ix1 + 1 + ix2, links_node_swapped_columns_all_details[i]]\n else:\n df.iat[ix1 + 1 + ix2, links_columns_all_details[i]] = temp[i - math.floor(len(links_columns_all_details)/2)]\n\n # print('swapped',ix1,ix1 + 1 +ix2)\n # print('Row1', row1,'Row2', row2)\n # import ipdb; ipdb.set_trace()\n\n\n\n df.to_csv(out_file, index=False)\n\n return df", "def corner_pos(self, connection1, connection2, lastConnection):\n d1=(connection1.this.pos - connection1.other.pos).normalize()\n d2=(connection2.other.pos - connection1.other.pos).normalize()\n w1=self.get_width(connection1,connection1.other)/2\n w2=self.get_width(connection2,connection2.this)/2\n if abs(d1.dot(d2) +1) < 0.0001:\n b=0\n # catch case when it is the end of a single rod\n elif abs(d1.dot(d2) -1) < 0.0001:\n b=0\n return [[w2*rotate(d1,90), w2*rotate(d1,-90)], d1.cross(d2)[2]]\n else:\n if 
(d1[1]*d2[0]-d1[0]*d2[1])==0:\n raise ValueError(\"connections in the same place\"+str(connection1.this.pos )+\" \"+str(connection1.other.pos)+\" \"+str(connection2.other.pos))\n b = (d2[0]*d1[0]*w2 + w1*d1[0]**2 + w1*d1[1]**2 + w2*d1[1]*d2[1]) / (d1[1]*d2[0]-d1[0]*d2[1])\n# rotate direction can be correct if connection1 &2 are always in same rotational order\n return [ (b*d1 + w2*rotate(d1,90)), d1.cross(d2)[2] ]", "def _prune_connections(velocity_diff_matrix_m_s01, distance_matrix_m_s01,\n current_to_previous_matrix):\n\n num_current_by_previous = numpy.sum(current_to_previous_matrix, axis=0)\n previous_indices = numpy.argsort(-1 * num_current_by_previous)\n\n for j in previous_indices:\n this_worst_current_index = -1\n\n while this_worst_current_index is not None:\n these_current_indices = numpy.where(\n current_to_previous_matrix[:, j]\n )[0]\n\n this_worst_current_index = None\n\n # If [j]th previous local max is involved in a split:\n if len(these_current_indices) > 1:\n this_num_previous_by_current = numpy.array([\n numpy.sum(current_to_previous_matrix[i, :])\n for i in these_current_indices\n ], dtype=int)\n\n # Current local max cannot be involved in both a merger and a\n # split.\n if numpy.max(this_num_previous_by_current) > 1:\n this_worst_current_index = these_current_indices[\n numpy.argmax(this_num_previous_by_current)\n ]\n\n if this_worst_current_index is not None:\n current_to_previous_matrix[this_worst_current_index, j] = False\n continue\n\n if len(these_current_indices) <= MAX_STORMS_IN_SPLIT:\n continue\n\n this_max_velocity_diff_m_s01 = numpy.max(\n velocity_diff_matrix_m_s01[these_current_indices, j]\n )\n\n if numpy.isinf(this_max_velocity_diff_m_s01):\n this_worst_current_index = numpy.argmax(\n distance_matrix_m_s01[these_current_indices, j]\n )\n else:\n this_worst_current_index = numpy.argmax(\n velocity_diff_matrix_m_s01[these_current_indices, j]\n )\n\n this_worst_current_index = these_current_indices[\n this_worst_current_index]\n\n current_to_previous_matrix[this_worst_current_index, j] = False\n\n return current_to_previous_matrix", "def play_round_Conway_Cell(self):\n for x in self.board:\n for f in x:\n f.live_neighbors = 0\n\n for i in range(1, self.cols - 1):\n for j in range(1, self.rows - 1):\n status = self.board[i][j].status\n assert type(status)==int \n\n for m in range(i - 1, i + 2):\n for n in range(j - 1, j + 2):\n self.board[m][n].live_neighbors += status\n self.board[i][j].live_neighbors -= status", "def _detect_connections(compound_line_graph, top, type_=\"angle\"):\n connection = nx.Graph()\n for edge in EDGES[type_]:\n assert len(edge) == 2, \"Edges should be of length 2\"\n connection.add_edge(edge[0], edge[1])\n\n matcher = nx.algorithms.isomorphism.GraphMatcher(\n compound_line_graph, connection\n )\n\n formatter_fns = {\n \"angle\": _format_subgraph_angle,\n \"dihedral\": _format_subgraph_dihedral,\n \"improper\": _format_subgraph_improper,\n }\n\n conn_matches = IndexedSet()\n for m in matcher.subgraph_isomorphisms_iter():\n new_connection = formatter_fns[type_](m, top)\n conn_matches.add(new_connection)\n\n if conn_matches:\n conn_matches = _trim_duplicates(conn_matches)\n\n # Do more sorting of individual connection\n sorted_conn_matches = list()\n for match in conn_matches:\n if type_ in (\"angle\", \"dihedral\"):\n if match[0] < match[-1]:\n sorted_conn = match\n else:\n sorted_conn = match[::-1]\n elif type_ == \"improper\":\n sorted_conn = [match[0]] + sorted(match[1:])\n sorted_conn_matches.append(sorted_conn)\n\n # Final 
sorting the whole list\n if type_ == \"angle\":\n return sorted(\n sorted_conn_matches,\n key=lambda angle: (\n angle[1],\n angle[0],\n angle[2],\n ),\n )\n elif type_ == \"dihedral\":\n return sorted(\n sorted_conn_matches,\n key=lambda dihedral: (\n dihedral[1],\n dihedral[2],\n dihedral[0],\n dihedral[3],\n ),\n )\n elif type_ == \"improper\":\n return sorted(\n sorted_conn_matches,\n key=lambda improper: (\n improper[0],\n improper[1],\n improper[2],\n improper[3],\n ),\n )", "def test_neighbors_with_edges(self):\n self.setup()\n graph_rep = nglpy.Graph(self.points, self.graph, self.max_neighbors,\n self.beta, self.edges)\n\n expected_graph = {0: (1, 2), 1: (0, 3, 4), 2: (0, 3, 4), 3: (1, 2),\n 4: (1, 2)}\n\n for i in range(len(self.points)):\n expected = list(expected_graph[i])\n actual = sorted(graph_rep.neighbors(i))\n msg = '\\nNode {} Connectivity:'.format(i)\n msg += '\\n\\texpected: {}\\n\\tactual: {} '.format(expected, actual)\n self.assertEqual(expected, actual, msg)\n\n self.assertEqual(graph_rep.neighbors(), expected_graph)", "def randomConnect(self):\n if self.Nc == 0:\n return\n else:\n possible_pairs = np.vstack(np.triu_indices(self.numMonomers,k=2)).T\n Nl = len(possible_pairs)\n selected = possible_pairs[np.random.choice(Nl,size=self.Nc,replace=False)].T\n self.connect(selected)", "def are_connected(self, name1, name2):", "def __create_connections(self):\n \"\"\"\n When adding diagonals, each node adds only diagonals to nodes below it.\n This prevents a case where two nodes add diagonals with each other, s.t. both diagonals are added.\n \"\"\"\n # top left corner:\n self.add_connection(self.get_junc((0, 0)).right, self.get_junc((0, 1)).left)\n self.add_connection(self.get_junc((0, 0)).down, self.get_junc((1, 0)).up)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, 0)).right, self.get_junc((1, 1)).up)\n else:\n self.add_connection(self.get_junc((0, 0)).down, self.get_junc((1, 1)).left)\n # top row:\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((0, wi)).right, self.get_junc((0, wi + 1)).left)\n self.add_connection(self.get_junc((0, wi)).left, self.get_junc((0, wi - 1)).right)\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi)).up)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, wi)).left, self.get_junc((1, wi - 1)).up)\n else:\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi - 1)).right)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, wi)).right, self.get_junc((1, wi + 1)).up)\n else:\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi + 1)).left)\n # top right corner:\n self.add_connection(self.get_junc((0, -1)).left, self.get_junc((0, -2)).right)\n self.add_connection(self.get_junc((0, -1)).down, self.get_junc((1, -1)).up)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, -1)).left, self.get_junc((1, -2)).up)\n else:\n self.add_connection(self.get_junc((0, -1)).down, self.get_junc((1, -2)).right)\n # middle rows:\n for hi in range(1, self.height - 1):\n # left node\n self.add_connection(self.get_junc((hi, 0)).right, self.get_junc((hi, 1)).left)\n self.add_connection(self.get_junc((hi, 0)).down, self.get_junc((hi + 1, 0)).up)\n self.add_connection(self.get_junc((hi, 0)).up, self.get_junc((hi - 1, 0)).down)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, 0)).right, 
self.get_junc((hi + 1, 1)).up)\n else:\n self.add_connection(self.get_junc((hi, 0)).down, self.get_junc((hi + 1, 1)).left)\n # middle nodes\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((hi, wi)).right, self.get_junc((hi, wi + 1)).left)\n self.add_connection(self.get_junc((hi, wi)).left, self.get_junc((hi, wi - 1)).right)\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi)).up)\n self.add_connection(self.get_junc((hi, wi)).up, self.get_junc((hi - 1, wi)).down)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, wi)).left, self.get_junc((hi + 1, wi - 1)).up)\n else:\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi - 1)).right)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, wi)).right, self.get_junc((hi + 1, wi + 1)).up)\n else:\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi + 1)).left)\n # right node:\n self.add_connection(self.get_junc((hi, -1)).left, self.get_junc((hi, -2)).right)\n self.add_connection(self.get_junc((hi, -1)).down, self.get_junc((hi + 1, -1)).up)\n self.add_connection(self.get_junc((hi, -1)).up, self.get_junc((hi - 1, -1)).down)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, -1)).left, self.get_junc((hi + 1, -2)).up)\n else:\n self.add_connection(self.get_junc((hi, -1)).down, self.get_junc((hi + 1, -2)).right)\n # bottom left corner:\n self.add_connection(self.get_junc((-1, 0)).right, self.get_junc((-1, 1)).left)\n self.add_connection(self.get_junc((-1, 0)).up, self.get_junc((-2, 0)).down)\n # bottom row\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((-1, wi)).right, self.get_junc((-1, wi + 1)).left)\n self.add_connection(self.get_junc((-1, wi)).left, self.get_junc((-1, wi - 1)).right)\n self.add_connection(self.get_junc((-1, wi)).up, self.get_junc((-2, wi)).down)\n # bottom right corner:\n self.add_connection(self.get_junc((-1, -1)).left, self.get_junc((-1, -2)).right)\n self.add_connection(self.get_junc((-1, -1)).up, self.get_junc((-2, -1)).down)", "def connect_corridors(G, all_ml_list, ml_dict, singleGraph):\r\n sensor_rows = run_on_file('d07_stations_2008_11_26.txt')\r\n connector_list, short_lines=find_all_connectors(sensor_rows)\r\n print 'number of connectors parsed in dataset: ', len(connector_list)\r\n for road_i, dir_i, coords, road_j, dir_j in short_lines:\r\n source_list=ml_dict[(road_i, dir_i)]\r\n destination_list=ml_dict[(road_j, dir_j)]\r\n source_line=nearest(coords, source_list, 1)\r\n destination_line=nearest(coords, destination_list, 1)\r\n G.add_edge(source_line[0][1][0], destination_line[0][1][0]) \r\n singleGraph.add_edge(source_line[0][1][0], destination_line[0][1][0]) \r\n G, singleGraph=manual_connections(G, singleGraph, ml_dict) \r\n return G, singleGraph", "def solve(self, board: List[List[str]]) -> None:\n if len(board) == 0: return\n\n m = len(board)\n n = len(board[0])\n\n uf = UnionFind(m * n + 1)\n dummy = m * n\n\n # connect 'O' at first and last col with dummy\n for i in range(m):\n if board[i][0] == 'O':\n uf.union(dummy, i * n)\n if board[i][-1] == 'O':\n uf.union(dummy, i * n + n - 1)\n\n # connect 'O' at first and last row with dummy\n for j in range(n):\n if board[0][j] == 'O':\n uf.union(dummy, j)\n if board[-1][j] == 'O':\n uf.union(dummy, n * (m-1) + j)\n\n d = [(1, 0), (0, 1), (0, -1), (-1, 0)]\n\n for i in range(1, m-1):\n for j in range(1, n-1):\n if 
board[i][j] == 'O':\n for di, dj in d:\n x = i+di\n y = j+dj\n if board[x][y] == 'O':\n uf.union(x*n+y, i*n+j)\n\n # change not connected 'O' with 'X'\n for i in range(1, m-1):\n for j in range(1, n-1):\n if not uf.connected(dummy, i * n + j):\n board[i][j] = 'X'", "def connection(self, sampleseq, num):\n self.Adjmatrix = np.zeros((self.nodenum, self.nodenum), dtype = int)\n \n for i in range(self.supplynum):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries[i], self.trandemandseries], sampleseq[self.supplyseries[i]]))\n self.Adjmatrix[self.supplyseries[i], self.trandemandseries[minindex]] = 1\n# self.Adjmatrix[minindex, self.supplyseries[i]] = 1\n \n for i in range(self.trannum):\n if(np.sum(self.Adjmatrix[self.supplyseries, self.transeries[i]]) == 0):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries, self.transeries[i]], num))\n self.Adjmatrix[minindex, self.transeries[i]] = 1\n# self.Adjmatrix[self.transeries[i], minindex] = 1\n \n \n# for i in range(self.supplynum):\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries[i], self.supplyseries], num))\n# self.Adjmatrix[self.supplyseries[i], minindex] = 1\n# self.Adjmatrix[minindex, self.supplyseries[i]] = 1\n \n# for i in range(self.trannum):\n# if(np.sum(self.Adjmatrix[self.supplyseries, self.transeries[i]]) != 0):\n# continue\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries, self.transeries[i]], num))\n# self.Adjmatrix[minindex, self.transeries[i]] = 1\n## self.Adjmatrix[self.transeries[i], minindex] = 1\n# \n for i in range(self.trannum):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries[i], self.demandseries], min(sampleseq[self.transeries[i]], self.demandnum))) + self.supplynum + self.trannum\n self.Adjmatrix[self.transeries[i], minindex] = 1\n# self.Adjmatrix[minindex, self.transeries[i]] = 1\n \n# for i in range(self.demandnum):\n# if(np.sum(self.Adjmatrix[self.transeries, self.demandseries[i]]) == 0):\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries, self.demandseries[i]], 1)) + self.supplynum\n# self.Adjmatrix[minindex, self.demandseries[i]] = 1\n \n# for i in range(self.trannum):\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries[i], self.transeries], num)) + self.supplynum\n# self.Adjmatrix[self.transeries[i], minindex] = 1\n \n for i in range(self.demandnum):\n if(np.sum(self.Adjmatrix[self.transeries, self.demandseries[i]]) == 0):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries, self.demandseries[i]], num)) + self.supplynum\n self.Adjmatrix[minindex, self.demandseries[i]] = 1\n# self.Adjmatrix[self.demandseries[i], minindex] = 1\n \n for i in range(self.demandnum):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.demandseries[i], self.demandseries], min(sampleseq[self.demandseries[i]] + 1, self.demandnum))) + self.supplynum + self.trannum\n minindex = minindex[1:-1]\n for j in range(len(minindex)):\n if(self.Adjmatrix[self.demandseries[i], minindex[j]] == 1 or self.Adjmatrix[minindex[j], self.demandseries[i]] == 1):\n continue\n self.Adjmatrix[self.demandseries[i], minindex[j]] = 1", "def djikstre(connection_mat):\n n = connection_mat.shape[0]\n dist, prev = {}, {}\n Q = list(range(n))\n \n for i in Q:\n dist[i] = np.inf\n dist[n-2] = 0.0\n \n while(len(Q)>0):\n\n min_dist = min([dist[key] for key in Q])\n u = [key for key in Q if dist[key] == min_dist][0]\n Q.remove(u)\n\n for v in np.nonzero(connection_mat[:, u])[0]:\n \n alt = dist[u]+connection_mat[v, u]\n \n if alt < dist[v]:\n dist[v] 
= alt\n prev[v] = u\n \n return dist, prev", "def connection_mutation(self, connection_inno, attempts):\n tries = 0\n while tries < attempts:\n tries += 1\n\n # Get Random Nodes\n n1 = self.nodes[random_index(self.nodes.keys())]\n n2 = self.nodes[random_index(self.nodes.keys())]\n\n # Should Reverse\n if (\n (n1.type is NodeType.HIDDEN and n2.type is NodeType.INPUT)\n or (n1.type is NodeType.OUTPUT and n2.type is NodeType.HIDDEN)\n or (n1.type is NodeType.HIDDEN and n2.type is NodeType.INPUT)\n ):\n n1, n2 = n2, n1\n\n # Bad Connection Check 1\n if (\n (n1.type is NodeType.INPUT and n2.type is NodeType.INPUT)\n or (n1.type is NodeType.OUTPUT and n2.type is NodeType.OUTPUT)\n or (n1.id == n2.id)\n ):\n continue\n\n # Bad Connection Check 2\n if n1.layer == n2.layer:\n continue\n\n # Check for Circular Structures\n # List of nodes that should have their connections checked\n needs_checking = []\n # List of nodes that requires output from node2\n node_ids = []\n for con in self.connections.values():\n if con.in_node == n2.id:\n # Connection comes from node2\n needs_checking.append(con.out_node)\n node_ids.append(con.out_node)\n\n while len(needs_checking) > 0:\n node_id = needs_checking.pop(0)\n for con in self.connections.values():\n if con.in_node == node_id:\n # Connection comes from needs_checking node\n needs_checking.append(con.out_node)\n node_ids.append(con.out_node)\n\n # Check if node1 is dependent on node2\n if any(i == n1.id for i in node_ids):\n continue\n\n # Existing or Reverse Existing Connection Check\n if any(\n (con.in_node == n1.id and con.out_node == n2.id)\n or (con.in_node == n2.id and con.out_node == n1.id)\n for con in self.connections.values()\n ):\n continue\n\n self.add_connection(\n Connection(\n id=connection_inno.inc,\n in_node=n1.id,\n out_node=n2.id,\n weight=random(-1, 1),\n enabled=True,\n )\n )\n return True\n\n # print('could not mutate')\n return False", "def test_reconnect_another(self):\n line, head = self._get_line()\n self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n assert cinfo is not None\n item = cinfo.connected\n port = cinfo.port\n constraint = cinfo.constraint\n\n assert item == self.box1\n assert port == self.box1.ports()[0]\n assert item != self.box2\n\n # connect to box2, handle's connected item and connection data\n # should differ\n self.tool.connect(line, head, (120, 150))\n cinfo = self.canvas.get_connection(head)\n assert cinfo is not None\n self.assertEqual(self.box2, cinfo.connected)\n self.assertEqual(self.box2.ports()[0], cinfo.port)\n\n # old connection does not exist\n self.assertNotEqual(item, cinfo.connected)\n self.assertNotEqual(constraint, cinfo.constraint)", "def reshuffle_connections(attempt, connections, error_gates, order):\n # No more than 10 attempt to reshuffle\n if attempt > 9:\n attempt = 0\n order += 1\n\n return connections, attempt, order\n\n # Put the not connectable gates in the front\n connections.remove(error_gates)\n connections.insert(0, error_gates)\n attempt += 1\n\n return connections, attempt, order", "def bfs_is_connected(self):\n q = Queue.Queue()\n origins = [self.vertices()[0]]\n traveled = set(origins)\n while origins:\n for o in origins:\n for child in self.out_vertices(o):\n if child not in traveled:\n q.put(child)\n traveled.add(child)\n\n origins = []\n while not q.empty():\n origins.append(q.get())\n if len(traveled) == self.order():\n return True\n return False", "def clusters_connected( self):\n def check_connected( k, vertices, edges):\n dads = {}\n for p 
in vertices:\n dads[p] = p\n\n def Find( c):\n while c != dads[c]:\n c = dads[c]\n return c\n\n def Union( p, q):\n dads[Find(p)] = Find(q)\n\n for p,q in edges:\n Union( p, q)\n\n stuff = set([ Find(p) for (k,p) in dads.items()])\n assert len(stuff) == 1, \"More than one partition\"\n\n vertices = collections.defaultdict( list)\n for p in itertools.product( range(self.n), repeat=2):\n vertices[self.raster[p]].append( p)\n\n def X():\n for x in range(self.n-1):\n for y in range(self.n):\n yield (x,y),(x+1,y)\n\n def Y():\n for x in range(self.n):\n for y in range(self.n-1):\n yield (x,y),(x,y+1)\n\n connections = collections.defaultdict( list)\n for (p,q) in itertools.chain( X(), Y()):\n if self.raster[p] == self.raster[q]:\n connections[self.raster[p]].append( ( p, q))\n\n for (k,v) in vertices.items():\n check_connected( k, v, connections[k])", "def setZeroes(self, matrix):\n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n if matrix[i][j] == 0 and (i, j) not in self.visited:\n for neighbor in self.setter(matrix, i, j):\n matrix[neighbor[0]][neighbor[1]] = 0\n self.visited.add((neighbor[0], neighbor[1]))\n print(matrix)", "def update_connection(session=None, data=None, add_new_connection=False):\n data_dict = format_check_update_connection_request(data)\n if data_dict is None:\n print(\"Error: invalid update\")\n return False\n with mc.MCSessionWrapper(session=session) as session:\n for dkey in data_dict.keys():\n upcn_to_change = data_dict[dkey][0][0]\n urev_to_change = data_dict[dkey][0][1]\n dncn_to_change = data_dict[dkey][0][2]\n drev_to_change = data_dict[dkey][0][3]\n boup_to_change = data_dict[dkey][0][4]\n aodn_to_change = data_dict[dkey][0][5]\n strt_to_change = data_dict[dkey][0][6]\n conn_rec = session.query(Connections).filter(\n (Connections.upstream_part == upcn_to_change)\n & (Connections.up_part_rev == urev_to_change)\n & (Connections.downstream_part == dncn_to_change)\n & (Connections.down_part_rev == drev_to_change)\n & (Connections.upstream_output_port == boup_to_change)\n & (Connections.downstream_input_port == aodn_to_change)\n & (Connections.start_gpstime == strt_to_change)\n )\n num_conn = conn_rec.count()\n if num_conn == 0:\n if add_new_connection:\n connection = Connections()\n connection.connection(\n up=upcn_to_change,\n up_rev=urev_to_change,\n down=dncn_to_change,\n down_rev=drev_to_change,\n upstream_output_port=boup_to_change,\n downstream_input_port=aodn_to_change,\n start_gpstime=strt_to_change,\n )\n else:\n print(\n \"Warning: no {} and add_new_connection isn't enabled.\".format(\n dkey\n )\n )\n return False\n elif num_conn == 1:\n if add_new_connection:\n print(\n \"Warning: {} exists and and_new_connection is enabled\".format(\n dkey\n )\n )\n return False\n else:\n connection = conn_rec.first()\n else: # pragma: no cover\n # we don't know how to cause this, thus the no cover. But we want to catch it\n # if it does happen.\n raise RuntimeError(\n \"More than one of \",\n dkey,\n \" exists. 
This should not happen, please \"\n \"make an issue on the repo!\",\n )\n connection = None\n if connection:\n for d in data_dict[dkey]:\n setattr(connection, d[7], d[8])\n session.add(connection)\n cm_utils.log(\"cm_partconn connection update\", data_dict=data_dict)\n\n return True", "def connected(self, x, y):\n return (self.numbers[x] == self.numbers[y])", "def breaks_connectivity(level, index, axis=0):\n new_level = remove_index(level, index, axis=axis)\n return not is_solvable(new_level)", "def read_connections(file_name, point_names):\r\n\r\n connections = []\r\n fid = open(file_name, 'r')\r\n line=fid.readline()\r\n while(line):\r\n connections.append(np.array(line.split(',')))\r\n connections[-1][0] = connections[-1][0].strip()\r\n connections[-1][1] = connections[-1][1].strip()\r\n line = fid.readline()\r\n connect = np.zeros((len(point_names), len(point_names)),dtype=bool)\r\n for i in range(len(point_names)):\r\n for j in range(len(point_names)):\r\n for k in range(len(connections)):\r\n if connections[k][0] == point_names[i] and connections[k][1] == point_names[j]:\r\n \r\n connect[i,j]=True\r\n connect[j,i]=True\r\n break\r\n \r\n return connect", "def find_next_moves(self):\n # iterate through all cells, and group them with upper cells and left\n # cells\n\n # generate separated cells then merge the them with same neighbours\n matrix_rows = len(self.status)\n if matrix_rows == 0:\n matrix_cols = 0\n else:\n matrix_cols = len(self.status[0])\n matrix = []\n for i in range(matrix_rows):\n matrix.append([[(i, j)] for j in range(matrix_cols)])\n # merge coordinations\n for i in range(matrix_rows):\n for j in range(matrix_cols):\n if self.status[i][j] != '':\n # is same with right cell?\n if j < matrix_cols - 1 and self.status[i][j] == self.status[i][j + 1]:\n new_item = matrix[i][j] + matrix[i][j + 1]\n matrix[i][j] = matrix[i][j + 1] = new_item\n # is same with down cell?\n if i < matrix_rows - 1 and self.status[i][j] == self.status[i + 1][j]:\n new_item = matrix[i][j] + matrix[i + 1][j]\n matrix[i][j] = matrix[i + 1][j] = new_item\n\n # filter out all unvalid results\n result = []\n # filter out all single-cell groups\n for i in range(matrix_rows):\n for j in range(matrix_cols):\n if (len(matrix[i][j]) > 1 and\n matrix[i][j] not in result):\n result.append(matrix[i][j])\n\n # filter sublists\n result = sorted(result, key=len, reverse=True)\n changed = True\n while changed:\n changed = False\n for i in range(len(result)):\n for j in range(i + 1, len(result)):\n if set(result[i]).issuperset(set(result[j])):\n result.remove(result[j])\n changed = True\n break\n if changed:\n break\n\n if result:\n for i in result:\n yield (self.convert_coordinations(i),\n len(i) * len(i) * 5,\n self.calc_new_status(i))\n else:\n left_cells = sum([len(i) - i.count('') for i in self.status])\n left_cells_score = 2000 - 20 * left_cells * left_cells\n if left_cells_score < 0:\n left_cells_score = 0\n for i in self.parents:\n i.children[self] = [(i.children[self][0][0] + left_cells_score,\n i.children[self][0][1],\n i.children[self][0][2])]", "def updateConnections(self, *connections):\n\n # Verify if ports are valid, otherwise do nothing.\n for connection in connections:\n for k1, v1 in connection.items():\n if v1 not in k1.ports:\n logger.error(\"Port '%s' is not in '%s: %s'\", v1, k1, k1.ports)\n raise RuntimeError(\"Port '{}' is not in '{}: {}'\".format(v1, k1, k1.ports))\n\n # Remove old conflicting connections\n def check_if_port_is_not_connected(connection, k1, v1):\n for k2, v2 in 
connection.items():\n if (k1, v1) == (k2, v2):\n logger.warning(\"Deleting existing connection %s.\", connection)\n return False\n return True\n for connection in connections:\n for k1, v1 in connection.items():\n connectioncheck2 = lambda connection: check_if_port_is_not_connected(\n connection, k1, v1)\n self.connections[:] = [x for x in self.connections if connectioncheck2(x)]\n\n # Add new connections\n for connection in connections:\n if connection not in self.connections:\n self.connections.append(connection)\n else:\n logger.warning(\"Connection already exists: %s\", connection)\n return True", "def set_neighbor(self):\n for node in self.node:\n for other in self.node:\n if other.id != node.id and distance.euclidean(node.location, other.location) <= node.com_ran:\n node.neighbor.append(other.id)", "def connectSystems(self):\n for id in self.systems.keys():\n mySystem = self.systems[id]\n mySystem.connectedSystems = []\n for id2 in self.systems.keys():\n mySystem2 = self.systems[id2]\n xDist = abs(mySystem.x - mySystem2.x)\n yDist = abs(mySystem.y - mySystem2.y)\n if (xDist + yDist) <> 0 and (xDist <= self.systemSize) and (yDist <= self.systemSize):\n mySystem.connectedSystems.append(id2)", "def relativize_coordinates(self):\n if len(self.nodes) + len(self.connecting) < 1:\n return\n smallest_c = (self.nodes+self.connecting)[0].c\n for node in self.nodes+self.connecting:\n if node.c < smallest_c:\n smallest_c = node.c\n for node in self.nodes+self.connecting:\n node.c = node.c - smallest_c", "def connect_backwards(self):\n\n for n in self.nodes:\n n.receives_from = []\n\n for n1 in self.nodes:\n for n2 in n1.sends_to:\n n2.receives_from.append(n1)", "def __check_and_join_col(self, x: int, y: int, tree: int, increment: int) -> bool:\n for m in [self.__maze[x + i, y + (2 * increment)] for i in (-1, 0, 1)]:\n # if any square maps to a different maze connect it and redo the mappings\n if m == 0:\n continue\n\n main_tree = self.__mappings.get(m, tree)\n if main_tree != tree:\n self.__activate(x, y + increment, tree)\n self.__activate(x, y + (2 * increment), tree)\n self.__remap(tree, main_tree)\n return False\n return True", "def are_connected(self, person1, person2):\n\n possible_nodes = Queue()\n seen = set()\n possible_nodes.enqueue(person1)\n seen.add(person1)\n\n while not possible_nodes.is_empty():\n person = possible_nodes.dequeue()\n print(\"checking\", person)\n if person is person2:\n return True\n else:\n for cohabitant in person.adjacent - seen:\n possible_nodes.enqueue(cohabitant)\n seen.add(cohabitant)\n print(\"added to queue:\", cohabitant)\n return False", "def get_conn_matrix_vector(self):\n\n vect = []\n for line in sorted(self.connection_matrix):\n for item in self.connection_matrix[line]:\n vect.append(item)\n\n return vect", "def toposorted(self):\n order = []\n colors = {node: \"white\" for node in self._neighbors}\n\n def visit(node):\n assert colors[node] == \"white\"\n colors[node] = \"gray\"\n for neighbor in self._neighbors[node]:\n if colors[neighbor] == \"white\":\n visit(neighbor)\n elif colors[neighbor] == \"gray\":\n raise CyclicGraphError(\n \"Cycle involving {!r} and {!r} detected\".format(node, neighbor)\n )\n order.append(node)\n colors[node] = \"black\"\n\n for node in self._neighbors:\n if colors[node] == \"white\":\n visit(node)\n return order", "def _check_connected(self) -> Set[Tuple[int, int]]:\n first_tile = None\n non_empty_tiles = set()\n\n # Find the first tile and all tiles that are non-empty\n for row in range(self.board_size):\n for col in 
range(self.board_size):\n tile = self.board[row][col]\n if tile is not None:\n non_empty_tiles.add((row, col))\n if first_tile is None:\n first_tile = (row, col)\n\n # Traverse through all tiles reachable from the first tile.\n # Whatever tiles are left in non_empty_tiles are not connected.\n self._find_connected_tiles(first_tile[0], first_tile[1], non_empty_tiles)\n return non_empty_tiles", "def get_oc_oc_connections(self, random_conn=False):\n\n print \"Drawing OC - OC connections .... \"\n abstract_weights_non_negative = np.loadtxt(self.params['oc_oc_abstract_weights_fn'])\n abstract_weights = self.take_log_weights(abstract_weights_non_negative)\n if random_conn:\n rnd.shuffle(abstract_weights)\n rnd.seed(self.params['random_oc_oc_seed'])\n np.savetxt(self.params['oc_oc_abstract_weights_fn'].rsplit('.dat')[0] + '_random.dat', abstract_weights)\n\n assert (abstract_weights[:,0].size == self.params['n_hc'] * self.params['n_mc'])\n assert (abstract_weights[0,:].size == self.params['n_hc'] * self.params['n_mc'])\n w_max_abstract = abstract_weights.max()\n w_min_abstract = abstract_weights.min()\n\n w_pyr_pyr_global_max = self.params['w_pyr_pyr_global_max']\n w_pyr_rsnp_max = self.params['w_pyr_rsnp_max']\n output_pyr_pyr = \"\"\n line_cnt_pyr_pyr = 0\n output_pyr_rsnp = \"\"\n line_cnt_pyr_rsnp = 0\n cnt_discarded_conn = 0\n for src_mc in xrange(abstract_weights[:, 0].size):\n for tgt_mc in xrange(abstract_weights[:, 0].size):\n if (src_mc != tgt_mc):\n w_in = abstract_weights[src_mc, tgt_mc]\n if (w_in > 0): # draw several pyr -> pyr connections between the two MC\n src_tgt_dict = {} # src_tgt_dict[src_gid] = [tgt_gid_0, ...] multiple connections between the same source and the same target are forbiddden\n w_out = (w_in / w_max_abstract) * w_pyr_pyr_global_max\n src_pyrs = rnd.randint(0, self.params['n_pyr_per_mc'], self.params['n_pyr_pyr_between_2mc'])\n for src in np.unique(src_pyrs):\n src_tgt_dict[src] = []\n for src in src_pyrs:\n src_pyr = src + src_mc * self.params['n_pyr_per_mc'] + self.params['pyr_offset']\n tgt_pyr = rnd.randint(0, self.params['n_pyr_per_mc']) + tgt_mc * self.params['n_pyr_per_mc'] + self.params['pyr_offset']\n src_tgt_dict[src].append(tgt_pyr)\n\n # remove multiple instances of the same src-tgt connection\n for src in src_pyrs:\n n1 = len(src_tgt_dict[src])\n src_tgt_dict[src] = np.unique(src_tgt_dict[src]).tolist()\n cnt_discarded_conn += n1 - len(src_tgt_dict[src])\n for tgt_pyr in src_tgt_dict[src]:\n w_noise = self.draw_connection(1.0, w_out, noise=self.params['w_pyr_pyr_global_sigma'])\n if (w_noise > self.params['weight_threshold']):\n output_pyr_pyr += \"%d %d %.6e\\n\" % (src_pyr, tgt_pyr, w_noise)\n line_cnt_pyr_pyr += 1\n\n elif (w_in < 0):\n w_out = (w_in / w_min_abstract) * w_pyr_rsnp_max\n src_pyrs = self.get_rnd_targets(self.params['n_pyr_per_mc'], self.params['n_pyr_rsnp_between_2mc']) # avoid double connections\n for src in src_pyrs:\n src_pyr = src + src_mc * self.params['n_pyr_per_mc'] + self.params['pyr_offset'] \n tgt_rsnp = rnd.randint(0, self.params['n_rsnp_per_mc']) + tgt_mc * self.params['n_rsnp_per_mc'] + self.params['rsnp_offset']\n w_noise = self.draw_connection(1.0, w_out, noise=self.params['w_pyr_rsnp_sigma'])\n if (w_noise > self.params['weight_threshold']):\n output_pyr_rsnp += \"%d %d %.6e\\n\" % (src_pyr, tgt_rsnp, w_noise)\n line_cnt_pyr_rsnp += 1\n\n print 'Number of discarded pyr-pyr connections:', cnt_discarded_conn\n print 'Number of pyr-rsnp connections:', line_cnt_pyr_rsnp\n print 'Number of pyr-pyr connections:', 
line_cnt_pyr_pyr\n print 'Number of OC-OC connections:', line_cnt_pyr_pyr + line_cnt_pyr_rsnp\n output_fn_pyr_pyr = self.params['conn_list_pyr_pyr']\n output_file_pyr_pyr = open(output_fn_pyr_pyr, 'w')\n output_file_pyr_pyr.write(\"%d\\t%d\\n\" % (line_cnt_pyr_pyr, 3))\n output_file_pyr_pyr.write(output_pyr_pyr)\n output_file_pyr_pyr.close()\n\n output_fn_pyr_rsnp = self.params['conn_list_pyr_rsnp']\n output_file_pyr_rsnp = open(output_fn_pyr_rsnp, 'w')\n output_file_pyr_rsnp.write(\"%d\\t%d\\n\" % (line_cnt_pyr_rsnp, 3))\n output_file_pyr_rsnp.write(output_pyr_rsnp)\n output_file_pyr_rsnp.close()", "def get_overlapping_conn(conn1: NDArray,\n conn2: NDArray) -> Tuple[NDArray, NDArray]:\n\n conn_union = np.empty((0, 3), dtype=np.int32)\n\n # Get unique components\n if np.ma.is_masked(conn1):\n concomp1 = np.unique(conn1).compressed()\n else:\n concomp1 = np.unique(conn1)\n\n if np.ma.is_masked(conn2):\n concomp2 = np.unique(conn2).compressed()\n else:\n concomp2 = np.unique(conn2)\n\n # Loop through them and connect size and number of overlapping data\n for ix2 in concomp2:\n for ix1 in concomp1:\n # Skip 0 component combination with other components\n if not ix1 == 0 and not ix2 == 0:\n idx = np.where((conn1 == ix1) & (conn2 == ix2))[0]\n if np.count_nonzero(idx) > 0:\n carray = np.array([ix2, ix1, np.count_nonzero(idx)],\n dtype=np.int32, ndmin=2)\n\n conn_union = np.concatenate((conn_union, carray), axis=0)\n\n # Get 0 components in both frames\n elif ix1 == 0 and ix2 == 0:\n idx = np.where((conn1 == ix2) & (conn2 == ix1))[0]\n if np.count_nonzero(idx) > 0:\n carray = np.array([ix2, ix1, np.count_nonzero(idx)],\n dtype=np.int32, ndmin=2)\n\n conn_union = np.concatenate((conn_union, carray), axis=0)\n\n # Find components to correct in Frame 2\n conn_pairs = np.empty((0, 3), dtype=np.int32)\n\n for k in np.unique(conn_union[:, 0]):\n ik = conn_union[:, 0] == k\n # find number of times components is referenced\n count = np.sum(conn_union[:, 0] == k)\n\n if count > 1:\n max_points = np.max(conn_union[ik][:, 2])\n # Select the one with the most points\n ik = np.where((conn_union[:, 0] == k) &\n (conn_union[:, 2] == max_points))[0]\n # Select first if there are more pairs with same num of points\n ik = np.array(ik[0], ndmin=1) if ik.shape[0] > 1 else ik\n\n conn_pairs = np.concatenate((conn_pairs, conn_union[ik]), axis=0)\n\n return conn_pairs", "def is_connected(self) -> bool:\n for node in self.nodes.values():\n if node.is_connected:\n return True\n return False", "def is_connected(self):\n if self._connected:\n return True\n else:\n return perms_are_connected(self._g, self.degree())", "def set_both_connections(self, new_node):\n distance_to_new = self.current_node.distance_between(new_node.location)\n self.current_node.set_adjacent_from_direction(distance_to_new, new_node)\n reverse_distance = new_node.distance_between(self.current_node.location)\n new_node.set_adjacent_from_direction(reverse_distance, self.current_node)", "def cfdProcessNodeTopology(self):\r\n self.nodeElements = self.cfdInvertConnectivity(self.elementNodes)\r\n self.nodeFaces = self.cfdInvertConnectivity(self.faceNodes)", "def test_RelaxedNeighborhood(self):\n self.setup()\n self.graph = 'relaxed beta skeleton'\n graph_rep = nglpy.Graph(self.points, self.graph, self.max_neighbors,\n self.beta)\n expected_graph = {0: (1, 3), 1: (0, 2, 4), 2: (1, 3), 3: (0, 2),\n 4: (1,)}\n\n for i in range(len(self.points)):\n expected = list(expected_graph[i])\n actual = sorted(graph_rep.neighbors(i))\n msg = '\\nNode {} 
Connectivity:'.format(i)\n msg += '\\n\\texpected: {}\\n\\tactual: {} '.format(expected, actual)\n self.assertEqual(expected, actual, msg)\n\n self.assertEqual(graph_rep.neighbors(), expected_graph)", "def connectCurrentConnection(self, position):\n\n # get the item at the position\n itemAt = self.itemAt(position.toPoint(), self.getView().transform())\n\n # remove the connection (a new connection will get added if there is a valid connector)\n self.removeItem(self.currentlyConnecting)\n connection = self.currentlyConnecting\n self.currentlyConnecting = None\n\n\n \"\"\" if itemAt is a Connector (Top/Bottom) item (if you pull onto a Blob) \"\"\"\n if itemAt is not None and isinstance(itemAt, ConnectorItem) and not self.disabled:\n # check, whether the connection is already connected to connector of the given type (top/bottom)\n if connection.checkSameConnectorTypeRestriction(itemAt):\n # get the connectors\n if itemAt.isTopConnector():\n topConnector = itemAt\n bottomConnector = connection.getConnectorIfNotFullyConnected()\n else:\n topConnector = connection.getConnectorIfNotFullyConnected()\n bottomConnector = itemAt\n\n # get data needed to notify the underling data structure\n topLayerID = topConnector.getNodeItem().getLayerID()\n bottomLayerID = bottomConnector.getNodeItem().getLayerID()\n topBlobIndex = topConnector.getIndex()\n bottomBlobIndex = bottomConnector.getIndex()\n\n # notify to change the data\n self.__nodeEditor.tryToConnect(topLayerID, topBlobIndex, bottomLayerID, bottomBlobIndex)\n\n \"\"\" if itemAt is a Node Item (if you pull onto a layer) \"\"\"\n if itemAt is not None and isinstance(itemAt, NodeItem) and not self.disabled:\n # test if connector starts at a top Blob\n if connection.getConnectorIfNotFullyConnected().isTopConnector():\n\n # bottomNode is itemAt\n bottomNode = itemAt\n\n # get layer IDs\n topLayerID = connection.getConnectorIfNotFullyConnected().getNodeItem().getLayerID()\n bottomLayerID = bottomNode.getLayerID()\n topBlobIndex = connection.getConnectorIfNotFullyConnected().getIndex()\n\n # get the Index of the new Blob, should it be necessary to create one\n # (determined in the following for loop)\n bottomBlobIndex = bottomNode.getBottomConnectorCount()\n\n # current connection top name and phase\n topBlobName = connection.getConnectorIfNotFullyConnected().getBlobName()\n topBlobPhase = connection.getConnectorIfNotFullyConnected().getPhase()\n\n # check if there is a connected Node that has a different phase than the currently\n # connecting Node, but has a connection with the same top Blob Name\n topBlobFound = False\n for bottomBlob in bottomNode.getBottomConnectors():\n if len(bottomBlob.getConnectedNodes()) > 0:\n for topNode in bottomBlob.getConnectedNodes():\n for topBlob in topNode.getTopConnectors():\n if topBlob.getBlobName() == topBlobName and topBlob.getPhase() != topBlobPhase:\n bottomBlobIndex = bottomBlob.getIndex()\n topBlobFound = True\n break\n\n # otherwise (if no corresponding top Blob was found)\n # get Index of first empty bottom blob (if available)\n counter = -1\n emptyBlobAvailable = False\n if not topBlobFound:\n for blob in bottomNode.getBottomConnectors():\n counter += 1\n if len(blob.getConnectedNodes()) == 0:\n bottomBlobIndex = counter\n emptyBlobAvailable = True\n break\n\n # add empty bottom blob property\n if not emptyBlobAvailable and not topBlobFound:\n self.__nodeEditor.tryToAddBottomBlob(bottomLayerID, \"\")\n\n # connect nodes\n connected = self.__nodeEditor.tryToConnect(topLayerID, topBlobIndex, bottomLayerID, 
bottomBlobIndex)\n\n # if the connection did not work but a new blob was created, remove it\n if not connected and not emptyBlobAvailable and not topBlobFound:\n bottomNode.removeBottomConnector(bottomBlobIndex)", "def create_connection_stations(city: str, active_nodes: set[Node]) -> \\\n set[tuple[str, str, str, str]]:\n row_set = set()\n for node in active_nodes:\n for neighbor in node.get_neighbours():\n row_set.add((city, node.name, neighbor.name, node.get_color(neighbor)))\n\n return row_set", "def interpop_connections(mat_connections, n_mitral, n_subpop, n_mitral_per_subpop, inter_conn_rate, inter_conn_strength, homeostasy=False):\n if homeostasy:\n init_total = 1.*mat_connections.sum(axis=0)\n tr_init_total = 1.*mat_connections.sum(axis=1)\n\n res_mat = mat_connections\n n_granule = n_subpop\n subpop_start = np.zeros((n_subpop)) # which is the first non interconnected mitral from each subpop\n for mtpop in inter_conn_rate:\n assert mtpop >= 0 and mtpop < n_subpop, \\\n \"Incorrect mitral sub-population number \"+str(mtpop)+\" for inter-connectivity.\"\n for grpop in inter_conn_rate[mtpop]:\n if grpop != mtpop:\n assert grpop >= 0 and grpop < n_granule, \\\n \"Incorrect granule sub-population number \"+str(grpop)+\" for inter-connectivity.\"\n conn = inter_conn_rate[mtpop][grpop]\n assert conn >= 0 and conn <= 1, \"Connectivity must be in [0, 1].\"\n nlinks = int(n_mitral_per_subpop*conn)\n newconn = np.zeros((n_mitral_per_subpop, 1))\n for i in xrange(nlinks):\n try:\n newconn[i + subpop_start[mtpop]] = inter_conn_strength[mtpop][grpop]\n except Exception, e:\n print \"Likely, too much connections to have no overlap, rewrite the code !\"\n print \"EXCEPTION:\", e\n exit(1)\n subpop_start[mtpop] += nlinks\n start = mtpop*n_mitral_per_subpop\n stop = start + n_mitral_per_subpop\n res_mat[start:stop, grpop] = newconn[:, 0]\n \n if not(homeostasy):\n return res_mat, res_mat.transpose()\n else:\n # Connection strengthes are normalized such that each neuron (mitral or granule) receive the same total amount of excitation or inhibition \n # (i.e. the same total of synaptic conductance) as if they were no interglomerular connections\n tr_res_mat = res_mat.transpose().copy()\n tr_res_mat_norm = tr_init_total/tr_res_mat.sum(axis=0) # here the numerator is 1. 
because there is only one granule\n tr_res_mat = tr_res_mat*tr_res_mat_norm\n res_mat_norm = init_total/res_mat.sum(axis=0)\n res_mat = res_mat*res_mat_norm\n return res_mat, tr_res_mat", "def is_connected(solution_set):\n \n # Iterate over all routes\n for route in solution_set:\n \n # Get nodes from route\n route_nodes = get_nodes_from_route(route)\n \n # Get nodes from all other routes\n other_nodes = get_nodes_from_all_routes_except(solution_set, route)\n \n # Iterate over nodes\n for node in route_nodes:\n \n # If node is not found, exit early\n if node not in other_nodes:\n return False\n \n # Else, return true\n return True", "def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n self.successors[nd_out.name].append(nd_in)", "def rebulid_connected_set(tmp, M_x):\n g = Graph(len(tmp))\n for i in range(0, len(tmp)):\n for j in range(i, len(tmp)):\n if Is_neighbor(tmp[i], tmp[j]):\n g.addEdge(i, j)\n cc = g.connectedComponents()\n tmps = []\n x = []\n for i in range(0, len(cc)):\n x_=[]\n tmp_ = list(tmp[cc[i][0]].flatten())\n x_.append(M_x[tmp[cc[i][0]][0]][tmp[cc[i][0]][1]])\n for j in range(1, len(cc[i])):\n tmp_ = tmp_+list(tmp[cc[i][j]].flatten())\n x_.append(M_x[tmp[cc[i][j]][0]][tmp[cc[i][j]][1]])\n x.append(calculate_1_center(x_))\n tmp_ = set(tmp_)\n tmps.append(list(tmp_))\n return tmps, x", "def connexify(self, estimator, nb_connect=5, verbose=False):\n connex_groups_id = list(self.graph.connex_groups)\n connex_pairs = permutations(connex_groups_id, 2)\n new_edges = []\n for conidx1, conidx2 in connex_pairs:\n for _ in range(nb_connect):\n node_idx1 = random.choice(self.graph.connex_groups[conidx1])\n node_idx2 = random.choice(self.graph.connex_groups[conidx2])\n state1 = self.graph.nodes[node_idx1]\n state2 = self.graph.nodes[node_idx2]\n success, X_opt, U_opt, V_opt = self.opt_trajectories(\n (state1, state2), estimator,\n verbose=verbose)\n if success:\n new_edges.append(((node_idx1, node_idx2),\n X_opt, U_opt, V_opt))\n\n for edge in new_edges:\n self.graph.add_edge(*edge)", "def set_connected(self):\n self.connected = True", "def test_component_coolant_connection_list(self):\n with PhysicsEngineHarness('tests/engineering-test.json') as physics_engine:\n engineering = physics_engine.get_state().engineering\n\n connected_loops = engineering.components.CoolantConnectionMatrix()\n\n self.assertEqual(connected_loops.shape, (3, N_COMPONENTS))", "def test_component_coolant_connection_list(self):\n with PhysicsEngineHarness('tests/engineering-test.json') as physics_engine:\n engineering = physics_engine.get_state().engineering\n\n connected_loops = engineering.components.CoolantConnectionMatrix()\n self.assertEqual(connected_loops.shape, (3, N_COMPONENTS))", "def threeRoadConnect(data, x1, y1, x2, y2):\n temp_data = np.pad(data, (1, 1), 'constant', constant_values=0)\n # init\n points = [[x1, y1]]\n flagX = False\n flagY = False\n if not data[y1][x1] == data[y2][x2]:\n return False, []\n # Two lines parallel to the X-AXIS\n posX = 0\n for i in range(0, 18):\n if temp_data[y1 + 1][i] == 0 and temp_data[y2 + 1][i] == 0:\n if XRoadConnect(temp_data, i, y1 + 1, x1 + 1, y1 + 1) \\\n and XRoadConnect(temp_data, i, y2 + 1, x2 + 1, y2 + 1) \\\n and YRoadConnect(temp_data, i, y1 + 1, i, y2 + 1):\n flagX = True\n posX = i - 1\n if flagX:\n points.append([posX, y1])\n points.append([posX, 
y2])\n\n # Two lines parallel to the Y-AXIS\n posY = 0\n for i in range(0, 10):\n if temp_data[i][x1 + 1] == 0 and temp_data[i][x2 + 1] == 0:\n if YRoadConnect(temp_data, x1 + 1, i, x1 + 1, y1 + 1) \\\n and YRoadConnect(temp_data, x2 + 1, i, x2 + 1, y2 + 1) \\\n and XRoadConnect(temp_data, x1 + 1, i, x2 + 1, i):\n flagY = True\n posY = i - 1\n if flagY and flagX == False:\n points.append([x1, posY])\n points.append([x2, posY])\n\n if flagX or flagY:\n data[y1][x1] = data[y2][x2] = 0\n points.append([x2, y2])\n print(data)\n print(3)\n return flagX or flagY, points", "def __check_and_join_row(self, x: int, y: int, tree: int, increment: int) -> bool:\n for m in [self.__maze[x + (2 * increment), y + i] for i in (-1, 0, 1)]:\n # if any square maps to a different maze connect it and redo the mappings\n if m == 0:\n continue\n main_tree = self.__mappings.get(m, tree)\n if main_tree != tree:\n self.__activate(x + increment, y, tree)\n self.__activate(x + (2 * increment), y, tree)\n self.__remap(tree, main_tree)\n return False\n return True", "def construct_fast_graph_connection(coord_list, radie):\n\n connection_distance = []\n connection = []\n coord_list_tree = scipy.spatial.cKDTree(coord_list)\n for j, data in enumerate(coord_list):\n '''save nodes which are in range'''\n connections_ckd = coord_list_tree.query_ball_point(data, radie)\n for i in connections_ckd:\n #only save upper half of the matrix\n if i > j:\n #save the connection\n connection.append([j, i])\n #save the relative distance of the nodes\n connection_distance.append(np.hypot(coord_list[i,0]-data[0], coord_list[i,1]-data[1]))\n\n connection_distance = np.array(connection_distance)\n connection = np.array(connection)\n\n\n return connection, connection_distance", "def reverse_cuthill_mckee(self, permutation: bool = True) -> [np.ndarray, np.ndarray]:\n # 1. get the connectivity matrix\n nodeids, C = self.node_connectivity_matrix()\n\n # 2. compute the new node order\n perm = scipy.sparse.csgraph.reverse_cuthill_mckee(C, False)\n\n # 3. create a dictionary with new node order\n nodeids = {nid: perm[i] for nid, i in nodeids.items()}\n\n if permutation:\n # 4. create the permutation matrix for node order if requested\n P = C[np._ix(perm, perm)]\n return nodeids, P\n else:\n # 4. 
or return just the new node order\n return nodeids", "def Connected(self) -> bool:", "def Connected(self) -> bool:", "def twoRoadConnect(data, x1, y1, x2, y2):\n flag = False\n points = [[x1, y1]]\n if not data[y1][x1] == data[y2][x2]:\n return False, []\n if YRoadConnect(data, x1, y1, x1, y2) and XRoadConnect(data, x2, y2, x1, y2) and data[y2][x1] == 0:\n flag = True\n points.append([x1, y2])\n elif XRoadConnect(data, x1, y1, x2, y1) and YRoadConnect(data, x2, y2, x2, y1) and data[y1][x2] == 0:\n flag = True\n points.append([x2, y1])\n if flag:\n data[y1][x1] = data[y2][x2] = 0\n points.append([x2, y2])\n print(data)\n print(2)\n return flag, points", "def verify_intervlan_routing(self):\n for src in self.host_information:\n for dst in self.host_information:\n if dst > src:\n self.check_host_connectivity_by_id(src, dst)", "def update_cell_nodes(self):\n self.cells['nodes'] = -1\n\n for c in range(self.Ncells()):\n # consider two edges at a time, and find the common node\n for i,(ja,jb) in enumerate(circular_pairs(self.cell_to_edges(c))):\n for n in self.edges['nodes'][ja,:]: \n if n in self.edges['nodes'][jb]:\n self.cells['nodes'][c,i] = n\n break", "def check_pointing_pair(self):\n\n for index in range(self.board_size):\n squ = self.squares[index]\n nos = self.get_numbers([self.possibles[cell[0]][cell[1]] for cell in squ])\n\n for num in nos:\n s_row, s_col, found = self.same_row_col(num, squ)\n if s_row:\n row = found[0][0]\n for c in range(self.board_size):\n if (row, c) not in squ:\n if num in self.possibles[row][c]:\n self.possibles[row][c].remove(num)\n if s_col:\n col = found[0][1]\n for r in range(self.board_size):\n if (r, col) not in squ:\n if num in self.possibles[r][col]:\n self.possibles[r][col].remove(num)", "def topology_random_connect(self, probability):\n\t\tfor i in range(len(self.sites) - 1):\n\t\t\tfor j in range(i + 1, len(self.sites)):\n\t\t\t\tif not (self.sites[j] in self.sites[i].neighbors):\n\t\t\t\t\tif numpy.random.rand() < probability:\n\t\t\t\t\t\tself.sites[i].neighbors.append(self.sites[j])\n\t\t\t\t\t\tself.sites[j].neighbors.append(self.sites[i])", "def connect_all(graph, nodeset):\n for element in nodeset:\n graph.add_node(element)\n for element1 in nodeset:\n for element2 in nodeset:\n if not element1 == element2:\n graph.add_edge(element1, element2)\n return graph", "def transition(self):\n for index, row in self.community_table.iterrows():\n\n # CANOPY BASED SUCCESSION\n if row.succession_code == 1:\n self.ecocommunities[(self.ecocommunities == index) &\n (self.canopy > row['max_canopy'])] = row.to_ID\n\n # AGE BASED SUCCESSION\n elif row.succession_code == 2:\n self.ecocommunities = np.where((self.ecocommunities == index) &\n (self.forest_age > row['age_out']),\n self.climax_communities, self.ecocommunities)", "def connecting(node1, node2):\n comp_list = []\n \"\"\":type : list[components.Component]\"\"\"\n if node1 == node2:\n return []\n for comp in node1.connected_comps:\n if comp.neg == node2:\n comp_list.append(comp)\n elif comp.pos == node2:\n comp_list.append(comp)\n return comp_list", "def is_connected(self) -> bool:", "def breakConnections(self):\n for connections in pm.listConnections(self.data['shapeNode'], plugs=True, connections=True):\n # if connections[-1].nodeType() in ['shadingEngine', 'displacementShader']:\n if cmds.getClassification(connections[-1].nodeType(), satisfies=\"shader\"):\n pm.disconnectAttr(str(connections[-1]), str(connections[0]))\n self.logger.info(\"Break Connection : %s > %s\" % (str(connections[-1]), 
str(connections[0])))", "def connect_portals(self):\n portal_coords = [tuple(coord)\n for coord in np.argwhere(self.maze.isalpha())]\n for portal_coord in portal_coords:\n\n portal_x = portal_coord[2]\n portal_y = portal_coord[1]\n portal_z = portal_coord[0]\n\n x_on_left = portal_x <= 3\n x_on_right = portal_x >= self.rim_x\n x_on_outside = x_on_left or x_on_right\n\n y_on_left = portal_y <= 3\n y_on_right = portal_y >= self.rim_y\n y_on_outside = y_on_left or y_on_right\n\n if x_on_outside or y_on_outside:\n portal_type = \"upward\"\n else:\n portal_type = \"downward\"\n\n for other_portal_coord in portal_coords:\n other_x = other_portal_coord[2]\n other_y = other_portal_coord[1]\n if (other_x == portal_x) and (other_y == portal_y):\n continue\n elif self.maze[portal_coord] == self.maze[other_portal_coord]:\n other_z = other_portal_coord[0]\n\n # Look for a the correspondig portal with a z-coord 1 lower\n if portal_type == \"upwards\":\n if portal_z == other_z + 1:\n self.graph.add_edge(\n portal_coord, other_portal_coord)\n\n # Look for a the correspondig portal with a z-coord 1 higher\n elif portal_type == \"downward\":\n if portal_z == other_z - 1:\n self.graph.add_edge(\n portal_coord, other_portal_coord)", "def is_connected(self):\n if self.V < 1:\n raise ValueError(\"empty graph\")\n if self.V < 2:\n return True\n if self.E == 0:\n return False\n cc = self.cc()\n return int(cc.max() == 0)", "def drawConns(self):\r\n for node in self.G.nodes():\r\n pX,pY = self.G.node[node][\"site\"].getCenter()\r\n for nb in self.G.neighbors(node):\r\n nbX,nbY = self.G.node[nb][\"site\"].getCenter()\r\n self.connLines.append(self.canvasCirkt.create_line(pX,pY,nbX,nbY))\r\n self.canvasCirkt.update()", "def test_is_strongly_connected(self):\n G = DiGraph([(0, 1), (1, 2), (2, 0)])\n assert_true(is_strongly_connected(G))", "def update(self):\n self.next = self.now.copy()\n for crow in range(self.row):\n for ccol in range(self.col):\n around = self.neighbors(crow, ccol)\n if (around < 2 or around > 3):\n self.next[crow, ccol] = False\n\n elif ((not self.now[crow, ccol]) and\n around == 3):\n self.next[crow, ccol] = True\n\n self.now = self.next.copy()\n return self.now" ]
[ "0.69678575", "0.6915727", "0.6357731", "0.6154334", "0.6130675", "0.6066713", "0.6047329", "0.59654814", "0.59127927", "0.5845888", "0.5836875", "0.57811123", "0.57547414", "0.5699769", "0.5684559", "0.56675184", "0.5660186", "0.56549203", "0.56542313", "0.5649522", "0.5607197", "0.55956805", "0.5586937", "0.5567009", "0.5558918", "0.5547059", "0.55232763", "0.5504804", "0.55010617", "0.54866743", "0.547285", "0.5468236", "0.54627466", "0.54447556", "0.5439548", "0.54359674", "0.5434466", "0.54232216", "0.5417652", "0.5382866", "0.5379514", "0.53617895", "0.5360222", "0.53408617", "0.53366196", "0.5322642", "0.5320925", "0.53205764", "0.53148395", "0.529921", "0.5295101", "0.5268652", "0.52667797", "0.52661204", "0.52517104", "0.5236828", "0.5236725", "0.5236399", "0.5229287", "0.5227355", "0.5226585", "0.522157", "0.5205645", "0.52020764", "0.51984864", "0.51832825", "0.5181051", "0.517924", "0.51789427", "0.5178692", "0.5176686", "0.51766473", "0.51672983", "0.5163555", "0.5163044", "0.5155854", "0.51538414", "0.5150526", "0.5137761", "0.51312923", "0.5130713", "0.51271474", "0.5120089", "0.5113525", "0.5110983", "0.5110983", "0.51097625", "0.5109746", "0.5106978", "0.5106886", "0.51061624", "0.50944453", "0.5094234", "0.50910765", "0.50880015", "0.50868607", "0.50828445", "0.5082525", "0.50823087", "0.50787485", "0.50769097" ]
0.0
-1
If a radius of < 0 is found, its neighbors are used to get the correct radius
def matchRadius(self): print('Making sure all points and radii match...') rads = [] for sec in self.secRads.keys(): for r in self.secRads[sec]: rads.append(r) if not self.medrad: self.medrad = rads[int(len(rads)/2)] print('Median radius is: %.5f' % self.medrad) self.uniqueNodes = [] self.uniqueRads = [] for sec in self.sections.keys(): for n in xrange(len(self.sections[sec])): # check to see if that point already exists in uniquesecs if self.sections[sec][n] in self.uniqueNodes: radInd = self.uniqueNodes.index(self.sections[sec][n]) self.secRads[sec][n] = self.uniqueRads[radInd] else: self.uniqueNodes.append(self.sections[sec][n]) if self.secRads[sec][n] <= 0 or self.secRads[sec][n] > 10000: print('Bad radius found, section %i node %i' %(sec, n)) self.uniqueRads.append(self.medrad) self.secRads[sec][n] = self.medrad print('Replaced with: %.5f' % self.uniqueRads[-1]) else: self.uniqueRads.append(self.secRads[sec][n]) print('Radii fixed.') return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_neighborhood_radius_consistent():\r\n grid_spacing = random.uniform(1e-6, 10.0)\r\n center = numpy.random.random(random.randint(1, 3))\r\n\r\n # Find points with radius neighborhood\r\n radius = random.uniform(_distance_to_nearest(grid_spacing, center), grid_spacing*5)\r\n points = ill.get_neighborhood_radius(grid_spacing, center, radius)\r\n\r\n # Every points found within this radius, should be in the points of a larger radius\r\n outer_points = ill.get_neighborhood_radius(grid_spacing, center,\r\n radius+random.uniform(0.0, grid_spacing*5))\r\n\r\n for point in points:\r\n assert point in outer_points", "def test_get_neighborhood_radius_correct():\r\n grid_spacing = random.uniform(1e-6, 4.0)\r\n dimensionality = random.randint(1, 3)\r\n\r\n center = numpy.random.random(dimensionality)*2 - 1.0\r\n radius = random.uniform(1e-6, grid_spacing*2)\r\n\r\n # Find all points on grid in range with exhaustive search\r\n grid = _make_grid(grid_spacing, dimensionality,\r\n numpy.min(center)-radius, numpy.max(center)+radius)\r\n expected_neighborhood = [point for point in grid if calculate.distance(point, center) <= radius]\r\n\r\n assert (sorted(ill.get_neighborhood_radius(grid_spacing, center, radius))\r\n == sorted(expected_neighborhood))", "def find_channel_neighbors(geom, radius):\n return (squareform(pdist(geom)) <= radius)", "def find_channel_neighbors(geom, radius):\n return (squareform(pdist(geom)) <= radius)", "def _radius_neighbors_reduce_func(self, dist, start, radius, return_distance):\n neigh_ind = [np.where(d <= radius)[0] for d in dist]\n\n if return_distance:\n if self.effective_metric_ == \"euclidean\":\n dist = [np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)]\n else:\n dist = [d[neigh_ind[i]] for i, d in enumerate(dist)]\n results = dist, neigh_ind\n else:\n results = neigh_ind\n return results", "def get_neighbours_round(self, cell, radius):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width or radius < 2):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-radius,y+radius+1) if 0<=i<width for j in range(x-radius,x+radius+1) if (0<=j<length)]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\ti , j = neigh\n\t\t\tif round(math.sqrt((j-x)**2+(i-y)**2),4) < round(radius,4):\n\t\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours", "def _radius_neighbors_from_graph(graph, radius, return_distance):\n assert graph.format == \"csr\"\n\n no_filter_needed = bool(graph.data.max() <= radius)\n\n if no_filter_needed:\n data, indices, indptr = graph.data, graph.indices, graph.indptr\n else:\n mask = graph.data <= radius\n if return_distance:\n data = np.compress(mask, graph.data)\n indices = np.compress(mask, graph.indices)\n indptr = np.concatenate(([0], np.cumsum(mask)))[graph.indptr]\n\n indices = indices.astype(np.intp, copy=no_filter_needed)\n\n if return_distance:\n neigh_dist = _to_object_array(np.split(data, indptr[1:-1]))\n neigh_ind = _to_object_array(np.split(indices, indptr[1:-1]))\n\n if return_distance:\n return neigh_dist, neigh_ind\n else:\n return neigh_ind", "def _compute_euclidean_neigh_matrix(src, d_matrix, radius):\n\n n_max = 100\n n_min = 3\n reached_points = np.array([0])\n counter = 0\n n_neigh = []\n list_neigh = []\n\n while counter < reached_points.shape[0] < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: 
d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n # Check the number of neighbours\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n list_neigh.append(aux)\n reached_points = np.append(reached_points,\n aux[~np.in1d(aux, reached_points)])\n counter += 1\n\n if counter >= reached_points.shape[0]:\n raise ValueError('Too small value of the radius:'\n 'the neighbour-matrix is not connected')\n elif src.shape[0] == reached_points.shape[0]:\n while counter < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n\n list_neigh.append(aux)\n counter += 1\n\n n_neigh_max = max(n_neigh)\n n_matrix = np.zeros([src.shape[0],\n n_neigh_max], dtype=int) - 1\n for i in range(src.shape[0]):\n n_matrix[i, 0:list_neigh[i].shape[0]] = list_neigh[i]\n index_ord = np.argsort(n_matrix[:, 0])\n n_matrix = n_matrix[index_ord]\n return n_matrix\n else:\n raise RuntimeError(\"Some problems during\"\n \"computation of neighbours.\")", "def nearest_in_n_sphere(self, value, r):\n return self.nearest_in_bounding_box(value, r)\n \n # This seems right\n # return self.binary_search_find_nearest_neighbors_in_radius(value, r)\n \n # This seems wrong\n # return self.recur_find_nearest_n_neighbor(value, r)", "def get_neighbors(index, radius, height, width):\n # Calculate the original 2-D coordinates of the central pixel.\n row, col = index // width, index % width\n\n # Get a grid of possible candidates that are close to the central pixel.\n r = int(radius)\n x = np.arange(max(col - r, 0), min(col + r + 1, width))\n y = np.arange(max(row - r, 0), min(row + r + 1, height))\n X, Y = np.meshgrid(x, y)\n\n # Determine which candidates are within the given radius of the pixel.\n R = np.sqrt(((X - col)**2 + (Y - row)**2))\n mask = R < radius\n return (X[mask] + Y[mask]*width).astype(np.int), R[mask]", "def neighbors(self, moore=True, include_center=True, radius=1):\n return self.model.grid.get_neighbors(self.pos, moore, include_center,\n radius)", "def get_neighbors_ring(self,distance):\n return self.grid.get_neighbors_ring(self,distance)", "def nearest_neighbor_search_radius_modified(tree, target_point, hr, distance, nearest=None, depth=0):\r\n \r\n global nearest_nn\r\n global distance_nn\r\n \r\n if tree is None:\r\n return \r\n # at the end the whole tree is pruned - None\r\n \r\n k = len(target_point.position) - 1 # k = 2\r\n \r\n cur_node = tree.location # current tree's node\r\n left_branch = tree.left_child # its left branch\r\n right_branch = tree.right_child # its right branch\r\n \r\n nearer_kd = further_kd = None\r\n nearer_hr = further_hr = None\r\n left_hr = right_hr = None\r\n \r\n # Select axis based on depth so that axis cycles through all valid values\r\n axis_pom = depth % k\r\n axis = 'x' if axis_pom == 0 else 'y'\r\n \r\n # hr = [(min_val-delta, max_val+delta), (max_val+delta, min_val-delta)] 
# initial splitting plane\r\n # = [(-2, 22), (22, -2)]\r\n \r\n # split the hyperplane depending on the axis\r\n if axis == 'x':\r\n left_hr = [hr[0], (cur_node.position[0], hr[1][1])]\r\n right_hr = [(cur_node.position[0],hr[0][1]), hr[1]]\r\n \r\n if axis == 'y':\r\n left_hr = [(hr[0][0], cur_node.position[1]), hr[1]]\r\n right_hr = [hr[0], (hr[1][0], cur_node.position[1])]\r\n \r\n # check which hyperplane the target point belongs to\r\n # if the target_point is on the left/bottom side\r\n if target_point.position[axis_pom] <= cur_node.position[axis_pom]:\r\n nearer_kd = left_branch # closer sub-tree is the left/bottom_branch\r\n further_kd = right_branch # further sub-tree is the right/top_branch\r\n nearer_hr = left_hr # closer hyperplane is the left/bottom_hyperplane\r\n further_hr = right_hr # futher hyperplane is the right/top_hyperplane\r\n \r\n # if the target_point is on the right/top side\r\n if target_point.position[axis_pom] > cur_node.position[axis_pom]:\r\n nearer_kd = right_branch\r\n further_kd = left_branch\r\n nearer_hr = right_hr\r\n further_hr = left_hr\r\n \r\n # check whether the current node is closer\r\n # print(\"curr node\", cur_node) #test\r\n # print(\"targ node\", target_point)\r\n dist = (cur_node.position[0] - target_point.position[0])**2 + (cur_node.position[1] - target_point.position[1])**2\r\n \r\n if dist < distance:\r\n nearest = cur_node\r\n distance = dist\r\n\r\n if dist < radius: # and all([i != j for i, j in zip(cur_node, target_point)]):\r\n in_range.append(cur_node)\r\n \r\n # go deeper in the tree, pass the sub-tree and hyperplane in which the target_point bellow,\r\n # pass current best distance and closest node, increase the depth \r\n nearest_neighbor_search_radius_modified(nearer_kd, target_point, nearer_hr, distance, nearest, depth+1)\r\n \r\n # once we reached the leaf node we check whether whether we found closer points inside the hypersphere\r\n if distance < distance_nn:\r\n nearest_nn = nearest\r\n distance_nn = distance\r\n \r\n # a nearer point (px,py) could only be in further_kd (further_hr) -> explore it\r\n px = compute_closest_coordinate(target_point.position[0], further_hr[0][0], further_hr[1][0])\r\n py = compute_closest_coordinate(target_point.position[1], further_hr[1][1], further_hr[0][1])\r\n \r\n # check whether it is closer than the current nearest neighbor => whether a hypersphere crosses the hyperplane\r\n dist = (px - target_point.position[0])**2 + (py - target_point.position[1])**2\r\n \r\n # explore the further kd-tree / hyperplane if necessary\r\n if radius > distance_nn: \r\n check_dist = radius\r\n else:\r\n check_dist = distance_nn\r\n \r\n if dist < check_dist:\r\n nearest_neighbor_search_radius_modified(further_kd, target_point, further_hr, distance, nearest, depth+1)\r\n \r\n return in_range", "def neighbourhood(self, query, radius):\n tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.get_all_in_range(query, radius)))\n neighbourhood_trees = list(itertools.chain.from_iterable(map(tree_neighbourhood, self.trees)))\n return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x, query) < radius, self.pool))", "def find_neighbors(self):\n x, y = self.position\n\n for i in range(3):\n for j in range(3):\n try:\n self.neighbors.append(self.stitches[(x - 1 + i, y - 1 + j)].position)\n except:\n pass\n\n # this cell will be added by default so we must delete at the end\n self.neighbors.remove(self.position)", "def get_neighbourhood(self, radius: int = 1) -> set:\n if radius == 0:\n return set()\n 
result = self.neighbours.copy()\n if radius > 1:\n # Recursively get neighbours of neighbours.\n for neighbour in self.neighbours:\n result |= neighbour.get_neighbourhood(radius - 1)\n return result - {self}", "def radius(self):\n if self._radius is None:\n translated_xyz = translate_to_center_of_mass(self.get_xyz())\n _, symbols, x, y, z = get_xyz_matrix(translated_xyz)\n border_elements = list() # a list of the farthest element/s\n r = 0\n for si, xi, yi, zi in zip(symbols, x, y, z):\n ri = xi ** 2 + yi ** 2 + zi ** 2\n if ri == r:\n border_elements.append(si)\n elif ri > r:\n r = ri\n border_elements = [si]\n atom_r = max([get_atom_radius(si) if get_atom_radius(si) is not None else 1.50 for si in border_elements])\n self._radius = r ** 0.5 + atom_r\n logger.info('Determined a radius of {0:.2f} Angstrom for {1}'.format(self._radius, self.label))\n return self._radius", "def neighbors(self, tol=1e-8):\n diffs = np.remainder(\n np.array([np.subtract.outer(xs, xs) for xs in self.rs.T]) + .5, 1) - .5\n\n if self.shear != 0:\n xdiff, ydiff = diffs[:2]\n im = np.round(ydiff)\n xdiff -= im * self.shear\n ydiff = ydiff - im\n xdiff -= np.round(xdiff)\n diffs[:2] = xdiff, ydiff\n\n sigmadists = np.add.outer(self.diameters, self.diameters) / 2.\n dists = np.sqrt(np.sum(diffs**2, axis=0))\n\n return self.Neighbors(dists - sigmadists < tol, diffs * self.L)", "def check_overlapping(self, fit_radius=True, merge=True, mindist='auto', update_geometry=False):\n\n from scipy.spatial.distance import cdist\n from scipy.spatial import cKDTree\n # index = list(self.graph)[:]\n # centers = np.array(list(zip(*nx.get_node_attributes(self.graph,'center').values()))).T\n # pores_radii = np.fromiter(nx.get_node_attributes(self.graph,'radius').values(),dtype=np.float)\n\n pores_radii = list(nx.get_node_attributes(\n self.graph, 'radius').items())\n # we begin by the bigger pores\n pores_radii.sort(key=lambda tup: tup[1], reverse=True)\n index, pores_radii = zip(*pores_radii)\n pores_radii = np.array(pores_radii)\n\n centers = nx.get_node_attributes(self.graph, 'center')\n centers = [np.array(centers[i]) for i in index]\n centers = np.array(centers)\n # distances = cdist(centers,centers)\n kdtree = cKDTree(centers)\n\n stop = False\n\n while not stop:\n\n stop = True\n\n for i, n1 in enumerate(index):\n\n #distances = cdist(centers,[self.graph.nodes[n1]['center']])[:,0]\n\n if self.graph.has_node(n1):\n\n if mindist == 'auto':\n gap = self.graph.nodes[n1]['radius']*0.02\n else:\n gap = mindist\n\n labels = kdtree.query_ball_point(\n self.graph.nodes[n1]['center'], 2.5*self.graph.nodes[n1]['radius'])\n labels.remove(i)\n # distances,labels = kdtree.query(x=net.graph.nodes[n1]['center'],2*self.graph.nodes[n1]['radius'],n_jobs=1)\n # labels.remove(i)\n #distance *= 0.998\n distances = cdist(centers[labels], [self.graph.nodes[n1]['center']])[\n :, 0]*0.998\n d = distances - pores_radii[labels]\n d -= self.graph.nodes[n1]['radius']\n # On commence par la distance la plus faible\n d_and_labels = [(d[j], k) for j, k in enumerate(labels)]\n d_and_labels.sort(key=lambda t: t[0])\n\n for (dist, ind) in d_and_labels:\n\n n2 = index[ind]\n if self.graph.has_node(n2) and self.graph.has_node(n1):\n\n # Le centre du pore né est dans la sphère du pore n1 OU il y a overlapping et fit_radius == False\n # -> Merging ou suppression du pore de plus petit rayon\n if (dist + self.graph.nodes[n2]['radius'] <= gap) or (dist < gap and dist + self.graph.nodes[n2]['radius'] > gap and not fit_radius):\n\n if (self.graph.nodes[n1]['radius'] >= 
self.graph.nodes[n2]['radius']):\n if merge:\n self.merge_pores(n1, n2)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: merging (deleting\", n2, \")\")\n else:\n self.remove_pore(n2)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n2)\n\n else:\n if merge:\n self.merge_pores(n2, n1)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: merging (deleting\", n1, \")\")\n else:\n self.remove_pore(n1)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n2)\n # On termine l'itération car le pore n1 n'existe plus...\n break\n\n # Overlapping et fit_radius == True\n # 3 options:\n # -Le rayon du pore le plus petit est modifié\n # -Merging\n # -Suppression\n elif dist < gap and dist + self.graph.nodes[n2]['radius'] > gap and fit_radius:\n if (self.graph.nodes[n1]['radius'] >= self.graph.nodes[n2]['radius']):\n r = dist + \\\n self.graph.nodes[n2]['radius'] - \\\n self.graph.nodes[n1]['radius'] - gap\n if self.graph.nodes[n2]['radius'] >= r and r > 0:\n self.graph.nodes[n2]['radius'] = r\n pores_radii[ind] = r\n print(\n \"pore\", n1, \"and\", n2, \"overlap: changin radius of\", n2, \"to\", r)\n else:\n if merge:\n self.merge_pores(n1, n2)\n print(\n \"pore\", n1, \"and\", n2, \"overlap: merging (deleting\", n2, \")\")\n else:\n self.remove_pore(n2)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n2)\n else:\n if self.graph.nodes[n1]['radius'] >= dist:\n self.graph.nodes[n1]['radius'] = dist\n pores_radii[i] = dist\n print(\n \"pore\", n1, \"and\", n2, \"overlap: changin radius of\", n1, \"to\", dist)\n else:\n if merge:\n self.merge_pores(n2, n1)\n print(\n \"pore\", n1, \"and\", n2, \"overlap: merging (deleting\", n1, \")\")\n else:\n self.remove_pore(n1)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n1)\n # On termine l'itération car le pore n1 n'existe plus...\n break\n\n if update_geometry:\n self.set_auto_throats_length()\n self.set_auto_throats_radius()", "def get_neighbours(self, business, num=5, add_self=False):\n\n def radius_step(radius, num_longtidues, num_latitudes, time):\n \"\"\"expand the search-radius exponentially\"\"\"\n step = int(exp(time))\n radius['long_down'] = radius['long_down'] - step\n if radius['long_down'] <= 0:\n radius['long_down'] = 0\n radius['long_up'] = radius['long_up'] + step\n if radius['long_up'] >= num_longtidues - 1:\n radius['long_up'] = num_longtidues - 1\n radius['lat_down'] = radius['lat_down'] - step\n if radius['lat_down'] <= 0:\n radius['lat_down'] = 0\n radius['lat_up'] = radius['lat_up'] + step\n if radius['lat_up'] >= num_latitudes - 1:\n radius['lat_up'] = num_latitudes - 1\n\n cell = self.get_cell(business)\n b_long = business.longitude\n b_lat = business.latitude\n radius = {'long_down': cell[0], 'long_up': cell[0] + 1,\n 'lat_down': cell[1], 'lat_up': cell[1] + 1}\n ret = []\n time = 0\n inner_radius = 0\n while len(ret) < num and inner_radius < 100:\n found = []\n radius_step(radius, self.longitudes.size, self.latitudes.size,\n time)\n time = time + 1\n for row in range(radius['long_down'], radius['long_up']):\n for col in range(radius['lat_down'], radius['lat_up']):\n if row in self.cells and col in self.cells[row]:\n for item in self.cells[row][col]:\n if item not in ret:\n found.append(item)\n if (len(found) + len(ret)) < num:\n continue\n # We approximate the in-radius of the search-rectangle by half of\n # the distance between the centers of left and right border\n # (Not exactly the in-radius on the surface of a sphereoid, but\n # easier to calculate)\n inner_radius = 
haversine((self.longitudes[radius['long_down']],\n self.latitudes[cell[1]]),\n (self.longitudes[radius['long_up']],\n self.latitudes[cell[1]])) / 2\n for neighbour in found:\n n_long = neighbour['longitude']\n n_lat = neighbour['latitude']\n dist = haversine((b_long, b_lat), (n_long, n_lat))\n # make sure we only include businesses in the in-circle of the\n # search-rectangle\n if dist <= inner_radius and \\\n (add_self or neighbour['index'] != business.name):\n neighbour['distance'] = dist\n ret.append(neighbour)\n return sorted(ret, key=itemgetter('distance'))[:num]", "def getRadius(self):\r\n if len(self._indices)==0:\r\n return 0\r\n big=Cluster.distance(self,self._dataset.getPoint(self._indices[0]))\r\n for i in range (len(self._indices)):\r\n dist=Cluster.distance(self,self._dataset.getPoint(self._indices[i]))\r\n if (dist>big):\r\n big=dist\r\n return big", "def get_nearest_neighbors(self, kdt, radius=8):\n neighbors = kdt.query_radius(np.array([self.position[:-1]]), r = radius)\n return neighbors[0][1:]", "def _get_node_neighbors(\n self, node: Tuple[int, int], radius: int = 1\n ) -> List[Tuple[int, int]]:\n row_range = range(\n max(node[0] - radius, 0),\n min(node[0] + radius, self.n_rows - 1) + 1,\n )\n column_range = range(\n max(node[1] - radius, 0),\n min(node[1] + radius, self.n_columns - 1) + 1,\n )\n return list(itertools.product(row_range, column_range))", "def locate_neighbors(grouped, row, column, width, height, reach):\n neighbors = []\n for row_val in range(2*int(reach) + 1):\n for col_val in range(2*int(reach) + 1):\n row_final = row - int(reach) + row_val\n col_final = column - int(reach) + col_val\n if col_final == column and row_final == row:\n continue\n if col_final >= width or col_final < 0:\n continue\n if row_final >= height or row_final < 0:\n continue\n row_num = (row_final * width) + col_final\n final_int = grouped[row_num][0]\n neighbors.append(final_int)\n return neighbors", "def neighbors(i , j) :\n ns = []\n # vector de direction\n dx = [+1, +1, 0, 1]\n dy = [0, +1, 1, -1]\n for d in range(4) :\n ns.append((i + dx[d], j + dy[d]))\n #remove neagative element\n ns = [i for i in ns if i[0] >= 0 and i[1] >= 0]\n return ns", "def get_radius_idx(x, y, x0, y0, r, Tree, n_reloc=0,\r\n min_months=24, max_reloc=3, time=None, height=None):\r\n\r\n # Query the Tree from the center of cell\r\n idx = Tree.query_ball_point((x0, y0), r)\r\n\r\n #print 'query #: 1 ( first search )'\r\n\r\n if len(idx) < 2:\r\n return idx\r\n\r\n if time is not None:\r\n n_reloc = max_reloc\r\n\r\n if n_reloc < 1:\r\n return idx\r\n\r\n # Relocate center of search radius and query again\r\n for k in range(n_reloc):\r\n\r\n # Compute new search location => relocate initial center\r\n x0_new, y0_new = np.median(x[idx]), np.median(y[idx])\r\n\r\n # Compute relocation distance\r\n reloc_dist = np.hypot(x0_new-x0, y0_new-y0)\r\n\r\n # Do not allow total relocation to be larger than the search radius\r\n if reloc_dist > r:\r\n break\r\n\r\n #print 'query #:', k+2, '( reloc #:', k+1, ')'\r\n #print 'relocation dist:', reloc_dist\r\n\r\n idx = Tree.query_ball_point((x0_new, y0_new), r)\r\n\r\n # If max number of relocations reached, exit\r\n if n_reloc == k+1:\r\n break\r\n\r\n # If time provided, keep relocating until time-coverage is sufficient\r\n if time is not None:\r\n\r\n t_b, x_b = binning(time[idx], height[idx], dx=1/12., window=1/12.)[:2]\r\n\r\n print(('months #:', np.sum(~np.isnan(x_b))))\r\n\r\n # If sufficient coverage, exit\r\n if np.sum(~np.isnan(x_b)) >= min_months:\r\n 
break\r\n\r\n return idx", "def update_neighbors(self):\n neighbors = []\n for i in range(-1, 2):\n for j in range(-1, 2):\n if (i, j) == (0, 0):\n continue\n try:\n y, x = self.loc[0]+i, self.loc[1]+j\n neighbor = self.board.array[y, x]\n if neighbor > 0:\n neighbors.append(neighbor)\n except:\n continue\n \n self.neighbors = neighbors", "def init_neighbors(self, element):\n def distance(loc1, loc2):\n '''L2 metric distance between two equi-dimensional coordinates.'''\n return np.sqrt(np.sum(np.square(np.subtract(loc1, loc2))))\n\n element.clear_neighbors() # Clean slate\n for e in self.elements:\n if (e.idx != element.idx) & \\\n (distance(element.coords, e.coords) <= self.crit_radius):\n element.add_neighbor(e)\n # TODO: add edge length attribute", "def neighbourhood_radius_squared(distances, new_cache):\n ret = neighbourhood_radius(distances, new_cache=new_cache)\n ret.name = 'neighbourhood_radius_squared'\n\n return ret ** 2 # Square it!", "def FindClosestPointWithinRadius(self, p_float, , p_float_4):\n ...", "def get_radius(self):\r\n return 1", "def find_loners(radec, radec_all, radius):\n \n loners = np.ones(len(radec))\n for i,(ra,dec) in enumerate(radec):\n dra = abs(radec_all[:,0] - ra)\n ddec = abs(radec_all[:,1] - dec)\n keep = np.logical_and(dra < radius, ddec < radius)\n r = np.sqrt((dra[keep]**2 + ddec[keep]**2))\n r = r[r != 0]\n if any(r < radius):\n loners[i] = False\n \n return loners", "def create_epsilon_neighbourhoods(self):\n self.neigbors_clf = NearestNeighbors(radius=self.epsilon, algorithm='ball_tree')\n self.neigbors_clf.fit(self.data)\n _, neigh_idx = self.neigbors_clf.radius_neighbors(self.data)\n return neigh_idx", "def radius(self) -> int:\n pass", "def _neighbors(self, r, c):\n for dr, dc in [(-1, -1), (-1, 0), (-1, 1), (1, 0), (0, -1), (0, 1), (1, 1), (1, -1)]:\n if (0 <= r + dr < self.H) and (0 <= c + dc < self.W):\n yield r + dr, c + dc", "def inradius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, cast(Segment, self.edges[0]).midpoint)", "def objects_radius(self, centre, radius):", "def radius_square(self):\n try: \n return self._radius_2\n except AttributeError:\n center = self.center()\n self._radius_2 = max( (v.vector() - center).dot_product(\n v.vector() - center) for v in\n self.vertex_generator() )\n return self._radius_2", "def getNeighborsInRadius(self, baseDistance, ElectricalRadius, originID):\n neighbors_to_process = self.Neighbors\n# neighbors_to_process.remove(originID)\n neighbors_in_radius = deque()\n distance_to_neighbor = deque()\n\n if not neighbors_to_process:\n return [[],[]]\n\n for n in range(len(neighbors_to_process)):\n if baseDistance + self.getElectricalDistance(neighbors_to_process[n]) <= ElectricalRadius and neighbors_to_process[n] != originID:\n neighbors_in_radius.append(neighbors_to_process[n])\n distance_to_neighbor.append(baseDistance + self.getElectricalDistance(neighbors_to_process[n]))\n\n return [ neighbors_in_radius, distance_to_neighbor ]", "def get_nk (self, target_radius, printout = False):\n k_total = 0 # contains the number of iterations, n\n r_momomode = 62.5 # radius of the monomode fiber in um\n radius = target_radius\n radii=[]\n while radius < r_momomode : \n k_total += 1 \n radius = self.radius_km1(radius)\n radii.insert(0,radius)\n if printout:\n print (radius)\n # print(\"n = \"+str(k_total))\n return k_total-2, radii[2:]", "def get_radius(self):", "def moore_neighbourhood(self, grid_position: tuple, radius: int) -> list:\n result = []\n u = [grid_position[0] - radius, grid_position[1] - 
radius]\n for i in range(2 * radius + 1):\n for j in range(2 * radius + 1):\n # This does not make much sense, since u is a list and i and j are integers\n result.append([u + i, u + j])\n return result", "def radius_neighbors(\n self, X=None, radius=None, return_distance=True, sort_results=False\n ):\n check_is_fitted(self)\n\n if sort_results and not return_distance:\n raise ValueError(\"return_distance must be True if sort_results is True.\")\n\n query_is_train = X is None\n if query_is_train:\n X = self._fit_X\n else:\n if self.metric == \"precomputed\":\n X = _check_precomputed(X)\n else:\n X = self._validate_data(X, accept_sparse=\"csr\", reset=False, order=\"C\")\n\n if radius is None:\n radius = self.radius\n\n use_pairwise_distances_reductions = (\n self._fit_method == \"brute\"\n and RadiusNeighbors.is_usable_for(\n X if X is not None else self._fit_X, self._fit_X, self.effective_metric_\n )\n )\n\n if use_pairwise_distances_reductions:\n results = RadiusNeighbors.compute(\n X=X,\n Y=self._fit_X,\n radius=radius,\n metric=self.effective_metric_,\n metric_kwargs=self.effective_metric_params_,\n strategy=\"auto\",\n return_distance=return_distance,\n sort_results=sort_results,\n )\n\n elif (\n self._fit_method == \"brute\" and self.metric == \"precomputed\" and issparse(X)\n ):\n results = _radius_neighbors_from_graph(\n X, radius=radius, return_distance=return_distance\n )\n\n elif self._fit_method == \"brute\":\n # Joblib-based backend, which is used when user-defined callable\n # are passed for metric.\n\n # This won't be used in the future once PairwiseDistancesReductions\n # support:\n # - DistanceMetrics which work on supposedly binary data\n # - CSR-dense and dense-CSR case if 'euclidean' in metric.\n\n # for efficiency, use squared euclidean distances\n if self.effective_metric_ == \"euclidean\":\n radius *= radius\n kwds = {\"squared\": True}\n else:\n kwds = self.effective_metric_params_\n\n reduce_func = partial(\n self._radius_neighbors_reduce_func,\n radius=radius,\n return_distance=return_distance,\n )\n\n chunked_results = pairwise_distances_chunked(\n X,\n self._fit_X,\n reduce_func=reduce_func,\n metric=self.effective_metric_,\n n_jobs=self.n_jobs,\n **kwds,\n )\n if return_distance:\n neigh_dist_chunks, neigh_ind_chunks = zip(*chunked_results)\n neigh_dist_list = sum(neigh_dist_chunks, [])\n neigh_ind_list = sum(neigh_ind_chunks, [])\n neigh_dist = _to_object_array(neigh_dist_list)\n neigh_ind = _to_object_array(neigh_ind_list)\n results = neigh_dist, neigh_ind\n else:\n neigh_ind_list = sum(chunked_results, [])\n results = _to_object_array(neigh_ind_list)\n\n if sort_results:\n for ii in range(len(neigh_dist)):\n order = np.argsort(neigh_dist[ii], kind=\"mergesort\")\n neigh_ind[ii] = neigh_ind[ii][order]\n neigh_dist[ii] = neigh_dist[ii][order]\n results = neigh_dist, neigh_ind\n\n elif self._fit_method in [\"ball_tree\", \"kd_tree\"]:\n if issparse(X):\n raise ValueError(\n \"%s does not work with sparse matrices. 
Densify the data, \"\n \"or set algorithm='brute'\"\n % self._fit_method\n )\n\n n_jobs = effective_n_jobs(self.n_jobs)\n delayed_query = delayed(_tree_query_radius_parallel_helper)\n chunked_results = Parallel(n_jobs, prefer=\"threads\")(\n delayed_query(\n self._tree, X[s], radius, return_distance, sort_results=sort_results\n )\n for s in gen_even_slices(X.shape[0], n_jobs)\n )\n if return_distance:\n neigh_ind, neigh_dist = tuple(zip(*chunked_results))\n results = np.hstack(neigh_dist), np.hstack(neigh_ind)\n else:\n results = np.hstack(chunked_results)\n else:\n raise ValueError(\"internal: _fit_method not recognized\")\n\n if not query_is_train:\n return results\n else:\n # If the query data is the same as the indexed data, we would like\n # to ignore the first nearest neighbor of every sample, i.e\n # the sample itself.\n if return_distance:\n neigh_dist, neigh_ind = results\n else:\n neigh_ind = results\n\n for ind, ind_neighbor in enumerate(neigh_ind):\n mask = ind_neighbor != ind\n\n neigh_ind[ind] = ind_neighbor[mask]\n if return_distance:\n neigh_dist[ind] = neigh_dist[ind][mask]\n\n if return_distance:\n return neigh_dist, neigh_ind\n return neigh_ind", "def get_radius_in_spheres(\n structure: StructureOrMolecule, nn_strategy=None, cutoff: float = 5.0,\n numerical_tol: float = 1e-6,\n pbc: Union[bool, Tuple[bool]] = True,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n _ = nn_strategy\n\n if isinstance(structure, Structure):\n lattice_matrix = np.ascontiguousarray(np.array(structure.lattice.matrix), dtype=float)\n if pbc is not False:\n pbc = _re_pbc(pbc, return_type=\"int\")\n else:\n pbc = np.array([0, 0, 0])\n elif isinstance(structure, Molecule):\n lattice_matrix = np.array([[1000.0, 0.0, 0.0], [0.0, 1000.0, 0.0], [0.0, 0.0, 1000.0]], dtype=float)\n pbc = np.array([0, 0, 0])\n else:\n raise ValueError(\"structure type not supported\")\n\n r = float(cutoff)\n cart_coords = np.ascontiguousarray(np.array(structure.cart_coords), dtype=float)\n center_indices, neighbor_indices, images, distances = find_points_in_spheres(\n cart_coords, cart_coords, r=r, pbc=pbc, lattice=lattice_matrix, tol=numerical_tol\n )\n center_indices = center_indices.astype(np.int64)\n neighbor_indices = neighbor_indices.astype(np.int64)\n # images = images.astype(np.int64)\n distances = distances.astype(np.float32)\n exclude_self = (distances > numerical_tol)\n # exclude_self = (center_indices != neighbor_indices) | (distances > numerical_tol)\n\n return center_indices[exclude_self], neighbor_indices[exclude_self], \\\n distances[exclude_self].reshape(-1, 1), distances[exclude_self], np.array(np.NaN)", "def _replace_center_with_one_if_five_neighbors_are_different_than_0(values):\n greater_than_0 = 0\n for entry in values:\n if entry > 0:\n greater_than_0 += 1\n if greater_than_0 >= 5:\n return 1\n else:\n return 0", "def _find_neighbours(self):\n\n neighbours = []\n for i, p in enumerate(self.frame_0):\n nearests = np.where(np.linalg.norm(self.frame_0 - p, axis=1) <= self.R_n)[0]\n # delete self index\n index = np.argwhere(nearests==i)\n nearests = np.delete(nearests, index)\n neighbours.append(nearests)\n\n return neighbours", "def get_nearest_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) <= 1: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def semidiameter(radius, distance):\n\n return np.arcsin(radius / 
distance)", "def radius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, self.vertices[0])", "def _get_neighbors(x, y, data, distance=1):\n mask = numpy.zeros_like(data, dtype=numpy.bool)\n y_max, x_max = data.shape\n y_low = max(y - distance, 0)\n y_high = min(y + distance + 1, y_max)\n x_low = max(x - distance, 0)\n x_high = min(x + distance + 1, x_max)\n mask[y_low:y_high, x_low:x_high] = True\n mask[y, x] = False\n return mask", "def getneighbors(self):\r\n\t\ti=self.cell[0]\r\n\t\tj=self.cell[1]\r\n\t\t\r\n\t\tw = self.width-1\r\n\t\tCenter = self.base[i][j]\r\n\t\tif(self.type==\"Neumann\"):\r\n\t\t\tif(j==w and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(i==w and 0<j<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(j==0 and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\tif(i==0 and 0<j<w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\r\n\t\t\tif(j==w and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(j==0 and i==0):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\r\n\t\t\tif(j==0 and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\r\n\t\t\tif(i==0 and j==w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(0<i<w and 0<j<w):\t\t\t\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tself.surrounding = [North,South,East,West]\r\n\t\t\tself.binary= str(East)+str(West)+str(South)+str(North)+str(Center)\r\n\t\t\t\r\n\t\telif(self.type==\"Moore\"):\r\n\t\t\t\r\n\t\t\tif(j==w and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][0]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[i+1][0]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\tif(i==w and 0<j<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[0][j+1]\r\n\t\t\t\tSW = self.base[0][j-1]\r\n\t\t\tif(j==0 and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][w]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][w]\r\n\t\t\tif(i==0 and 0<j<w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[w][j+1]\r\n\t\t\t\tNW = self.base[w][j-1]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = 
self.base[i+1][j-1]\r\n\t\t\t\t\t\t\t\r\n\t\t\tif(j==w and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][0]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[0][0]\r\n\t\t\t\tSW = self.base[0][j-1]\r\n\t\t\tif(j==0 and i==0):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\t\tNE = self.base[w][j+1]\r\n\t\t\t\tNW = self.base[w][w]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][w]\r\n\t\t\tif(j==0 and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][w]\r\n\t\t\t\tSE = self.base[0][j+1]\r\n\t\t\t\tSW = self.base[0][w]\r\n\t\t\tif(i==0 and j==w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[w][0]\r\n\t\t\t\tNW = self.base[w][j-1]\r\n\t\t\t\tSE = self.base[i+1][0]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\tif(0<i<w and 0<j<w):\t\t\t\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tself.surrounding = [North,South,East,West,NE,NW,SE,SW]\r\n\t\t\tself.binary= str(East)+str(West)+str(South)+str(North)+str(Center)+str(NE)+str(NW)+str(SE)+str(SW)", "def coords_in_ring(self, anchor, radius):\n if radius == 0:\n return [anchor]\n\n results = list()\n # this code doesn't work for radius == 0; can you see why?\n coord = anchor + self.dirs[4] * radius\n\n for i in range(0, 6):\n for _ in range(0, radius):\n results.append(coord)\n coord = coord + self.dirs[i]\n return results", "def compute_neighbours_matrix(src, d_matrix, radius, n_simil):\n\n if n_simil == 1:\n return _compute_euclidean_neigh_matrix(src, d_matrix, radius)\n elif 0 <= n_simil < 1:\n return _compute_correlation_neigh_matrix(src, d_matrix, radius)\n else:\n raise NotImplementedError", "def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, 
self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours", "def get_surround(xy, dim_x=10, dim_y=10, radius=1, exclude_self=True):\n laterals = []\n for dx in range(-int(radius), int(radius)+1, 1):\n for dy in range(-int(radius), int(radius)+1, 1):\n if dx**2 + dy**2 > radius**2:\n continue\n if (xy[0]+dx >= 0) and (xy[0]+dx < dim_x) and (xy[1]+dy >= 0) and (xy[1]+dy < dim_y):\n if not (exclude_self and dx == 0 and dy == 0):\n laterals.append((xy[0]+dx, xy[1]+dy))\n return laterals", "def ecfp(mol,radius):\n #mol=Chem.AddHs(mol)\n bitInfo={}\n atoms_dict=invariants(mol)\n \n for idxs,i in atoms_dict.items():\n bitInfo[i]=bitInfo.get(i,())+((idxs,0),)\n \n neighborhoods=[]\n atom_neighborhoods=[len(mol.GetBonds())*bitarray('0') for a in mol.GetAtoms()]\n dead_atoms=len(mol.GetAtoms())*bitarray('0')\n \n for r in range(1,radius+1):\n round_ids={} #new bit ID this iteration\n round_atom_neighborhoods=copy.deepcopy(atom_neighborhoods) #bond to include under this r\n neighborhoods_this_round=[] #(round_atom_neighborhoods,round_ids,idxs)\n \n for idxs,a in enumerate(mol.GetAtoms()):\n if dead_atoms[idxs]:\n continue\n nbsr=[] #list to hash this iteration\n o_bond=bond(mol,idxs)\n for b in o_bond:\n round_atom_neighborhoods[idxs][b[2]] = True\n round_atom_neighborhoods[idxs] |= atom_neighborhoods[b[1]]\n nbsr.append((b[0],atoms_dict[b[1]]))\n nbsr=sorted(nbsr)\n nbsr=[item for sublist in nbsr for item in sublist]\n nbsr.insert(0,atoms_dict[idxs])\n nbsr.insert(0,r)\n \n round_ids[idxs]=get_hash(nbsr)\n neighborhoods_this_round.append((round_atom_neighborhoods[idxs],round_ids[idxs],idxs))\n for lst in neighborhoods_this_round:\n if lst[0] not in neighborhoods:\n bitInfo[lst[1]] = bitInfo.get(lst[1],())+((lst[2],r),)\n neighborhoods.append(lst[0])\n else:\n dead_atoms[lst[2]]=True\n atoms_dict=round_ids\n atom_neighborhoods=copy.deepcopy(round_atom_neighborhoods)\n return bitInfo", "def ecfp(mol, radius=2):\n\n atom_ids = invariants(mol)\n\n fp = {}\n for i in atom_ids.values():\n fp[i] = fp.get(i, 0) + 1\n\n neighborhoods = []\n atom_neighborhoods = [ len(mol.bonds) * bitarray('0') for a in mol.atoms]\n dead_atoms = len(mol.atoms) * bitarray('0')\n\n for layer in range(1, radius+1):\n round_ids = {}\n round_atom_neighborhoods = copy.deepcopy(atom_neighborhoods)\n neighborhoods_this_round = []\n\n for a in mol.atoms:\n if dead_atoms[a.index]: continue\n\n nbsr = []\n for b in a.bonds:\n round_atom_neighborhoods[a.index][b.index] = True\n oidx = b.xatom(a).index\n round_atom_neighborhoods[a.index] |= atom_neighborhoods[oidx]\n nbsr.append((b.bondtype, atom_ids[oidx]))\n\n nbsr = sorted(nbsr)\n nbsr = [item for sublist in nbsr for item in sublist]\n nbsr.insert(0, atom_ids[a.index])\n nbsr.insert(0, layer)\n\n round_ids[a.index] = gen_hash(nbsr)\n neighborhoods_this_round.append(\n (round_atom_neighborhoods[a.index], round_ids[a.index], a.index)\n )\n\n for lst in neighborhoods_this_round:\n if lst[0] not in neighborhoods:\n fp[lst[1]] = fp.get(lst[1], 0) + 1\n neighborhoods.append(lst[0])\n else:\n dead_atoms[lst[2]] = True\n\n atom_ids = round_ids\n atom_neighborhoods = copy.deepcopy(round_atom_neighborhoods)\n return fp", "def test_extreme_neighborhoods(self):\n\n ## Radius = 0 ==> all points are noise\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=0.0,\n min_core_neighbors=3,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 0)\n 
self.assertEqual(sum(m.cluster_id[\"type\"] == \"noise\"), self.n)\n\n ## Min_neighbors > 30 ==> all points are noise\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=0.0,\n min_core_neighbors=31,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 0)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"noise\"), self.n)\n\n ## Radius very large ==> all points are core points\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=100.0,\n min_core_neighbors=3,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 1)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"core\"), self.n)\n\n ## Min_neighbors = 0 ==> all points are core points\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=0.5,\n min_core_neighbors=0,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 1)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"core\"), self.n)", "def find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass", "def problem5(self, s):\n points = 0\n\n points = self.neighbor( 10, 10, s.nearest_neighbor)*3\n points += self.neighbor(100, 10, s.nearest_neighbor)*3\n points += self.neighbor( 10, 100, s.nearest_neighbor)*3\n points += self.neighbor(100, 100, s.nearest_neighbor)*3\n points += self.neighbor(100, 100, s.nearest_neighbor)*3\n\n _testDriver.get_code(s.nearest_neighbor)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n \n return points", "def solvable(grid):\n y = x = 1\n stack = deque([(0, y, x,)])\n goal = len(grid) - 2\n found = np.ones_like(grid, dtype=bool)\n \n while stack:\n i, y, x = stack.popleft()\n i += 1\n for y2, x2 in solve_perfect.neighbors(y, x, grid):\n if found[y2, x2]:\n if y2 == goal and x2 == goal:\n return i\n else:\n found[y2, x2] = False\n stack.append((i, y2, x2,))\n \n return 0", "def get_neighb_dist(self, i, ci):\n ri = self.xyz[i]\n j = self.conn[i][ci]\n rj = self.xyz[j].copy()\n if self.periodic:\n if self.use_pconn:\n img = self.pconn[i][ci]\n rj += np.dot(img, self.cell)\n else:\n all_rj = rj + self.images_cellvec\n all_r = all_rj - self.xyz[i]\n all_d = np.sqrt(np.add.reduce(all_r*all_r,1))\n closest = np.argsort(all_d)[0]\n return all_rj[closest]\n dr = ri-rj\n d = np.sqrt(np.sum(dr*dr))\n return d", "def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. 
Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)", "def nearest_neigh(self, atom):\n atoms = self.hutch.get_atoms_in_same_hutch(atom)[:]\n if atom in atoms: atoms.remove(atom)\n\n # This generation of nearby hutches isn't perfect but it will work\n rots = [(1,0,0),(0,1,0),(0,0,1)]\n i = 0\n while len(atoms) == 0:\n hutch = ((hutch[0]+rots[i][0])%self.hutch.nhutchs,(hutch[1]+rots[i][1])%self.hutch.nhutchs,(hutch[2]+rots[i][2])%self.hutch.nhutchs)\n i = (i+1) % 3\n atoms = self.hutch.hutchs[hutch]\n if atom in atoms: atoms.remove(atom)\n start = atoms[0]\n\n atoms = self.get_atoms_in_cutoff(atom,self.dist(atom,start))\n #if atom in atoms: atoms.remove(atom)\n d = float(\"inf\")\n for atomi in atoms:\n dt = self.dist(atom,atomi)\n if dt < d:\n d = dt\n a = atomi\n return a", "def get_neighbors(self, coord, radius=1):\n y, x = coord\n neighbors = {}\n if radius == 1:\n neighbors['right'] = [self._screen.inch(y, x+1)]\n neighbors['left'] = [self._screen.inch(y, x-1)]\n neighbors['up'] = [self._screen.inch(y-1, x)]\n neighbors['down'] = [self._screen.inch(y+1, x)]\n else:\n range_y, range_x = self._map_dims\n right, left, up, down = [], [], [], []\n for x_right in range(x+1, range_x[1], 1):\n char = self._screen.inch(y, x_right)\n right.append(char)\n for x_left in range(x-1, range_x[0], -1):\n char = self._screen.inch(y, x_left)\n left.append(char)\n for y_down in range(y+1, range_y[1], 1):\n char = self._screen.inch(y_down, x)\n down.append(char)\n for y_up in range(y-1, range_y[0], -1):\n char = self._screen.inch(y_up, x)\n up.append(char)\n\n neighbors['right'] = right\n neighbors['left'] = left\n neighbors['up'] = up\n neighbors['down'] = down\n return neighbors", "def topology_circle(self, radius):\n\t\tfor s in self.sites:\n\t\t\ts.clear_neighbor()\n\t\tfor i in range(len(self.sites)):\n\t\t\tfor r in range(radius):\n\t\t\t\tself.sites[i].neighbors.append(self.sites[(i + r + 1) % len(self.sites)])\n\t\t\t\tself.sites[(i + r + 1) % len(self.sites)].neighbors.append(self.sites[i])", "def get_neighbors(self, row, col):\n neighbors = set()\n for d in [-1,1]:\n if row+d >= 0 and row+d < self._height and \\\n (row+d,col) in self._empty_spaces:\n neighbors.add((row+d,col))\n if col+d >= 0 and col+d < self._width and \\\n (row,col+d) in self._empty_spaces:\n neighbors.add((row,col+d))\n return neighbors", "def FindPointsWithinRadius(self, p_float, , vtkIdList):\n ...", "def get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)", "def neighbors(districts, r, c):\r\n n_list = []\r\n if r>0:\r\n n_list += [districts[r-1,c]]\r\n if r<4:\r\n n_list += [districts[r+1,c]]\r\n if c>0:\r\n n_list += [districts[r,c-1]]\r\n if c<4:\r\n n_list 
+= [districts[r,c+1]]\r\n return n_list", "def radius(self):\n c = self.centroid()\n dmax = -np.inf\n for vertex in self.path.vertices:\n d = np.linalg.norm(vertex - c)\n if d > dmax:\n dmax = d\n return d", "def find_circles(img_orig, edge_img, radii, hough_threshold, nhood_delta):\n\n peaks = []\n for radius in radii:\n h = hough_circles_acc(img_orig, edge_img, radius, True)\n p = hough_peaks(h, 250, (50, 50))\n for i in range(len(p)):\n tmp = list(p[i])\n tmp.append(radius)\n #'''\n cont = False\n for j in range(len(peaks)):\n dist = math.sqrt((peaks[j][1] - tmp[1]) ** 2 + (peaks[j][0] - tmp[0]) ** 2)\n if dist <= 6.0:\n peaks[j] = tmp\n cont = True\n if not cont:\n peaks.append(tmp)\n #'''\n #peaks.append(tmp)\n return np.array(peaks)", "def find_los_neighbors(seats, occupied_self, i, j):\n values = []\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n values.append(\n find_nearest_los_seat(seats, occupied_seats, i, j, dx, dy)\n )\n return values", "def neighbors(self, x):\n pass", "def random_neighbors(self) -> int:\n return self.__random_neighbors", "def get_perimeter(self, radius: int = 1) -> set:\n return self.get_neighbourhood(radius) - self.get_neighbourhood(radius - 1)", "def _check_neighbors(self):\n for direction, dir_info in self.DIRECTIONS.items():\n pos = Point(\n self.position.x + dir_info[\"mask\"][0],\n self.position.y + dir_info[\"mask\"][1]\n )\n status = self.move(direction)\n self.grid[status].add(pos)\n if status in (1, 2):\n # moved\n self.move(dir_info[\"opposite\"])\n yield pos", "def get_elk_in_radius(self, radius):\n agents_in_radius = self.model.grid.get_neighbors(\n self.pos,\n moore=True,\n include_center=False,\n radius=radius\n )\n # Get closest elks\n elk_in_radius = [\n agent for agent in agents_in_radius if isinstance(agent, Elk)\n ]\n\n return elk_in_radius", "def _calc_neighborhood_func(self, curr_it: int, mode: str) -> float:\n return decreasing_rate(\n self.radius_max_,\n self.radius_min_,\n iteration_max=self.max_iterations_,\n iteration=curr_it,\n mode=mode,\n )", "def get_neighb_coords(self, i, ci):\n j = self.conn[i][ci]\n rj = self.xyz[j].copy()\n if self.periodic:\n if self.use_pconn:\n img = self.pconn[i][ci]\n rj += np.dot(img, self.cell)\n else:\n all_rj = rj + self.images_cellvec\n all_r = all_rj - self.xyz[i]\n all_d = np.sqrt(np.add.reduce(all_r*all_r,1))\n closest = np.argsort(all_d)[0]\n return all_rj[closest]\n return rj", "def test_d2_get_neighborhood_small(self):\n config.NR_COLS = 3\n config.NR_ROWS = 3\n gamefield = [\n [1, 0, 0],\n [1, 0, 0],\n [0, 1, 1],\n ]\n # top left\n nh = logic.get_neighborhood(gamefield, 0, 0)\n self.assertEqual(nh, 3)\n # top right\n nh = logic.get_neighborhood(gamefield, 0, 2)\n self.assertEqual(nh, 4)\n # bottom left\n nh = logic.get_neighborhood(gamefield, 2, 0)\n self.assertEqual(nh, 4)\n # bottom right\n nh = logic.get_neighborhood(gamefield, 2, 2)\n self.assertEqual(nh, 3)\n # center\n nh = logic.get_neighborhood(gamefield, 1, 1)\n self.assertEqual(nh, 4)", "def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit", "def calc_number_neighbours(num_electrons: int):\r\n if num_electrons < -4 or num_electrons > 4 : \r\n # if number of missing/extra e- higher than 4, then distort 8-num_electrons\r\n num_neighbours = abs(8 - abs(num_electrons) )\r\n elif -4 < num_electrons < 4:\r\n num_neighbours = abs(num_electrons)\r\n elif abs(num_electrons) == 4:\r\n 
num_neighbours = abs(num_electrons)\r\n \r\n return abs(num_neighbours)", "def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)", "def _radius_neighbors_graph(\n self,\n sf,\n label,\n distance,\n radius,\n k=None,\n src_field='query_label',\n dst_field='reference_label'):\n\n # Get a feature list.\n features = []\n [features.extend(list(i[0])) for i in distance]\n\n\n print 'Determining edges...'\n # Compute the edgelist via nearest neighbors.\n nn = gl.nearest_neighbors.create(\n sf, label=label, features=features, distance=distance)\n edgelist = nn.query(\n sf, label=label, k=k, radius=radius)\n\n # Remove loops from the edgelist.\n # edgelist = self._remove_loops(edgelist)\n\n print 'Constructing graph...'\n # Make the graph.\n g = gl.SGraph(\n sf,\n edgelist,\n vid_field=label,\n src_field=src_field,\n dst_field=dst_field)\n return g", "def neighbours(self, i, j):\n nearest = []\n for x_offset, y_offset in [(0, -1), (0, 1), (1, 0), (-1, 0)]:\n try:\n nearest.append(self.as_list[checkNonNegIndex(i + x_offset)][checkNonNegIndex(j + y_offset)])\n except IndexError:\n continue\n except TypeError:\n continue\n return nearest", "def match(self, pos, radius):\n\n nodes = self._data[:, 2:5]\n distlist = np.squeeze(cdist(pos.reshape(1, 3), nodes))\n if distlist.size == 0:\n return False, -2\n minidx = distlist.argmin()\n minnode = self._data[minidx, 2:5]\n\n # See if either of them can cover each other with a ball of their own\n # radius\n mindist = np.linalg.norm(pos - minnode)\n return radius > mindist or self._data[minidx, 5] > mindist, minidx", "def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def within_radius(network, src_coords, radius=DEFAULT_RADIUS):\n for node in network:\n # exclude the sender\n dest_coords = node.coords\n if dest_coords == src_coords:\n continue\n elif distance(dest_coords, src_coords) < radius:\n yield node", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def mask_neighbors(self, mask, rad=9, ptrn='r'):\n return um.mask_neighbors(mask, rad, ptrn)", "def get_fan_in(xy=(0, 0), dim_x_l=10, dim_y_l=10, dim_x_u=9, dim_y_u=9, block_x=2, block_y=2, radius=2):\n x = xy[0]\n y = xy[1]\n if dim_x_u > 1:\n factor_x = ((dim_x_l-1)-(block_x-1))/(1.0*(dim_x_u-1))\n else:\n factor_x = ((dim_x_l-1)-(block_x))/2.0\n if dim_y_u > 1:\n factor_y = ((dim_y_l-1)-(block_y-1))/(1.0*(dim_y_u-1))\n else:\n factor_y = ((dim_y_l-1)-(block_y))/2.0\n results = []\n if dim_x_u > 1 and dim_y_u > 1:\n for xx in range(block_x):\n for yy in range(block_y):\n if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:\n continue\n results.append((int((factor_x*(x))+xx), int((factor_y*(y))+yy)))\n return results\n elif dim_x_u == 1 and dim_y_u > 1:\n for xx in range(block_x):\n for yy in range(block_y):\n if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:\n continue\n results.append((int((dim_x_l-block_x)/2.0+xx), int((factor_y*(y)+yy))))\n return results\n elif dim_x_u > 1 and dim_y_u == 
1:\n for xx in range(block_x):\n for yy in range(block_y):\n if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:\n continue\n results.append((int((factor_x*(x)+xx)), int((dim_y_l-block_y)/2.0+yy)))\n return results\n elif dim_x_u == 1 and dim_y_u == 1:\n for xx in range(block_x):\n for yy in range(block_y):\n if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:\n continue\n results.append((int((dim_x_l-block_x)/2.0+xx), int((dim_y_l-block_y)/2.0+yy)))\n return results", "def get_neighbours_circular(lat, lng):\n origin = CellId.from_lat_lng(LatLng.from_degrees(lat, lng)).parent(S2_CELL_LEVEL)\n neighbors = {origin.id()}\n\n edge_neighbors = origin.get_edge_neighbors()\n surrounding_neighbors = [\n edge_neighbors[0], # North neighbor\n edge_neighbors[0].get_edge_neighbors()[1], # North-east neighbor\n edge_neighbors[1], # East neighbor\n edge_neighbors[2].get_edge_neighbors()[1], # South-east neighbor\n edge_neighbors[2], # South neighbor\n edge_neighbors[2].get_edge_neighbors()[3], # South-west neighbor\n edge_neighbors[3], # West neighbor\n edge_neighbors[0].get_edge_neighbors()[3], # North-west neighbor\n ]\n\n for cell in surrounding_neighbors:\n neighbors.add(cell.id())\n for cell2 in cell.get_edge_neighbors():\n neighbors.add(cell2.id())\n\n return neighbors", "def get_radius(center, rad, speed_limit):\n i = Intersection(center, rad, speed_limit)\n return i.get_radius()", "def get_further_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) > 1 or abs(x-i)+abs(y-j) == 0: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def neighbors(self):\n \n # find 0 - blank square\n \n x0 = None\n y0 = None\n \n for i in range(4):\n for j in range(4):\n if self.get_tile(i,j) == 0:\n y0 = i\n x0 = j\n\n if x0 == None or y0 == None:\n return []\n \n neighbor_list = []\n \n # move 0 to the right\n if x0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0+1)\n new_position.set_tile(y0,x0+1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'r'\n neighbor_list.append(new_position)\n # move 0 to the left\n if x0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0-1)\n new_position.set_tile(y0,x0-1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'l'\n neighbor_list.append(new_position)\n # move 0 up\n if y0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0-1,x0)\n new_position.set_tile(y0-1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'u'\n neighbor_list.append(new_position)\n # move 0 down\n if y0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0+1,x0)\n new_position.set_tile(y0+1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'd'\n neighbor_list.append(new_position)\n \n return neighbor_list", "def investigate(self, nearest_neighbors):\n pass", "def neighbors(r, c):\n return [\n (r - 1, c - 1),\n (r + 0, c - 1),\n (r + 1, c - 1),\n (r - 1, c + 0),\n (r + 1, c + 0),\n (r - 1, c + 1),\n (r + 0, c + 1),\n (r + 1, c + 1),\n ]", "def get_radius(self):\n if self.no_dist is False:\n dist = self.distance\n radius = (dist * self.ang_size / 60. *\n np.pi/180. 
* ct._kpc_over_pc_)/2.\n self.radius = radius\n else:\n self.radius = -1 # use -1 to indicate unknown diameter\n\n return self.radius", "def test_d1_get_neighborhood(self):\n config.NR_COLS = 10\n config.NR_ROWS = 10\n gamefield = [\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n # top left\n nh = logic.get_neighborhood(gamefield, 0, 0)\n self.assertEqual(nh, 4)\n # top right\n nh = logic.get_neighborhood(gamefield, 0, 8)\n self.assertEqual(nh, 2)\n # bottom left\n nh = logic.get_neighborhood(gamefield, 9, 1)\n self.assertEqual(nh, 4)\n # bottom right\n nh = logic.get_neighborhood(gamefield, 9, 9)\n self.assertEqual(nh, 4)\n # center\n nh = logic.get_neighborhood(gamefield, 4, 5)\n self.assertEqual(nh, 3)" ]
[ "0.7410353", "0.71742636", "0.7014808", "0.7014808", "0.6995029", "0.69532037", "0.69336915", "0.67915857", "0.6648367", "0.65443754", "0.65413064", "0.6540949", "0.64974695", "0.64635015", "0.6388047", "0.6292354", "0.62815535", "0.6259406", "0.6251604", "0.6242718", "0.624088", "0.6217781", "0.6207946", "0.6192231", "0.61671937", "0.6164548", "0.6142607", "0.6119441", "0.6119412", "0.60893685", "0.6087394", "0.6082798", "0.60745734", "0.6061589", "0.60611784", "0.60440296", "0.60378706", "0.6022406", "0.6018164", "0.60160667", "0.60019094", "0.6000389", "0.59859794", "0.59815377", "0.59670496", "0.5965826", "0.593521", "0.5918668", "0.59153354", "0.5914836", "0.59042287", "0.5899082", "0.5897013", "0.5896732", "0.5896518", "0.5891557", "0.5891302", "0.5861509", "0.5859048", "0.58568805", "0.5855354", "0.5852467", "0.5832815", "0.58254164", "0.58145547", "0.5813491", "0.5812697", "0.5811877", "0.58098906", "0.5808949", "0.58088744", "0.5808206", "0.57869947", "0.5780408", "0.577701", "0.5770472", "0.57685405", "0.5763138", "0.57626784", "0.57584655", "0.5752764", "0.57494", "0.5748148", "0.5746353", "0.5745428", "0.5742882", "0.57426876", "0.57389116", "0.5735044", "0.5731211", "0.5731167", "0.5725033", "0.5724046", "0.5723297", "0.5722039", "0.5720926", "0.5718364", "0.57174146", "0.5711775", "0.5698988" ]
0.6215466
22
Determines whether points in certain segments are spaced too far apart (relative to median point spacing).
def findLongSections(self, version=2):
    median_distances = [self.medianDist(sec) for sec in self.sections.keys()]
    median_distances.sort()
    median_dist = median_distances[int(len(median_distances)/2)]
    long_sections, long_distances = [], []
    if version == 1:
        for s in xrange(len(median_distances)):
            if median_distances[s] > 2*median_dist:
                long_sections.append(median_distances[s])
                long_distances.append(s)
        print('Found %i sections with points spaced far apart' \
              % len(long_sections))
    elif version == 2:
        for sec in self.sections.keys():
            if len(self.sections[sec]) < 3:
                long_sections.append(sec)
                d = [a - b for a,b in zip(self.sections[sec][0], \
                                          self.sections[sec][-1])]
                long_distances.append(np.linalg.norm(d))
        print(np.shape(long_distances))
    # sections_distances = zip(long_sections, long_distances)
    return long_sections, long_distances, median_dist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _filter_out_bad_segments(img1, seg1, img2, seg2):\n minval = tf.reduce_min(tf.reduce_sum(seg1, [0,1])*tf.reduce_sum(seg2, [0,1]))\n if minval < 0.5:\n warnings.warn(\"filtering bad segment\")\n return False\n else:\n return True", "def on_segment(point_p, point_q, point_r):\n if (point_q.x <= max(point_p.x, point_r.x)\n and point_q.x >= min(point_p.x, point_r.x)\n and point_q.y <= max(point_p.y, point_r.y)\n and point_q.y >= min(point_p.y, point_r.y)):\n return True\n return False", "def pointInSegment(point, segmentPoint1, segmentPoint2):\n\t\tx = point[0]\n\t\ty = point[1]\n\n\t\tif x < segmentPoint1[0] and x < segmentPoint2[0]:\n\t\t\treturn False\n\t\t\n\t\tif x > segmentPoint1[0] and x > segmentPoint2[0]:\n\t\t\treturn False\n\t\t\n\t\tif y < segmentPoint1[1] and y < segmentPoint2[1]:\n\t\t\treturn False\n\t\t\n\t\tif y > segmentPoint1[1] and y > segmentPoint2[1]:\n\t\t\treturn False\n\t\t\n\t\treturn True", "def near_segment(point:tuple, edge:tuple)->bool:\n return between(point[0], edge[0][0], edge[1][0]) and between(point[1], edge[0][1], edge[1][1])", "def is_outlier(points, thresh=12):\n if len(points.shape) == 1:\n points = points[:,None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n return modified_z_score > thresh", "def isnot_outlier(points, thresh=1.5):\n if len(points.shape) == 1:\n points = points[:,None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n return modified_z_score <= thresh", "def is_point_on_segment(point, segment, tol=0.0):\n a, b = segment\n if not is_point_on_line(point, segment, tol=tol):\n return False\n d_ab = distance_point_point(a, b)\n if d_ab == 0:\n return False\n d_pa = distance_point_point(a, point)\n d_pb = distance_point_point(b, point)\n if d_pa + d_pb <= d_ab + tol:\n return True\n return False", "def is_outlier(points, thresh=3.5):\n if len(points.shape) == 1:\n points = points[:,None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n return modified_z_score > thresh", "def should_skip(p0, p1, p2):\n if p0 is None:\n return False\n if p1 is None:\n return False\n if p2 is None:\n return False\n indices = range(len(p1))\n # Calculate vectors for p1 and p2 relative to p0.\n v1 = [p1[i] - p0[i] for i in indices]\n v2 = [p2[i] - p0[i] for i in indices]\n # Calculate the lengths of the relative vectors.\n l1 = math.sqrt(sum(v1[i] * v1[i] for i in indices))\n l2 = math.sqrt(sum(v2[i] * v2[i] for i in indices))\n if l2 < minSegment:\n # Ignore midpoint because the whole segment is very short.\n return 'length=%.5f (too short)' % l2\n ratio = l1 / l2 # Ratio of midpoint vs endpoint.\n # How far is the midpoint away from straight line?\n d = [v1[i] - v2[i] * ratio for i in indices]\n error = math.sqrt(sum(d[i] * d[i] for i in indices))\n if error > minDeviation:\n return False\n # Ignore midpoint because it is very close to the straight line.\n return 'ratio=%.5f error=%.5f (straight line)' % (ratio, error)", "def can_fix_intersection(self, segment):\n\n points = segment.points\n points = [points[1], points[2], points[3], points[2], points[1], points[0]]\n path = 
create_path(points)\n layer = GSLayer()\n layer.paths.append(path)\n\n if layer.paths[0].insertNodeWithPathTime_(2.5) is None:\n return False\n for segment in layer.paths[0].segments[:-1]:\n # We need to check only curve segments which consist of four points.\n if len(segment.points) == 4:\n s_t = self.triangle_error_of(segment.points, do_round=True)\n if s_t is not None:\n points = points2vectors(segment.points)\n ok = False\n for s, t in self.calculate_s_t_candidates(points, s_t):\n if self.try_update_points(points, s, t) is not None:\n ok = True\n break\n if not ok:\n return False\n return True", "def dominate(aver_size1, aver_size2, likelibest1, likelibest2):\n return (aver_size1 < aver_size2 and likelibest1 >= likelibest2) or (\n aver_size1 <= aver_size2 and likelibest1 > likelibest2\n )", "def _check_overlap(self, points, radius):\n dists = distance.cdist(points, points, 'euclidean')\n dists = dists[np.nonzero(dists)]\n\n return np.any(dists < 2.0 * radius)", "def check_alignment(image, r1, r2):\n \n distance = dist_between_spheres(r1, r2, image.shape[0] / 2. + 10, image.shape[0] / 2.)\n gap_signal = []\n denoised = median_filter(image.copy(), 3)\n \n for j in np.arange(0., image.shape[1]): \n # Take the region around the gap, which later on will be used\n # to define the intensity at the gap between the spheres.\n # The width of the gap is not exact\n if image.shape[1] / 2. + distance + 5 > j > image.shape[1] / 2. - distance - 5:\n gap_signal.append(denoised[image.shape[0] / 2. + 10, j])\n \n centre = np.mean(np.argwhere(np.min(gap_signal) == gap_signal))\n print centre\n print len(gap_signal) / 2.\n print\n \n if abs(centre - len(gap_signal) / 2.) <= 1.5:\n return True\n else:\n return False", "def _check_separate(gti0, gti1):\n gti0_start = gti0[:, 0]\n gti0_end = gti0[:, 1]\n gti1_start = gti1[:, 0]\n gti1_end = gti1[:, 1]\n\n if (gti0_end[-1] <= gti1_start[0]) or (gti1_end[-1] <= gti0_start[0]):\n return True\n\n for g in gti1.flatten():\n for g0, g1 in zip(gti0[:, 0], gti0[:, 1]):\n if (g <= g1) and (g >= g0):\n return False\n for g in gti0.flatten():\n for g0, g1 in zip(gti1[:, 0], gti1[:, 1]):\n if (g <= g1) and (g >= g0):\n return False\n return True", "def is_separating_axis(o, p1, p2):\n min1, max1 = float('+inf'), float('-inf')\n min2, max2 = float('+inf'), float('-inf')\n\n for v in p1:\n projection = np.dot(v, o)\n\n min1 = min(min1, projection)\n max1 = max(max1, projection)\n\n for v in p2:\n projection = np.dot(v, o)\n\n min2 = min(min2, projection)\n max2 = max(max2, projection)\n\n if max1 >= min2 and max2 >= min1:\n d = min(max2 - min1, max1 - min2)\n # push a bit more than needed so the shapes do not overlap in future\n # tests due to float precision\n d_over_o_squared = d/np.dot(o, o) + 1e-10\n pv = d_over_o_squared*o\n return False, pv\n else:\n return True, None", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def onSegment(self, p, q, r):\n if ((q.x <= max(p.x, r.x)) and (q.x >= min(p.x, r.x)) and\n (q.y <= max(p.y, r.y)) and (q.y >= min(p.y, r.y))):\n return True\n return False", "def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and 
\\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False", "def _has_noise(self) -> bool:\n min = self.array.min()\n max = self.array.max()\n near_min, near_max = np.percentile(self.array, [0.5, 99.5])\n max_is_extreme = max > near_max * 1.25\n min_is_extreme = (min < near_min * 0.75) and (\n abs(min - near_min) > 0.1 * (near_max - near_min)\n )\n return max_is_extreme or min_is_extreme", "def hasSpaceAround(self,x,y):\n global gamemap\n c = 0\n for x2 in xrange(-2,2):\n for y2 in xrange(-2,2):\n if self.near(x, y,x + x2,y + y2):\n if not gamemap[x + x2][y + y2].type[0]:\n c += 1\n if c >= 8:\n return False\n else:\n return True", "def is_point_within(self, x, y):\n return abs(x - self._x_position) <= self._x_length / 2 and abs(y - self._y_position) <= self._y_length / 2", "def __comparing_points(self, point1, point2) -> bool:\n return (abs(point1.x - point2.x) <= self.dirt_pos_tolerance and abs(\n point1.y - point2.y) <= self.dirt_pos_tolerance)", "def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)", "def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True", "def ideal_spacing(data, npoints):\n dims = data.shape\n actual_npoints = (data >= 0).sum()\n spacing = np.ones(3, dtype='uint')\n\n while actual_npoints > npoints:\n\n # Subsample the direction with the highest number of samples\n ddims = dims / spacing\n if ddims[0] >= ddims[1] and ddims[0] >= ddims[2]:\n dir = 0\n elif ddims[1] > ddims[0] and ddims[1] >= ddims[2]:\n dir = 1\n else:\n dir = 2\n spacing[dir] += 1\n subdata = data[::spacing[0], ::spacing[1], ::spacing[2]]\n actual_npoints = (subdata >= 0).sum()\n\n return spacing", "def is_equidistant(self) -> bool:\n if len(self.time) < 3:\n return True\n return len(self.time.to_series().diff().dropna().unique()) == 1", "def isoutside(coords, shape):\n # Label external pores for trimming below\n if len(shape) == 1: # Spherical\n # Find external points\n r = np.sqrt(np.sum(coords**2, axis=1))\n Ps = r > shape[0]\n elif len(shape) == 2: # Cylindrical\n # Find external pores outside radius\n r = np.sqrt(np.sum(coords[:, [0, 1]]**2, axis=1))\n Ps = r > shape[0]\n # Find external pores above and below cylinder\n if shape[1] > 0:\n Ps = Ps + (coords[:, 2] > shape[1])\n Ps = Ps + (coords[:, 2] < 0)\n else:\n pass\n elif len(shape) == 3: # Rectilinear\n shape = np.array(shape, dtype=float)\n try:\n lo_lim = shape[:, 0]\n hi_lim = shape[:, 1]\n except IndexError:\n lo_lim = np.array([0, 0, 0])\n hi_lim = shape\n Ps1 = np.any(coords > hi_lim, axis=1)\n Ps2 = np.any(coords < lo_lim, axis=1)\n Ps = Ps1 + Ps2\n return Ps", "def point_sur_segment(self, pt):\n dp = pt - self.c\n d = dp.length - self.r\n a = atan2(dp.y, dp.x)\n t = (a - self.a0) / self.da\n return t > 0 and t < 1, d, t", "def checkMedian(nums1, nums2, x=None, x_dash=None,y=None,y_dash=None,median=False):\n # odd array\n if (len(nums1) + len(nums2)) % 2 == 1:\n if x != None 
and x_dash != None and y != None and y_dash != None:\n if nums1[x] >= nums2[y] and nums1[x] <= nums2[y_dash]:\n median = nums1[x]\n return median\n elif nums1[x] <= nums2[y] and nums1[x_dash] >= nums2[y]:\n median = nums2[y]\n return median\n else:\n return False\n elif x == None and x_dash != None and y != None and y_dash == None:\n if nums1[x_dash] >= nums2[y]:\n median = nums2[y]\n return median\n else:\n return False\n elif x != None and x_dash == None and y != None and y_dash != None:\n if nums2[y] >= nums1[x]:\n median = nums2[y]\n return median\n elif nums1[x] >= nums2[y] and nums1[x] <= nums2[y_dash]:\n\n median = nums1[x]\n return median\n else:\n return False\n # even array\n else:\n if x != None and x_dash != None and y != None and y_dash != None:\n if nums1[x] >= nums2[y] and nums1[x] <= nums2[y_dash]:\n if nums1[x_dash] <= nums2[y_dash]:\n median = (nums1[x] + nums1[x_dash])/2\n elif nums1[x_dash] > nums2[y_dash]:\n median = (nums1[x] + nums2[y_dash])/2\n return median\n elif nums1[x] <= nums2[y] and nums1[x_dash] >= nums2[y]:\n if nums2[y_dash] <= nums1[x_dash]:\n median = (nums2[y] + nums2[y_dash])/2\n elif nums2[y_dash] > nums1[x_dash]:\n median = (nums2[y] + nums1[x_dash])/2\n return median\n else:\n return False\n elif x == None and x_dash != None and y != None and y_dash == None:\n if nums1[x_dash] >= nums2[y]:\n median = (nums2[y] + nums1[x_dash])/2\n return median\n else:\n return False\n elif x != None and x_dash == None and y != None and y_dash != None:\n if nums2[y] >= nums1[x]:\n median = (nums2[y] + nums2[y_dash])/2\n return median\n elif nums1[x] >= nums2[y] and nums1[x] <= nums2[y_dash]:\n median = (nums1[x] + nums2[y_dash])/2\n return median\n else:\n return False\n elif x != None and x_dash == None and y == None and y_dash != None:\n if nums1[x] <= nums2[y_dash]:\n median = (nums1[x] + nums2[y_dash])/2\n return median\n else:\n return False\n elif x == None and x_dash != None and y != None and y_dash != None:\n if nums2[y] <= nums1[x_dash]:\n if nums1[x_dash]<= nums2[y_dash]:\n median = (nums2[y] + nums1[x_dash])/2\n return median\n elif nums1[x_dash] > nums2[y_dash]:\n median = (nums2[y] + nums2[y_dash])/2\n return median\n else:\n return False", "def contains_point(self, x, y): \r\n n = len(self.points)\r\n inside = False\r\n \r\n x1, y1 = self.points[0]\r\n for i in range(n + 1):\r\n x2, y2 = self.points[i % n]\r\n if y > min(y1, y2):\r\n if y <= max(y1, y2):\r\n if x <= max(x1, x2):\r\n if y1 != y2:\r\n xinters = (y - y1) * (x2 - x1) / (y2 - y1) + x1\r\n if x1 == x2 or x <= xinters:\r\n inside = not inside\r\n x1, y1 = x2, y2\r\n \r\n return inside", "def in_bounds(self, point):\n # Sanity checks\n # Check that point has same number of dimensions as graph\n if not len(point) == len(self.dimensions):\n raise Exception(\"Point has \" + str(len(point)) + \" dimensions, Coordination Space has \" + \\\n str(len(self.dimensions)) + \" dimensions.\")\n\n for i, coordinate in enumerate(point):\n if coordinate > self.dimensions[i] or coordinate < 0:\n return False\n\n return True", "def is_underlapping(\n geom: LineString,\n trace: LineString,\n endpoint: Point,\n snap_threshold: float,\n snap_threshold_error_multiplier: float,\n) -> Optional[bool]:\n split_results = list(split(geom, trace).geoms)\n if len(split_results) == 1:\n # Do not intersect\n return True\n if len(split_results) > 1:\n for segment in split_results:\n if (\n segment.distance(endpoint)\n < snap_threshold * snap_threshold_error_multiplier\n ):\n # Dangling end, overlapping\n return False\n 
log_prints = {\n \"geom\": geom,\n \"trace\": trace,\n \"endpoint\": endpoint,\n \"snap_threshold\": snap_threshold,\n \"snap_threshold_error_multiplier\": snap_threshold_error_multiplier,\n }\n log.error(f\"Expected is_underlapping to be resolvable.\\nvalues:{log_prints}\")\n return None", "def segment_segment(s1, s2):\n l1=s1.line()\n l2=s2.line()\n i = line_line(l1, l2)\n if isinstance(i, bool): return False\n k = s1.affine(i)\n return k >= 0 and k <= 1 and i", "def is_spanning(vs, es):\n [e.clear() for e in es]\n d = dijkstra(vs, es, vs[0])\n [e.restore() for e in es]\n return max(d) < 1e-5", "def check_star(peaks,data):\n star = 0\n for i in peaks:\n max = data[i]\n if i<3 or i+4>data.size:\n continue\n mean = data[i-3:i+4].mean()\n if (max-mean)<0.1*max:\n star += 1\n if star*2>peaks.size:\n return True\n else:\n return False", "def is_outlier(points, threshold=3.5):\n # transform into vector\n if len(points.shape) == 1:\n points = points[:,None]\n\n # compute median value \n median = np.median(points, axis=0)\n \n # compute diff sums along the axis\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n # compute MAD\n med_abs_deviation = np.median(diff)\n \n # compute modified Z-score\n # http://www.itl.nist.gov/div898/handbook/eda/section4/eda43.htm#Iglewicz\n modified_z_score = 0.6745 * diff / med_abs_deviation\n\n # return a mask for each outlier\n return modified_z_score > threshold", "def __contains__(self, item):\n if len(item) != len(self.sizes):\n raise ValueError('Point dimension does not match grid dimension')\n for i in range(len(self.sizes)):\n if not 1 <= item[i] < self.sizes[i] - 1:\n return False\n return True", "def _isproperdist(X):\n X = np.asarray(X)\n if not np.allclose(np.sum(X), 1) or not np.all(X>=0) or not np.all(X<=1):\n return False\n else:\n return True", "def inside( self, point ):\n for i in range( 0, len(point) ):\n if math.fabs( self.center[i] - point[i] ) > self.dimLens[i]/2.0:\n return False;\n return True;", "def in_display(self, point):\n x, y = point\n if x < 0 or x > self.width or \\\n y < 0 or y > self.height:\n return False\n return True", "def check_lengths(self, length: Expr) -> bool:\n for point1 in self.points:\n for point2 in self.points - {point1}:\n if abs(point2 - point1) == length:\n print(f'Length {length} found between points: {point1} and {point2}')\n return True\n return False", "def curvature_sanity(left_curvature, left_offset, right_curvature, right_offset):\n if return_queue_len(flag='L') >= 1 and return_queue_len(flag='R') >= 1:\n offset = center_position - (left_offset + right_offset) / 2.\n offset_measure = np.abs(overall_offset - offset)\n return True if offset_measure < 0.2 else False\n else:\n return True", "def contains_point(self, point):\n\t\tthreshold = 0.6\n\t\tx = point[0]\n\t\ty = point[1]\n\t\tif (x >= (self.xmin - threshold) and x <= (self.xmax + threshold) and\n\t\t\ty >= (self.ymin - threshold) and y <= (self.ymax + threshold)):\n\t\t return True\n\t\treturn False", "def within(point: tuple, box: tuple) -> bool:\r\n \r\n return box[0] < point[0] < box[2] and box[1] < point[1] < box[3]", "def contains_pt(self, pt):\n x, y = pt\n if not self.x - self.radius < x < self.x + self.radius:\n return False\n if not self.y - self.radius < y < self.y + self.radius:\n return False\n return True", "def has_been_segmented(input, loc, clockwise):\n height = np.shape(input)[0]\n width = np.shape(input)[1]\n\n if clockwise:\n if loc[0] == 0 and loc[2] + 1 < width:\n if input[int(loc[1]), int(loc[2]) + 1] < 0:\n 
return True\n elif loc[0] == 1 and loc[2] - 1 >= 0:\n if input[int(loc[1]), int(loc[2]) - 1] < 0:\n return True\n elif loc[0] == 2 and loc[1] - 1 >= 0:\n if input[int(loc[1]) - 1, int(loc[2])] < 0:\n return True\n elif loc[1] + 1 < height:\n if input[int(loc[1]) + 1, int(loc[2])] < 0:\n return True\n else:\n if loc[0] == 0 and loc[2] - 1 >= 0:\n if input[int(loc[1]), int(loc[2]) - 1] < 0:\n return True\n elif loc[0] == 1 and loc[2] + 1 < width:\n if input[int(loc[1]), int(loc[2]) + 1] < 0:\n return True\n elif loc[0] == 2 and loc[1] + 1 < height:\n if input[int(loc[1]) + 1, int(loc[2])] < 0:\n return True\n elif loc[1] - 1 >= 0:\n if input[int(loc[1]) - 1, int(loc[2])] < 0:\n return True\n\n return False", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def check_inside(self, person):\n p_top_x = person[0] + self.padding\n p_left_y = person[1] + self.padding\n p_bottom_x = person[2] - self.padding\n p_right_y = person[3] - self.padding\n\n return p_top_x >= self.top_x and p_left_y >= self.left_y and p_bottom_x <= self.bottom_x \\\n and p_right_y <= self.right_y", "def span_overlap(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n return not (a[0] > b[1] or a[1] < b[0])", "def in_range(x, y):\n if (x < 0 or x > width or y < 0 or y > length):\n return False\n else:\n return True", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def ispoint(x):\n if isvect(x) and x[3] > 0.0:\n return True\n return False", "def space_between(first_value, second_value, bboxes):\n bbox1 = bboxes[second_value][0]\n bbox2 = bboxes[first_value][2]\n space_betw = bbox1 - bbox2\n \n if (space_betw > 6):\n return True\n return False", "def valid(point):\n index = offset(point)\n if tiles[index] == 0:\n return False\n\n index = offset(point + 19)\n\n if tiles[index] == 0:\n return False\n\n return point.x % 20 == 0 or point.y % 20 == 0", "def check_astrometry(ra1,dec1,ra2,dec2,pt_size=0.3):\n\tra_diff = ra2-ra1\n\tdec_diff = dec2-dec1\n\tra_med_diff = np.median(ra_diff)\n\tdec_med_diff = np.median(dec_diff)\n\treturn ra_med_diff, dec_med_diff", "def contains(self, possible_point):\n# if possible_point == self.endpoints[0] or possible_point == self.endpoints[1]:\n# return False\n distance = sum(possible_point.distance_to(p) for p in self.endpoints)\n return abs(distance - self.length()) < 0.0000001", "def _mad_based_outliers(points, minv, maxv, thresh=3.5):\n\n median = np.median(points)\n diff = np.sqrt((points - median)**2)\n med_abs_deviation = np.median(diff)\n\n if med_abs_deviation == 0:\n return points\n\n modified_z_score = 0.6745 * diff / med_abs_deviation\n mask_outliers = modified_z_score > thresh\n\n mask_max = np.abs(points - maxv) < np.abs(points - minv)\n mask_min = np.abs(points - maxv) > np.abs(points - minv)\n\n points[mask_max * mask_outliers] = maxv\n points[mask_min * mask_outliers] = minv\n\n return points", "def _check_normalization(self):\n lastDistance = None\n distance = None\n for idx in xrange(len(self) - 1):\n distance = self[idx+1][0] - self[idx][0]\n\n # first run\n if lastDistance is None:\n lastDistance = distance\n continue\n\n if lastDistance != distance:\n return False\n\n lastDistance = distance\n\n return True", "def is_inside(self, points):\n points = np.atleast_2d(points) - self.centroid\n return np.logical_and(\n np.linalg.norm(points, axis=-1) <= self.radius,\n # At present circles are not orientable, so the z position must\n # match exactly.\n 
np.isclose(points[:, 2], 0),\n )", "def unsafe(self): \n return self.distmin < self.distmax*0.5", "def is_straight_line(self, arr):\n # First pair of point (x0, y0) \n x0 = arr[0][0]\n y0 = arr[0][1]\n\n # Second pair of point (x1, y1) \n x1 = arr[len(arr) - 1][0]\n y1 = arr[len(arr) - 1][1]\n\n dx = x1 - x0\n dy = y1 - y0\n\n # Loop to iterate over the points \n for i in range(len(arr)):\n x = arr[i][0]\n y = arr[i][1]\n\n if (dx * (y - y1) - dy * (x - x1)) > self.movement_tolerance:\n return False\n\n return True", "def isInternal(self, aPoint):\n if (aPoint.x >= self.pMin.x and aPoint.x <= self.pMax.x) \\\n and (aPoint.y >= self.pMin.y and aPoint.y <= self.pMax.y):\n return True\n else:\n return False", "def is_exceptional(self):\n G = self.poset().hasse_diagram()\n for x in G:\n nx = list(G.neighbors_out(x))\n nx.append(x)\n if min(nx) < x and max(nx) > x:\n return False\n return True", "def out_of_bounds(self):\n return not 0 <= self.nodes[0].x < WIDTH * SCALE or not 0 <= self.nodes[0].y < HEIGHT * SCALE", "def close_to_exceeding(self) -> bool:\n mean = self.current / self.num_cuts\n if self.max_frames is not None:\n return self.current + mean > self.max_frames\n if self.max_samples is not None:\n return self.current + mean > self.max_samples\n if self.max_duration is not None:\n return self.current + mean > self.max_duration\n return False", "def _is_blank(im):\n \n # Take the r% center\n r = 0.2\n h1 = int(float(im.shape[0]) * r)\n h2 = im.shape[0] - h1\n w1 = int(float(im.shape[1]) * r) \n w2 = im.shape[1] - w1\n #\n im_center = im[h1:h2, w1:w2]\n \n if np.mean(im_center) < 0.06:\n return True\n else:\n return False", "def swimming_fish_out_of_bounds(f: SwimmingFish, width: int) -> bool:\n return (left_edge(f) < -width/2 and f.dx < 0) or \\\n (right_edge(f) > width/2 and f.dx > 0)", "def isPointCollideWithMargin1(self, point):\n return self.p[0]-Vect(1, 1) <= point <= self.p[2]", "def is_within(point, surface, offset):\r\n return (point[0] >= offset[0] and point[0] < offset[0] + surface.get_width() \\\r\n and point[1] >= offset[1] and point[1] < offset[1] + surface.get_height())", "def _isPoint(self):\n return (self.width == 0 and self.height == 1) or (self.height == 0 and self.width == 1)", "def _isPoint(self):\n return (self.width == 0 and self.height == 1) or (self.height == 0 and self.width == 1)", "def inside_unit_circle(point):\n distance = math.sqrt(point[0] ** 2 + point[1] ** 2)\n return distance < 1", "def test_point_within_dimensions_border():\n point = np.array([100, 20])\n image_dimensions = np.array([100, 100])\n assert not point_within_dimensions(point, image_dimensions)", "def match_marking_points(point_a, point_b):\n \n squared_distance_thresh = 0.000277778 # 10 pixel in 600*600 image\n direction_angle_thresh = 0.5235987755982988 # 30 degree in rad \n \n dist_square = calc_point_squre_dist(point_a, point_b)\n #if min(point_a.shape[1], point_b.shape[1]) <= 2:\n if True:\n return dist_square < squared_distance_thresh\n\n angle = calc_point_direction_angle(point_a, point_b)\n if point_a[3] > 0.5 and point_b[3] < 0.5:\n return False\n if point_a[3] < 0.5 and point_b[3] > 0.5:\n return False\n return (dist_square < squared_distance_thresh\n and angle < direction_angle_thresh)", "def hit_wall(s):\n if s == [1, 1]: # We would enter the None-field\n return True\n elif s[0] < 0 or s[0] > 2 or s[1] < 0 or s[1] > 3: # We would be out of bounds\n return True\n else:\n return False", "def is_valid_size(self, dot_width, dot_height, distance, screen_width, screen_height):\n if 
dot_width * distance > screen_width or dot_height * distance > screen_height:\n return False\n return True", "def remove_point(self, x):\n idx = np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n for i in range(self.nstrats):\n self.sampling_strategies[i].remove_point(x)\n return True\n return False", "def test_dp1_closed_segment(grid):\n space = bempp.api.function_space(grid, \"DP\", 1, segments=[1])\n\n for elem_index in range(grid.number_of_elements):\n if space.support[elem_index]:\n assert _np.all(space.local_multipliers[elem_index] != 0)\n else:\n assert _np.all(space.local_multipliers[elem_index] == 0)", "def is_origin_inside(points):\n\n # SciPy requires the array of points to have indices [point, axis]\n # so we have to transpose our arrays\n hull = scipy.spatial.Delaunay(points.T)\n return numpy.all(hull.find_simplex(origin().T) >= 0)", "def is_spot_possible(left, right, bottom, top):\n return True\n if right < 6 or bottom < 6:\n # print(\"IMPOSSIBLE\", left, right, top, bottom)\n return False\n if left > 18 or top > 18:\n # print(\"IMPOSSIBLE\", left, right, top, bottom)\n return False\n if abs(top - bottom) > 16 or abs(right - left) > 16:\n # print(\"IMPOSSIBLE\", left, right, top, bottom)\n return False\n return True", "def segmentsIntersect(self, seg1, seg2):\n\t\ts1_x = seg1[1][0] - seg1[0][0]\n\t\ts1_y = seg1[1][1] - seg1[0][1]\n\t\ts2_x = seg2[1][0] - seg2[0][0]\n\t\ts2_y = seg2[1][1] - seg2[0][1]\n\n\t\tdenom = -s2_x * s1_y + s1_x * s2_y\n\n\t\tif (denom > 1e-10):\n\t\t\ts = (-s1_y * (seg2[0][0] - seg1[0][0]) + s1_x * (seg2[0][1] - seg1[0][1])) / (-s2_x * s1_y + s1_x * s2_y)\n\t\t\tt = ( s2_x * (seg2[0][1] - seg1[0][1]) - s2_y * (seg2[0][0] - seg1[0][0])) / (-s2_x * s1_y + s1_x * s2_y)\n\t\t\treturn (s >= 0 and s <= 1 and t >= 0 and t <= 1)\n\t\telse:\n\t\t\treturn False", "def remove_point(self, x):\n idx = np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n return True\n return False", "def segment_analyzer(sizes, cluster_size_std=defaults.MAX_SEGMENT_CLUSTER_SIZE_STD):\n if len(sizes) < 2: return False\n size_std = np.std(sizes)\n size_mean = np.mean(sizes)\n if len(sizes) == 2:\n if size_std <= cluster_size_std and size_std / size_mean < 0.2:\n return True\n else:\n return False\n if size_std <= cluster_size_std and size_std / size_mean <= 0.3:\n return True\n cluster = one_dimension_val_clutering(sizes)\n if len(cluster) == 0: return False\n cluster_std = np.std(cluster)\n cluster_mean = np.mean(cluster)\n if len(cluster) == 2:\n if cluster_std / cluster_mean < 0.1:\n return True\n else:\n return False\n if cluster_std / cluster_mean < 0.2 or len(cluster) >= 4:\n return True\n return False", "def condition_segment(segment):\n # 1. If the start and end points are the same, done and one\n if segment[0][0] == segment[-1][0] and segment[0][1] == segment[-1][1]:\n if len(segment) == 2:\n LOG.warning(\" REJECTING two point segment, both equal\")\n return None\n return [segment]\n # 2. 
If point start and end points are inside the conus and they are closer\n # to each other than the CONUS bounds, then close off polygon\n if all(not point_outside_conus(Point(segment[i])) for i in [0, -1]):\n pt0 = Point(segment[0])\n pt1 = Point(segment[-1])\n cpt0 = get_conus_point(pt0)\n cpt1 = get_conus_point(pt1)\n cdist0 = cpt0.distance(pt0)\n cdist1 = cpt1.distance(pt1)\n if pt0.distance(pt1) < 0.5 * min([cdist0, cdist1]):\n LOG.warning(\" non-closed polygon assumed unclosed in error.\")\n segment.append(segment[0])\n return [segment]\n # 3. If the line intersects the CONUS 3+ times, split the line\n ls = ensure_outside_conus(LineString(segment))\n # Examine how our linestring intersects the CONUS polygon\n res = ls.intersection(CONUS[\"poly\"])\n if isinstance(res, LineString):\n return [ls.coords]\n # We got multiple linestrings\n # pylint: disable=no-member\n res = [r for r in res.geoms if r.length > 0.2]\n if len(res) == 1:\n LOG.warning(\" was able to filter out very short lines\")\n return [ensure_outside_conus(res[0]).coords]\n LOG.warning(\" returning a MultiLineString len=%s\", len(res))\n return [ensure_outside_conus(x).coords for x in res]", "def _overlap(x1, w1, x2, w2):\r\n if x1+w1 < x2-w2: return False\r\n if x1-w1 > x2+w2: return False\r\n\r\n return True", "def within_distance(self, point, distance):\n return all(distance >= seg.shortest_distance_to(point)\n for seg in self.segments)", "def inside_limits(self, point):\n if not self.regions:\n # Use rectangle check\n lat, lon = point.latitude, point.longitude\n if (lon > self.limits[0] and lat > self.limits[1] and\n lon < self.limits[2] and lat < self.limits[3]):\n return True\n else:\n return False\n else:\n # Check inside all possible regions\n p = Point((point.longitude, point.latitude))\n print(p, point)\n # import IPython; IPython.embed()\n for name, poly in self.regions.items():\n # if poly.contains(p):\n if p.intersects(poly):\n return name\n return False", "def _is_skull_stripped(imgs):\n\n def _check_img(img):\n data = np.abs(nb.load(img).get_fdata(dtype=np.float32))\n sidevals = (\n data[0, :, :].sum()\n + data[-1, :, :].sum()\n + data[:, 0, :].sum()\n + data[:, -1, :].sum()\n + data[:, :, 0].sum()\n + data[:, :, -1].sum()\n )\n return sidevals < 10\n\n return all(_check_img(img) for img in imgs)", "def has_compatible_ligands(self, identity):\n return ((len(self.bad_coords[identity]) == 0) and\n (not self.BAD_COORD_RESIDUE in self.inaccuracies[identity]))", "def is_outlier(hist, value):\n stdev = np.std(hist, axis=0)\n avg = np.average(hist[-15:], axis=0)\n if any(lf for lf, avg, std in zip(value, avg, stdev) if lf > avg + 3 * std) or \\\n any(lf for lf, avg, std in zip(value, avg, stdev) if lf < avg - 3 * std):\n return True\n return False", "def is_inside(self, mX, mY, point):\n return (math.sqrt((point[0] - mX) * (point[0] - mX)\n + (point[1] - mY) * (point[1] - mY)) <= 2)", "def _acceptable_dimensions(self, box):\n return self._min_width < box.x1-box.x0 < self._max_width and\\\n self._min_height < box.y1-box.y0 < self._max_height", "def valid_coordinates(self, x, y):\n return ((x >= 0) and (x < self.width) and\n (y >= 0) and (y < self.height))", "def contains_point(self, point):\n if self.orientation(point) == 0:\n return point >= min(self.begin, self.end) and point <= max(self.begin, self.end)\n\n return False", "def is_array_dominated(array_0, array_1):\n for val_0, val_1 in zip(sorted(array_0), sorted(array_1)):\n if val_0 >= val_1:\n return False\n return True", "def remove_point(self, x):\n\n idx = 
np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n return True\n return False", "def remove_point(self, x):\n\n idx = np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n return True\n return False", "def pinp_crossing(point:tuple, edges:list, include_edges:bool=True)->bool:\n return crossing_number(point, edges, include_edges) % 2 == 1", "def identical_to(self, elem):\n \n return (self.n == elem.n) and (math.fabs(self.dx - elem.dx) < 0.001) and (math.fabs(self.dy - elem.dy) < 0.001) and (math.fabs(self.dz - elem.dz) < 0.001)", "def verify_coords(self, piece_coords):\n if piece_coords[0] >= self.size or piece_coords[0] < 0:\n return False\n if piece_coords[1] >= self.size or piece_coords[1] < 0:\n return False\n return True", "def tunnify( self, segment ):\n\t\ttry:\n\t\t\tx1, y1 = segment[0]\n\t\t\tx2, y2 = segment[1]\n\t\t\tx3, y3 = segment[2]\n\t\t\tx4, y4 = segment[3]\n\t\t\t\n\t\t\tif [x1, y1] == [x2, y2]:\n\t\t\t\txInt, yInt = x3, y3\n\t\t\t\tfirstHandlePercentage = self.tunnifyLo\n\t\t\t\tsecondHandlePercentage = self.tunnifyHi\n\t\t\telif [x3, y3] == [x4, y4]:\n\t\t\t\txInt, yInt = x2, y2\n\t\t\t\tfirstHandlePercentage = self.tunnifyHi\n\t\t\t\tsecondHandlePercentage = self.tunnifyLo\n\t\t\telse:\n\t\t\t\t# no zero handle\n\t\t\t\treturn False\n\t\t\t\n\t\t\tintersectionPoint = NSPoint( xInt, yInt )\n\t\t\tsegmentStartPoint = NSPoint( x1, y1 )\n\t\t\tsegmentFinalPoint = NSPoint( x4, y4 )\n\t\t\t\n\t\t\tfirstHandleX, firstHandleY = self.xyAtPercentageBetweenTwoPoints( segmentStartPoint, intersectionPoint, firstHandlePercentage )\n\t\t\tsecondHandleX, secondHandleY = self.xyAtPercentageBetweenTwoPoints( segmentFinalPoint, intersectionPoint, secondHandlePercentage )\n\t\t\t\n\t\t\treturn firstHandleX, firstHandleY, secondHandleX, secondHandleY\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"tunnify: %s\" % str(e) )" ]
[ "0.6294915", "0.62770545", "0.62648475", "0.61977303", "0.6162508", "0.6156369", "0.6076896", "0.605726", "0.6015216", "0.59716797", "0.59055436", "0.5897351", "0.5878322", "0.5816097", "0.57989115", "0.57959044", "0.5737896", "0.57194346", "0.571571", "0.57036346", "0.56697905", "0.56618387", "0.5660853", "0.5655081", "0.5631485", "0.5630115", "0.56300336", "0.56196296", "0.5611785", "0.5607689", "0.5602608", "0.5586439", "0.554774", "0.5542325", "0.55420107", "0.5533638", "0.55264914", "0.5492981", "0.54917973", "0.54889524", "0.5482982", "0.5477139", "0.5474137", "0.5467928", "0.54533404", "0.5451382", "0.544975", "0.54371744", "0.543652", "0.5435457", "0.5411871", "0.54085934", "0.53965354", "0.5392338", "0.5388818", "0.5383837", "0.5381654", "0.53739613", "0.53658074", "0.53656775", "0.53620243", "0.53590006", "0.53583354", "0.5354383", "0.53522253", "0.5352042", "0.5348886", "0.5337642", "0.53365463", "0.5335389", "0.5335389", "0.53269786", "0.5323285", "0.5319545", "0.53095126", "0.5308433", "0.52997005", "0.52919817", "0.5268779", "0.525536", "0.5252227", "0.5251018", "0.5247804", "0.52454305", "0.52449536", "0.52399397", "0.52355385", "0.522893", "0.5223766", "0.5223568", "0.52190614", "0.52165353", "0.5215837", "0.5211039", "0.5210444", "0.5207981", "0.5207981", "0.5207216", "0.52045375", "0.52043337", "0.52013886" ]
0.0
-1
Interpolate the points and radii between sections that have too few points.
def interpPoints(self, interpRad=False):
    # print(np.shape(long_distances))
    long_sections, long_distances, meddist = self.findLongSections()
    print('Long inter-point distances found: %i' % len(long_sections))
    count = 0
    for sec in long_sections:
        print('Supposed long section %i has %i nodes' \
              % (sec, len(self.sections[sec])))
        # set first and last points for interpolation
        pt0, pt1 = self.sections[sec][0], self.sections[sec][-1]
        # find number of points
        numpts = int(long_distances[long_sections.index(sec)]/meddist)
        Xs = np.linspace(pt0[0], pt1[0], numpts)
        Ys = np.linspace(pt0[1], pt1[1], numpts)
        Zs = np.linspace(pt0[2], pt1[2], numpts)
        newpts = np.dstack((Xs, Ys, Zs))
        newpts = [newpts[0][i] for i in xrange(len(newpts[0]))]
        self.sections[sec] = newpts
        count = count + 1
        rad0, rad1 = self.secRads[sec][0], self.secRads[sec][-1]
        # print(rad0, rad1)
        rads = np.linspace(rad0, rad1, numpts)
        # print(rads)
        self.secRads[sec] = rads
    long_sections, long_distances, meddist = self.findLongSections()
    print('Long sections still remaining: %i' % len(long_sections))
    if len(long_sections) > 0:
        print(long_distances, meddist)
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __hinterpolate(self):\n \n # Temp. Data holders\n upperint = []\n lowerint = []\n \n # Dont like this, because here we insert points into the rawdata\n # But it creates consisitent results in the interpolation results\n if self.__upper[0][0] != 0: self.__upper.insert(0,(0.,0.))\n if self.__lower[0][0] != 0: self.__lower.insert(0,(0.,0.))\n \n # Create points\n if self.__interpolation_method == \"l\":\n xpointsU = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n xpointsL = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n elif self.__interpolation_method == \"p\":\n xpointsU = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n xpointsL = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n \n # Calculate secants\n uppersec = [(self.__upper[i+1][1]-self.__upper[i][1])/(self.__upper[i+1][0]-self.__upper[i][0]) for i in range(len(self.__upper)-1)]\n lowersec = [(self.__lower[i+1][1]-self.__lower[i][1])/(self.__lower[i+1][0]-self.__lower[i][0]) for i in range(len(self.__lower)-1)]\n \n # Calculate tangents\n uppertan = [(uppersec[k-1]+uppersec[k])/2 for k in range(1,len(uppersec))]\n uppertan.insert(0,uppersec[0])\n uppertan.append(uppersec[-1])\n\n lowertan = [(lowersec[k-1]+lowersec[k])/2 for k in range(1,len(lowersec))]\n lowertan.insert(0,lowersec[0])\n lowertan.append(lowersec[-1])\n \n # Hermite blending functions\n p0 = lambda t: 2*t**3 - 3*t**2 + 1\n m0 = lambda t: t**3 - 2*t**2 + t\n p1 = lambda t: -2*t**3 + 3*t**2\n m1 = lambda t: t**3 - t**2\n \n # Find matching points to improve accuarcy\n matchU = [(i,j) for i in range(len(xpointsU)) for j in range(len(self.__upper)) if xpointsU[i] == self.__upper[j][0]]\n matchL = [(i,j) for i in range(len(xpointsL)) for j in range(len(self.__lower)) if xpointsL[i] == self.__lower[j][0]]\n \n # Reverse match pairs to insure no index errors\n matchU.reverse()\n matchL.reverse()\n\n# print(self.__lower)\n# print(xpointsL)\n # Pop xpoints that dont require interpolation and append the point into the upperint list\n for i in matchU:\n xpointsU.pop(i[0])\n upperint.append(self.__upper[i[1]])\n \n# print(matchL)\n \n # Same process as above but for lower airfoil\n for i in matchL:\n xpointsL.pop(i[0])\n lowerint.append(self.__lower[i[1]])\n \n # Interpolate upper points\n for xp in xpointsU:\n for i in range(len(self.__upper)-1):\n if self.__upper[i][0] < xp < self.__upper[i+1][0]:\n h = self.__upper[i+1][0]-self.__upper[i][0]\n t = (xp - self.__upper[i][0]) / h\n solution = ( p0(t)*self.__upper[i][1] + h*m0(t)*uppertan[i] + p1(t)*self.__upper[i+1][1] + h*m1(t)*uppertan[i+1] )\n upperint.append((xp,solution))\n \n # Interpolate lower points\n for xp in xpointsL:\n for i in range(len(self.__lower)-1):\n if self.__lower[i][0] < xp < self.__lower[i+1][0]:\n h = self.__lower[i+1][0]-self.__lower[i][0]\n t = (xp - self.__lower[i][0]) / h\n solution = ( p0(t)*self.__lower[i][1] + h*m0(t)*lowertan[i] + p1(t)*self.__lower[i+1][1] + h*m1(t)*lowertan[i+1] )\n lowerint.append((xp,solution))\n \n # Sort the points to keep the correct sequence\n upperint.sort(key=lambda x:x[0], reverse=True)\n lowerint.sort(key=lambda x:x[0])\n \n # Do checks to insure no duplicates\n if upperint[0][0] != 1.0: upperint.insert(0,(1.0,0.0))\n if upperint[-1][0] != 0.0: upperint.append((0.0,0.0))\n if lowerint[0][0] == 0.0: lowerint.pop(0)\n if lowerint[-1][0] != 1.0: lowerint.append((1.0,0.0))\n\n self.__ProcPoints = upperint + lowerint", 
"def test_interpolation():\n spiral_arm = survey.get_spiral_slice(track = \"perseus\", \n interpolate = True)\n spiral_arm2 = survey.get_spiral_slice(track = \"Per\", \n interpolate = False)\n\n assert np.allclose(spiral_arm[\"INTEN\"], spiral_arm2[\"INTEN\"], equal_nan = True)", "def interpolate(self, distance, normalized=...): # -> BaseGeometry:\n ...", "def test_interpolation(self):\n\n ndx1, ndx2 = self.find_partition()\n tessellation = Delaunay(self.grid[ndx2,:])\n\n # initialisation\n results = []\n ndim = self.ndim+1\n\n for j in ndx1:\n nmodels = len(self.tracks[j].models)\n aResult = np.empty((nmodels,ndim+nglb+6),dtype=gtype)\n pt = self.tracks[j].params + [0.0,]\n\n for i in range(nmodels):\n aModel1 = self.tracks[j].models[i]\n pt[-1] = aModel1.glb[iage]\n aModel2 = interpolate_model(self,pt,tessellation,ndx2)\n aResult[i,0:ndim] = pt\n if (aModel2 is None):\n aResult[i,ndim:ndim+nglb+6] = np.nan\n else:\n aResult[i,ndim:ndim+nglb+6] = compare_models(aModel1,aModel2)\n\n results.append(aResult)\n\n return results, ndx1, ndx2, tessellation", "def interpolate_none(self):\n\n # Reset processed data\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan", "def _interpolate(self, kps1: List[List[kp]], kps2: List[List[kp]]) -> np.ndarray:\n interpolated_kps = []\n for i in range(len(kps1)):\n # If one of the two points is empty -> Not interpolate\n if len(kps1[i]) != 0 and len(kps2[i]) != 0:\n interpolated_coords = np.linspace(np.array(kps1[i]), np.array(kps2[i]), num=3).tolist()\n interpolated_kps.append(interpolated_coords[1])\n else:\n interpolated_kps.append([None, None, None])\n return np.array(interpolated_kps)", "def interpolate_hold_9(self):\n\n # Initialize variables\n n_ensembles = self.u_mps.shape[0]\n\n # Get data from object\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan\n\n n_invalid = 0\n # Process data by ensembles\n for n in range(n_ensembles):\n # Check if ensemble is invalid and number of consecutive invalids is less than 9\n if self.valid_data[0, n] == False and n_invalid < 9:\n self.u_processed_mps[n] = self.u_processed_mps[n - 1]\n self.v_processed_mps[n] = self.v_processed_mps[n - 1]\n n_invalid += 1\n else:\n n_invalid = 0", "def interpolate_nans(self):\n\n signal = self.signal\n\n # check for more than one nan in row\n for i in range(len(signal)-1) :\n if np.isnan(signal[i]) and np.isnan(signal[i+1]) :\n raise Exception('There are two nans in a row ask moritz what to do !')\n\n if np.isnan(signal[0]) :\n np.signal[0] = signal[1]\n if np.isnan(signal[-1]) :\n signal[-1] = signal[-2]\n\n for i in range(1,len(signal)-1) :\n if np.isnan(signal[i]):\n signal[i] = (signal[i-1] + signal[i+1])/2", "def linear_interpolation(self, pt1, pt2, unknown):\n\n #Write your code for linear interpolation here\n pt1,intensity1=pt1\n pt2,intensity2=pt2\n newPoint=unknown\n intensity_diff=pt2-pt1\n if(intensity_diff<=0):\n intensity_diff=1\n\n a1=pt2-newPoint\n b1=a1/intensity_diff\n x=intensity1*b1\n a2=newPoint - pt1\n b2=a2/intensity_diff\n y=intensity2*b2\n new_intensity=x+y\n\n return new_intensity", "def interpolate(self, *point, **kwargs):\n\n # Assume alpha enhancement of 0.4 if not given.\n if len(point) == 3:\n point = [] + list(point) + 
[0.4]\n warnings.warn(\n \"Assuming [alpha/Fe] = 0.4 composition unless \"\n \"otherwise specified.\", StandardCompositionAssumed)\n elif len(point) == 4:\n point = list(point)\n warnings.warn(\n \"Fourth stellar param is [alpha/Fe] = {}\".format(point[3]))\n\n return super(self.__class__, self).interpolate(*point, **kwargs)", "def check_interp(self):\n\n points = np.loadtxt(\"skeleton_temp/\" + cell + \"_points.txt\", delimiter=',')\n\n self.initial_scatter = ax.scatter(points[:, 0],\n points[:, 1],\n points[:, 2], s=5, c='r')\n self.cell_points = self.get_cell_xyz()\n ax.scatter(self.cell_points[::5, 0],\n self.cell_points[::5, 1],\n self.cell_points[::5, 2], s=3, c='b', alpha=.03)\n ax.set_xlabel('X (um)')\n ax.set_ylabel('Y (um)')\n ax.set_zlabel('Z (um)')", "def test_linear_interpolation_outside_domain(self):\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Simple example first for debugging\n xis = numpy.linspace(0.9, 4.0, 4)\n etas = numpy.linspace(5, 9.1, 3)\n points = combine_coordinates(xis, etas)\n refs = linear_function(points[:, 0], points[:, 1])\n\n vals = interpolate2d(x, y, A, points, mode='linear',\n bounds_error=False)\n msg = ('Length of interpolation points %i differs from length '\n 'of interpolated values %i' % (len(points), len(vals)))\n assert len(points) == len(vals), msg\n for i, (xi, eta) in enumerate(points):\n if xi < x[0] or xi > x[-1] or eta < y[0] or eta > y[-1]:\n assert numpy.isnan(vals[i])\n else:\n msg = ('Got %.15f for (%f, %f), expected %.15f'\n % (vals[i], xi, eta, refs[i]))\n assert numpy.allclose(vals[i], refs[i],\n rtol=1.0e-12, atol=1.0e-12), msg\n\n # Try a range of combinations of points outside domain\n # with error_bounds True\n print\n for lox in [x[0], x[0] - 1]:\n for hix in [x[-1], x[-1] + 1]:\n for loy in [y[0], y[0] - 1]:\n for hiy in [y[-1], y[-1] + 1]:\n\n # Then test that points outside domain can be handled\n xis = numpy.linspace(lox, hix, 4)\n etas = numpy.linspace(loy, hiy, 4)\n points = combine_coordinates(xis, etas)\n\n if lox < x[0] or hix > x[-1] or \\\n loy < y[0] or hiy > y[-1]:\n try:\n vals = interpolate2d(x, y, A, points,\n mode='linear',\n bounds_error=True)\n except BoundsError, e:\n assert 'bounds_error was requested' in str(e)\n else:\n msg = 'Should have raised bounds error'\n raise Exception(msg)\n\n # Try a range of combinations of points outside domain with\n # error_bounds False\n for lox in [x[0], x[0] - 1, x[0] - 10]:\n for hix in [x[-1], x[-1] + 1, x[-1] + 5]:\n for loy in [y[0], y[0] - 1, y[0] - 10]:\n for hiy in [y[-1], y[-1] + 1, y[-1] + 10]:\n\n # Then test that points outside domain can be handled\n xis = numpy.linspace(lox, hix, 10)\n etas = numpy.linspace(loy, hiy, 10)\n points = combine_coordinates(xis, etas)\n refs = linear_function(points[:, 0], points[:, 1])\n vals = interpolate2d(x, y, A, points,\n mode='linear', bounds_error=False)\n\n assert len(points) == len(vals), msg\n for i, (xi, eta) in enumerate(points):\n if xi < x[0] or xi > x[-1] or\\\n eta < y[0] or eta > y[-1]:\n msg = 'Expected NaN for %f, %f' % (xi, eta)\n assert numpy.isnan(vals[i]), msg\n else:\n msg = ('Got %.15f for (%f, %f), expected '\n '%.15f' % (vals[i], xi, eta, refs[i]))\n assert numpy.allclose(vals[i], refs[i],\n rtol=1.0e-12,\n 
atol=1.0e-12), msg", "def _interpolation(self, video):\n self.F_int = []\n self.mgrid_0 = []\n self.mgrid_1 = []\n for p in range(video.points.shape[0]):\n _m_0, _m_1 = np.meshgrid(self.extended_points_0[p], self.extended_points_1[p])\n _F_int = interp2d(self.extended_points_0[p], self.extended_points_1[p], video.mraw[0, _m_0, _m_1], kind='cubic')\n self.F_int.append(_F_int)\n\n m_0, m_1 = np.meshgrid(self.extended_points_0[p, self.pad:-self.pad], self.extended_points_1[p, self.pad:-self.pad])\n self.mgrid_0.append(m_0)\n self.mgrid_1.append(m_1)", "def InterpolateDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def interpolate(timepoint_defined, signal, interp_type, TR):\n\n timepoint_defined = np.array(timepoint_defined)\n\n true_inds = np.where(timepoint_defined == True)[0]\n false_inds = np.where(timepoint_defined == False)[0]\n\n\n signal_copy = np.array(signal)\n\n if interp_type == 'linear':\n\n #Still need to handle beginning/end cases\n\n for temp_timepoint in false_inds:\n\n\n #past_timepoint = true_inds[np.sort(np.where(true_inds < temp_timepoint)[0])[-1]]\n #future_timepoint = true_inds[np.sort(np.where(true_inds > temp_timepoint)[0])[0]]\n\n\n #Be sure there is at least one future timepoint and one past timepoint.\n #If there isn't, then grab either two past or two future timepoints and use those\n #for interpolation. If there aren't even two total past + future timepoints, then\n #just set the output to 0. Could also set the output to be unadjusted, but this\n #is a way to make the issue more obvious.\n temp_past_timepoint = np.sort(np.where(true_inds < temp_timepoint)[0])\n temp_future_timepoint = np.sort(np.where(true_inds > temp_timepoint)[0])\n\n #If we don't have enough data to interpolate/extrapolate\n if len(temp_past_timepoint) + len(temp_future_timepoint) < 2:\n\n signal_copy[temp_timepoint] = 0\n\n #If we do have enough data to interpolate/extrapolate\n else:\n\n if len(temp_past_timepoint) == 0:\n past_timepoint = true_inds[temp_future_timepoint[1]]\n else:\n past_timepoint = true_inds[temp_past_timepoint[-1]]\n\n if len(temp_future_timepoint) == 0:\n future_timepoint = true_inds[temp_past_timepoint[-2]]\n else:\n future_timepoint = true_inds[temp_future_timepoint[0]]\n\n #Find the appopriate past/future values\n past_value = signal_copy[int(past_timepoint)]\n future_value = signal_copy[int(future_timepoint)]\n\n #Use the interp1d function for interpolation\n interp_object = interp.interp1d([past_timepoint, future_timepoint], [past_value, future_value], bounds_error=False, fill_value='extrapolate')\n signal_copy[temp_timepoint] = interp_object(temp_timepoint).item(0)\n\n return signal_copy\n\n\n #For cubic spline interpolation, instead of taking the past/future timepoint\n #we will just take the closest 5 timepoints. 
If there aren't 5 timepoints, we will\n #set the output to 0\n if interp_type == 'cubic_spline':\n\n sorted_good = np.sort(signal_copy[true_inds])\n min_bound = sorted_good[0]\n max_bound = sorted_good[-1]\n\n #Continue if there are at least 5 good inds\n true_inds_needed = 5\n if len(true_inds) >= true_inds_needed:\n\n for temp_timepoint in false_inds:\n\n closest_inds = true_inds[np.argsort(np.absolute(true_inds - temp_timepoint))]\n closest_vals = signal_copy[closest_inds.astype(int)]\n interp_object = interp.interp1d(closest_inds, closest_vals, kind = 'cubic', bounds_error=False, fill_value='extrapolate')\n signal_copy[temp_timepoint.astype(int)] = interp_object(temp_timepoint).item(0)\n\n min_bound_exceded = np.where(signal_copy < min_bound)[0]\n if len(min_bound_exceded) > 0:\n\n signal_copy[min_bound_exceded] = min_bound\n\n max_bound_exceded = np.where(signal_copy > max_bound)[0]\n if len(max_bound_exceded) > 0:\n\n signal_copy[max_bound_exceded] = max_bound\n\n #If there aren't enough good timepoints, then set the bad timepoints = 0\n else:\n\n signal_copy[false_inds.astype(int)] = 0\n\n\n return signal_copy\n\n\n if interp_type == 'spectral':\n\n signal_copy = spectral_interpolation(timepoint_defined, signal_copy, TR)\n\n return signal_copy", "def interpolate(self, interpolation=\"nearest\", **kwargs):\n return podpac.interpolators.Interpolate(source=self, interpolation=interpolation, **kwargs)", "def InterpolationDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def test_linear_interpolation_nan_points(self):\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Then test that interpolated points can contain NaN\n xis = numpy.linspace(x[0], x[-1], 10)\n etas = numpy.linspace(y[0], y[-1], 10)\n xis[6:7] = numpy.nan\n etas[3] = numpy.nan\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n assert nanallclose(vals, refs, rtol=1e-12, atol=1e-12)", "def test_interpolation_corner_cases(self):\n\n # Define four pixel centers\n x = [2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test that interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 3)\n etas = numpy.linspace(y[0], y[-1], 3)\n points = combine_coordinates(xis, etas)\n\n # Interpolate to cropped grids\n for xc, yc, Ac in [([x[0]], [y[0]], numpy.array([[A[0, 0]]])), # 1 x 1\n ([x[0]], y, numpy.array([A[0, :]])), # 1 x 2\n ]:\n\n vals = interpolate2d(xc, yc, Ac, points, mode='linear')\n msg = 'Expected NaN when grid %s is incomplete' % str(Ac.shape)\n assert numpy.all(numpy.isnan(vals)), msg", "def interpolate_linear(self, transect):\n\n u = 
np.copy(self.u_mps)\n v = np.copy(self.v_mps)\n\n valid = np.isnan(u) == False\n\n # Check for valid data\n if sum(valid) > 1 and sum(self.valid_data[0, :]) > 1:\n\n # Compute ens_time\n ens_time = np.nancumsum(transect.date_time.ens_duration_sec)\n\n # Apply linear interpolation\n self.u_processed_mps = np.interp(x=ens_time,\n xp=ens_time[self.valid_data[0, :]],\n fp=u[self.valid_data[0, :]],\n left=np.nan,\n right=np.nan)\n # Apply linear interpolation\n self.v_processed_mps = np.interp(x=ens_time,\n xp=ens_time[self.valid_data[0, :]],\n fp=v[self.valid_data[0, :]],\n left=np.nan,\n right=np.nan)", "def InterpolatePoint(self, p_int, vtkDataSetAttributes, vtkIdList, *float, **kwargs):\n ...", "def interpolate(self):\n print(\"Interpolating points...\")\n interpolated_points = set()\n if os.cpu_count():\n processes = os.cpu_count()\n print(f\"Running on all {processes} cores.\")\n else:\n processes = 1\n length = len(self.main_cluster)\n delta = math.ceil(length / processes)\n manager = Manager()\n result_map = manager.dict()\n jobs = []\n for index in range(processes):\n start = index * delta\n stop = (index + 1) * delta\n if stop > length:\n stop = length\n p = Process(target=worker, args=(start, stop,\n result_map, index,\n self.distances,\n self.interpolation_threshold,\n self.main_cluster,\n self.color_lookup_table_points))\n jobs.append(p)\n p.start()\n\n for proc in jobs:\n proc.join()\n\n for index in result_map.keys():\n print(index)\n interpolated_points.update(result_map[index])\n\n main_points = [self.get_value_tuple(index) for index in self.main_cluster]\n interpolated_points.update(main_points)\n\n print(\"Finished interpolation!\")\n\n self.interpolated_points = list(interpolated_points)", "def see_what_its_doing_1d():\n all_points = create_points_with_random_pollution_1d(100, 100, 10)\n picked_points = pick_uniform_random_points(all_points, 20)\n interpolated_points = interpolate_unknown_points(picked_points, all_points)\n\n picked_x = []\n picked_pollution = []\n for label, point in picked_points.items():\n picked_x.append(label)\n picked_pollution.append(point.get_pollution_value())\n\n interp_x = []\n inter_pollution = []\n\n for label, point in interpolated_points.items():\n if not label in picked_x:\n interp_x.append(label)\n inter_pollution.append(point.get_pollution_value())\n\n plt.plot(picked_x, picked_pollution, \"ro\", interp_x, inter_pollution, \"go\")\n plt.xlabel(\"Point Label\")\n plt.ylabel(\"Pollution Value\")\n plt.show()", "def see_what_its_doing_1d():\n all_points = create_points_with_random_pollution_1d(100, 100, 10)\n picked_points = pick_uniform_random_points(all_points, 20)\n interpolated_points = interpolate_unknown_points(picked_points, all_points)\n\n picked_x = []\n picked_pollution = []\n for label, point in picked_points.items():\n picked_x.append(label)\n picked_pollution.append(point.get_pollution_value())\n\n interp_x = []\n inter_pollution = []\n\n for label, point in interpolated_points.items():\n if not label in picked_x:\n interp_x.append(label)\n inter_pollution.append(point.get_pollution_value())\n\n plt.plot(picked_x, picked_pollution, \"ro\", interp_x, inter_pollution, \"go\")\n plt.xlabel(\"Point Label\")\n plt.ylabel(\"Pollution Value\")\n plt.show()", "def see_what_its_doing_1d():\n all_points = create_points_with_random_pollution_1d(100, 100, 10)\n picked_points = pick_uniform_random_points(all_points, 20)\n interpolated_points = interpolate_unknown_points(picked_points, all_points)\n\n picked_x = []\n picked_pollution = []\n for 
label, point in picked_points.items():\n picked_x.append(label)\n picked_pollution.append(point.get_pollution_value())\n\n interp_x = []\n inter_pollution = []\n\n for label, point in interpolated_points.items():\n if not label in picked_x:\n interp_x.append(label)\n inter_pollution.append(point.get_pollution_value())\n\n plt.plot(picked_x, picked_pollution, \"ro\", interp_x, inter_pollution, \"go\")\n plt.xlabel(\"Point Label\")\n plt.ylabel(\"Pollution Value\")\n plt.show()", "def test_isentropic_pressure_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = 936.213 * units.hPa\n assert_almost_equal(isentprs[0][1], trueprs, 3)", "def test_call_interpolate(self):\r\n # Verified with iNEXT (http://glimmer.rstudio.com/tchsieh/inext/).\r\n # SE estimates differ because they use a different technique. SE\r\n # estimates have been verified against values in Colwell 2012 instead\r\n # (in separate unit tests).\r\n\r\n # Just reference.\r\n obs = self.estimator1(start=15, stop=15, num_steps=1)\r\n self.assertEqual(obs.getSampleCount(), 1)\r\n assert_almost_equal(obs.getEstimates('S1'),\r\n [(15, 5, 0.674199862463, 3.67859255119, 6.32140744881)])\r\n\r\n # start=1 and reference.\r\n obs = self.estimator1(start=1, stop=1, num_steps=1)\r\n self.assertEqual(obs.getSampleCount(), 1)\r\n assert_almost_equal(obs.getEstimates('S1'),\r\n [(1, 1.0, 0.250252397843, 0.509514313183, 1.49048568682),\r\n (15, 5, 0.674199862463, 3.67859255119, 6.32140744881)])\r\n\r\n # Points in between start=1 and reference.\r\n obs = self.estimator1(start=1, stop=15, num_steps=3)\r\n self.assertEqual(obs.getSampleCount(), 1)\r\n assert_almost_equal(obs.getEstimates('S1'),\r\n [(1, 1.0, 0.250252397843, 0.509514313183, 1.49048568682),\r\n (5, 3.40326340326, 0.655024590447, 2.119438797,\r\n 4.68708800953),\r\n (9, 4.4001998002, 0.680106580075,\r\n 3.0672153976, 5.7331842028),\r\n (13, 4.85714285714, 0.665379090563, 3.55302380357,\r\n 6.16126191071),\r\n (15, 5, 0.674199862463, 3.67859255119, 6.32140744881)])", "def _larange_interpolate(x, points):\n p = PRIME\n k = len(points)\n xs, ys = [], []\n for pt in points:\n xs.append(pt.X)\n ys.append(pt.Y)\n assert k == len(set(xs)), \"Points must be destinct.\"\n nums = [] # numerators\n dens = [] # denominators calculated individually to prevent float div errors\n for i in range(k):\n others = list(xs)\n cur = others.pop(i) # current x value\n nums.append(product(x - o for o in others))\n dens.append(product(cur - o for o in others))\n den = product(dens) # common denominator\n num = sum([_divmod(nums[i] * den * ys[i] % p, dens[i], p) for i in range(k)])\n return _divmod(num, den, p) % p", "def bilinear_interpolation(self, pt1, pt2, pt3, pt4, unknown):\n\n # Write your code for bilinear interpolation here\n # May b you can reuse or call linear interpolatio method to compute this task\n \n X1,Y1, intensity1 = pt1\n X2,Y2, intensity2 = pt2\n X3,Y3, intensity3 = pt3\n X4,Y4, intensity4 = pt4\n newPointX1,newPointY1 = unknown\n\n newpt1=self.linear_interpolation((X1,intensity1),(X2,intensity2),newPointX1)\n newpt2=self.linear_interpolation((X3,intensity3),(X4,intensity4),newPointX1)\n newpt1=Y1,newpt1\n newpt2=Y4,newpt2\n intensity=self.linear_interpolation(newpt1,newpt2,newPointY1)\n \n \n\n return intensity", "def 
test_1d_linear_interpolation_basic(self):\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n\n # Define array with corresponding values\n A = numpy.zeros((len(x)))\n\n # Define values for each xas a linear function\n for i in range(len(x)):\n A[i] = linear_function(x[i], 0)\n\n # Test first that original points are reproduced correctly\n for i, xi in enumerate(x):\n val = interpolate1d(x, A, [xi], mode='linear')[0]\n ref = linear_function(xi, 0)\n assert numpy.allclose(val, ref, rtol=1e-12, atol=1e-12)\n\n # Then test that genuinly interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 10)\n points = xis\n\n vals = interpolate1d(x, A, points, mode='linear')\n refs = linear_function(points, 0)\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)\n\n # Exercise bounds_error flag\n vals = interpolate1d(x, A, points, mode='linear',\n bounds_error=True)\n refs = linear_function(points, 0)\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)", "def interpolate(self, xs):\n tck = splrep(self._xs, self._ys)\n new_ys = splev(xs, tck, der=0)\n return new_ys", "def test_interpolation_random_array_and_nan(self):\n\n # Define pixel centers along each direction\n x = numpy.arange(20) * 1.0\n y = numpy.arange(25) * 1.0\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define arbitrary values for each x, y pair\n numpy.random.seed(17)\n A = numpy.random.random((len(x), len(y))) * 10\n\n # Create islands of NaN\n A[5, 13] = numpy.nan\n A[6, 14] = A[6, 18] = numpy.nan\n A[7, 14:18] = numpy.nan\n A[8, 13:18] = numpy.nan\n A[9, 12:19] = numpy.nan\n A[10, 14:17] = numpy.nan\n A[11, 15] = numpy.nan\n\n A[15, 5:6] = numpy.nan\n\n # Creat interpolation points\n xis = numpy.linspace(x[0], x[-1], 39) # Hit all mid points\n etas = numpy.linspace(y[0], y[-1], 73) # Hit thirds\n points = combine_coordinates(xis, etas)\n\n for mode in ['linear', 'constant']:\n vals = interpolate2d(x, y, A, points, mode=mode)\n\n # Calculate reference result with expected NaNs and compare\n i = j = 0\n for k, (xi, eta) in enumerate(points):\n\n # Find indices of nearest higher value in x and y\n i = numpy.searchsorted(x, xi)\n j = numpy.searchsorted(y, eta)\n\n if i > 0 and j > 0:\n\n # Get four neigbours\n A00 = A[i - 1, j - 1]\n A01 = A[i - 1, j]\n A10 = A[i, j - 1]\n A11 = A[i, j]\n\n if numpy.allclose(xi, x[i]):\n alpha = 1.0\n else:\n alpha = 0.5\n\n if numpy.allclose(eta, y[j]):\n beta = 1.0\n else:\n beta = eta - y[j - 1]\n\n if mode == 'linear':\n if numpy.any(numpy.isnan([A00, A01, A10, A11])):\n ref = numpy.nan\n else:\n ref = (A00 * (1 - alpha) * (1 - beta) +\n A01 * (1 - alpha) * beta +\n A10 * alpha * (1 - beta) +\n A11 * alpha * beta)\n elif mode == 'constant':\n assert alpha >= 0.5 # Only case in this test\n\n if beta < 0.5:\n ref = A10\n else:\n ref = A11\n else:\n msg = 'Unknown mode: %s' % mode\n raise Exception(msg)\n\n #print i, j, xi, eta, alpha, beta, vals[k], ref\n assert nanallclose(vals[k], ref, rtol=1e-12, atol=1e-12)", "def interpol(self,x,y,x1):\n \n N = len(x)\n i = np.minimum(np.maximum(np.searchsorted(x,x1,side='right'),1),N-1)\n xl = x[i-1]\n xr = x[i]\n yl = y[i-1]\n yr = y[i]\n y1 = yl + (yr-yl)/(xr-xl) * (x1-xl)\n above = x1 > x[-1]\n below = x1 < x[0]\n y1 = np.where(above,y[-1] + (x1 - x[-1]) * (y[-1]-y[-2])/(x[-1]-x[-2]), y1)\n y1 = np.where(below,y[0],y1)\n \n return y1, i", "def _interpolation(matrix):\n try:\n\tok = ~np.isnan(matrix)\n \txp = ok.ravel().nonzero()[0]\n \tfp = matrix[~np.isnan(matrix)]\n 
\tx = np.isnan(matrix).ravel().nonzero()[0]\n \tmatrix[np.isnan(matrix)] = np.interp(x, xp, fp)\n \treturn matrix\n except:\n return matrix", "def interpolate(self, known_coords, known_values, interp_coords, groupname):\n\t\t#Find which interpolation method is desired\n\t\tif self.interp_method == \"Grid Linear\":\n\t\t\tinterp_values = self.grid_interpolation_linear(known_coords=known_coords, known_values=known_values, interp_coords=interp_coords, groupname=groupname)\n\t\telif self.interp_method == \"Linear\":\n\t\t\tfrom scipy.interpolate import griddata\n\t\t\tinterp_values = self.interpolate_linear(known_coords=known_coords, known_values=known_values, interp_coords=interp_coords, groupname=groupname)\n\t\telse:\n\t\t\t#Should raise error\n\t\t\tpass\n\t\t\n\t\treturn interp_values", "def interpolatePoints(pts, numBetween):\n if numBetween <= 0:\n return pts\n #create new vector w/ original points distributed evenly\n numPts = len(pts) + (len(pts)-1)*numBetween\n densePts = np.zeros((numPts,3))\n densePts[0::numBetween+1] = pts\n \n #create interpolated points\n for idx in range(0, numPts-1, numBetween+1):\n pt0 = densePts[idx]\n pt1 = densePts[idx+numBetween+1]\n dxyz = (pt1 - pt0)/float(numBetween+1)\n for ii in range(1,numBetween+1):\n densePts[idx+ii] = pt0 + dxyz*ii\n return densePts", "def interpolate_point(xi, x, y):\n num_pts = len(x)\n if num_pts%2==0:\n #Even\n i_h2 = num_pts/2\n i_h1 = num_pts/2 - 1\n if x[i_h2] < xi:\n return interpolate_point(xi, x[i_h2:], y[0:i_h2:])\n elif x[i_h1] > xi:\n return interpolate_point(xi, x[0:i_h1], y[0:i_h1])\n else:\n return ((xi-x[i_h1])*y[i_h2]+(x[i_h2]-xi)*y[i_h1])/(x[i_h2]-x[i_h1])\n else:\n #Odd\n i_half = num_pts/2\n if x[i_half] < xi:\n return interpolate_point(xi, x[i_half:], y[i_half:])\n elif x[i_half] > xi:\n return interpolate_point(xi, x[0:i_half+1], y[0:i_half+1])\n else:\n return y[i_half]", "def interpol(newx, x, y, wrap=None, **kwargs):\n if 'baddata' in kwargs:\n y = np.ma.masked_equal(y, kwargs['baddata'])\n x = np.ma.masked_array(x)\n x.mask = y.mask\n kwargs.__delitem__('baddata')\n else:\n tst = np.ma.core.MaskedArray\n if type(x)!=tst or type(y)!=tst or type(newx)!=tst:\n x = np.ma.masked_array(x)\n y = np.ma.masked_array(y)\n newx = np.ma.masked_array(newx)\n\n def wrap_interp(xout, xin, yin, sect):\n dpsect=360/sect\n yc = np.cos(np.deg2rad(y*dpsect))\n ys = np.sin(np.deg2rad(y*dpsect))\n new_yc = np.interp(newx, x.compressed(), yc.compressed(), **kwargs)\n new_ys = np.interp(newx, x.compressed(), ys.compressed(), **kwargs)\n try:\n new_bad = np.interp(newx, x, y.mask)\n except ValueError:\n new_bad = np.zeros((len(newx)))\n newy = np.rad2deg(np.arctan(new_ys/new_yc))/dpsect\n #1st quadrant is O.K\n #2nd quadrant\n idx = [n for n in range(len(new_yc)) if new_yc[n]<0 and new_ys[n]>0]\n newy[idx] = sect/2 + newy[idx]\n #print('Sector 2 inds: %s' % idx)\n #3rd quadrant\n idx = [n for n in range(len(new_yc)) if new_yc[n]<0 and new_ys[n]<0]\n newy[idx] = sect/2 + newy[idx]\n #print('Sector 3 inds: %s' % idx)\n #4th quadrant\n idx = [n for n in range(len(new_yc)) if new_yc[n]>0 and new_ys[n]<0]\n newy[idx] = sect + newy[idx]\n #print('Sector 4 inds: %s' % idx)\n new_bad = np.ma.make_mask(new_bad)\n newy = np.ma.masked_array(newy, mask=new_bad)\n return newy\n\n if wrap=='hour':\n newy = wrap_interp(newx, x.compressed(), y.compressed(), 24)\n elif wrap=='lon':\n newy = wrap_interp(newx, x.compressed(), y.compressed(), 360)\n elif isinstance(wrap, numbers.Real):\n newy = wrap_interp(newx, x.compressed(), y.compressed(), 
wrap)\n else:\n newy = np.interp(newx, x.compressed(), y.compressed(), **kwargs)\n return newy", "def interpolate(self, interp):\n x = np.linspace(0, 29, len(self.ya))\n f_ya = interpolate.interp1d(x, self.ya)\n f_yv = interpolate.interp1d(x, self.yv)\n f_pa = interpolate.interp1d(x, np.reshape(self.pa, [-1]))\n f_pv = interpolate.interp1d(x, np.reshape(self.pv, [-1]))\n\n x_interp = np.linspace(0, 29, len(self.ya)*interp)\n self.ya = list(f_ya(x_interp))\n self.yv = list(f_yv(x_interp))\n self.pa = list(f_pa(x_interp))\n self.pv = list(f_pv(x_interp))", "def interpolate_smooth(self, transect):\n\n # Get data from object\n\n u = np.copy(self.u_mps)\n v = np.copy(self.v_mps)\n u[self.valid_data[0, :] == False] = np.nan\n v[self.valid_data[0, :] == False] = np.nan\n\n # Compute ens_time\n ens_time = np.nancumsum(transect.date_time.ens_duration_sec)\n\n # Apply smooth to each component\n u_smooth = rloess(ens_time, u, 10)\n v_smooth = rloess(ens_time, v, 10)\n\n # Save data in object\n self.u_processed_mps = u\n self.v_processed_mps = v\n self.u_processed_mps[np.isnan(u)] = u_smooth[np.isnan(u)]\n self.v_processed_mps[np.isnan(v)] = v_smooth[np.isnan(v)]", "def interpolate(i0, d0, i1, d1):\n if i0 == i1:\n return [d0]\n values = []\n a = (d1 - d0) / (i1 - i0)\n d = d0\n for i in range(i0,i1+1):\n values.append(d)\n d = d + a\n return values", "def test_isentropic_pressure_addition_args_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n rh = np.ones((4, 5, 5))\n rh[0, :] = 100.\n rh[1, :] = 80.\n rh[2, :] = 40.\n rh[3, :] = 20.\n relh = rh * units.percent\n tmpk = tmp * units.kelvin\n isentlev = [296., 297.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh)\n truerh = 69.197 * units.percent\n assert_almost_equal(isentprs[1][1], truerh, 3)", "def interpolate_timeseries(self, x, t, **kw):\n v, t_v = self.timeseries(x, rmnans=True)\n kw.update(dict(bounds_error=False))\n interpolant = sp.interpolate.interp1d(t_v, v, **kw)\n return interpolant(t)", "def linear_interpolate(x, x0, y0, x1, y1):\n try:\n return (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n except ZeroDivisionError:\n return 0.0", "def interpolate(self):\n interp = (\n self._get_ticks() - self._last_update\n ) / self._tick_step / self.dilation\n if interp > 1.0:\n interp = 1.0\n return interp", "def interpolate(self, distances):\n cs = interp1d(self.distances, self.altitudes, fill_value='extrapolate')\n return cs(distances)", "def test_isentropic_pressure_4d():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((3, 4, 5, 5))\n tmp[:, 0, :] = 296.\n tmp[:, 1, :] = 292.\n tmp[:, 2, :] = 290\n tmp[:, 3, :] = 288.\n tmpk = tmp * units.kelvin\n rh = np.ones((3, 4, 5, 5))\n rh[:, 0, :] = 100.\n rh[:, 1, :] = 80.\n rh[:, 2, :] = 40.\n rh[:, 3, :] = 20.\n relh = rh * units.percent\n isentlev = [296., 297., 300.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh, vertical_dim=1)\n trueprs = 1000. 
* units.hPa\n trueprs2 = 936.213 * units.hPa\n trueprs3 = 879.50375588 * units.hPa\n truerh = 69.19706 * units.percent\n assert isentprs[0].shape == (3, 3, 5, 5)\n assert_almost_equal(isentprs[0][:, 0, :], trueprs, 3)\n assert_almost_equal(isentprs[0][:, 1, :], trueprs2, 3)\n assert_almost_equal(isentprs[0][:, 2, :], trueprs3, 3)\n assert_almost_equal(isentprs[1][:, 1, ], truerh, 3)", "def interp_flight(oflight, npts, timestep=55):\n\n # Let's start with a full copy of the original, and update it as we go\n iflight = copy.deepcopy(oflight)\n\n rough_delta = iflight.flighttime.seconds/np.float(npts)\n delta = np.around(rough_delta, decimals=0)\n # i == number of legs\n # j == total number of points in flight plan\n i = 0\n j = 0\n for leg in iflight.legs:\n if len(leg.utcdt) > 1:\n # Construct our point timings, done poorly but quickly\n filler = np.arange(leg.relative_time[0],\n leg.relative_time[-1]+delta,\n delta)\n # If we popped over, just stop at the leg boundary regardless\n if filler[-1] > leg.relative_time[-1]:\n filler[-1] = leg.relative_time[-1]\n\n # Check if mhdg or thdg has a zero point crossing that will confuse\n # the simple minded interpolation that's about to happen\n\n# print \"ORIG THDG:\", leg.mhdg\n# print \"ORIG MHDG:\", leg.thdg\n # This is some pretty dirty logic for right now. Needs cleaning up.\n uprange = False\n for k, hdg in enumerate(leg.mhdg):\n if k != 0:\n # Check the previous and the current; if it crosses zero,\n # then add 360 to keep it monotonicly increasing\n # Do this for both magnetic and true headings\n if leg.mhdg[k-1] >= 350. and leg.mhdg[k] < 10:\n uprange = True\n if uprange is True:\n leg.mhdg[k] += 360.\n if leg.thdg[k-1] >= 350. and leg.thdg[k] < 10:\n uprange = True\n if uprange is True:\n leg.thdg[k] += 360.\n if uprange is True:\n pass\n\n # Actually interpolate the points...add more in this style as need\n latprimer = spi.interp1d(leg.relative_time,\n leg.lat, kind='linear')\n lonprimer = spi.interp1d(leg.relative_time,\n leg.long, kind='linear')\n thdgprimer = spi.interp1d(leg.relative_time,\n leg.thdg, kind='linear')\n mhdgprimer = spi.interp1d(leg.relative_time,\n leg.mhdg, kind='linear')\n\n # Replacing the existing stuff with the interpolated values\n leg.lat = latprimer(filler)\n leg.long = lonprimer(filler)\n leg.thdg = thdgprimer(filler) % 360.\n leg.mhdg = mhdgprimer(filler) % 360.\n\n # Use a stubby little function instead of a loop. 
Better?\n # Need to explicitly list() map() in Python3 to operate on it\n # the same way as in Python2\n filler = list(map(go_dt, filler))\n leg.relative_time = filler\n\n # Recreate timestamps for the new interpolated set, both dt and iso\n # formatted objects for easy interactions\n leg.utcdt = leg.relative_time + np.repeat(iflight.takeoff,\n len(filler))\n leg.utc = list(map(go_iso, leg.utcdt))\n\n j += len(leg.long)\n i += 1\n\n# print \"Interpolated %s to roughly fit %d points total,\" % \\\n# (oflight.filename, npts)\n# print \"with a delta_t of %06.1f; ended up with %d points total.\" % \\\n# (delta, j)\n\n return iflight", "def extrapolate_nans(x, y, v):\n if numpy.ma.is_masked(v):\n nans = v.mask\n else:\n nans = numpy.isnan(v)\n notnans = numpy.logical_not(nans)\n v[nans] = scipy.interpolate.griddata((x[notnans], y[notnans]), v[notnans],\n (x[nans], y[nans]),\n method='nearest').ravel()\n return v", "def fill_missing_data_points(data):\n return data.interpolate()", "def interpolate(self, image):\n return", "def test_linear_interpolation_basic(self):\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test first that original points are reproduced correctly\n for i, xi in enumerate(x):\n for j, eta in enumerate(y):\n val = interpolate2d(x, y, A, [(xi, eta)], mode='linear')[0]\n ref = linear_function(xi, eta)\n assert numpy.allclose(val, ref, rtol=1e-12, atol=1e-12)\n\n # Then test that genuinly interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 10)\n etas = numpy.linspace(y[0], y[-1], 10)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)", "def interpolate_missing(y):\n if y.isna().any():\n y = y.interpolate(method='linear', limit_direction='both')\n return y", "def pad(input_data):\n # source : https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array \n data = input_data.copy()\n bad_indexes = np.isnan(data)\n good_indexes = np.logical_not(bad_indexes)\n good_data = data[good_indexes]\n interpolated = np.interp(bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data)\n data[bad_indexes] = interpolated\n return data", "def interpolate_ephemeris(self):\n #Compute the offsets into the lookup tables\n startemiss, stopemiss = self.get_emissivity_offsets()\n hourslice, starttime = self.get_hour_offsets()\n latslice = self.get_lat_offsets()\n \n #Compute the start and stop dates\n startdata = self.extract_season(self.startseason,startemiss,\n hourslice, latslice)\n stopdata = self.extract_season(self.stopseason,startemiss,\n hourslice, latslice)\n # Interpolate Season\n seasons = [self.startseason, self.stopseason]\n season_f = compute_interpolation_function(seasons, [startdata, stopdata], 'linear')\n data = season_f(self.season)\n #Interpolate time\n self.data = self.interpolatehour(hourslice, starttime, data)", "def interpolate_linear(self, known_coords, known_values, interp_coords, groupname):\n\t\t#First need to reshape known_coords and known_values\n\t\tn_params = self.signal[groupname]['dimension']\n\t\tknown_coords = np.reshape( known_coords, (-1,n_params) )\n\t\tknown_values = np.reshape( 
known_values, (-1) )\t\t\n\t\treturn griddata(known_coords, known_values, interp_coords, method='linear')", "def interpolate_loss_calculation(self, interpolates):\n _, fake_scores = self.D(interpolates)\n return fake_scores", "def profile_interp(var,z_orig,z_interp,method='linear',out_of_bounds='NaN'):\n z_orig = z_orig[~isnan(z_orig)]\n var= var[~isnan(var)]\n #assert(all(diff(z_orig) > 0))\n if len(z_orig) > len(var) or len(var) > len(z_orig): return NaN\n if len(z_orig) <= 2 or len(var) <= 2: return NaN\n \n if out_of_bounds == 'NaN':\n interpolant = interpolate.interp1d(z_orig,var,kind=method,bounds_error=False,fill_value=NaN)\n elif out_of_bounds == 'nearest':\n interpolant = interpolate.interp1d(z_orig,var,kind=method,bounds_error=False,fill_value=(var[0],var[-1]))\n elif out_of_bounds == 'extrap':\n interpolant = interpolate.interp1d(z_orig,var,kind=method,bounds_error=False,fill_value='extrapolate')\n else:\n raise ValueError('Extrapolation method must be NaN, nearest, or cubic.')\n result = interpolant(z_interp)\n\n if result.size == 1: return result.item()\n else: return result", "def interpolate(x0, y0, x1, y1, x):\n y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n\n return y", "def test_isentropic_pressure_data_bounds_error():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 350.] * units.kelvin\n with pytest.raises(ValueError):\n isentropic_interpolation(isentlev, lev, tmpk)", "def interp_spherical_data(x1, y1, x2, y2, lon, lat, J, m1, great_circle = False):\n\n background_x, background_y = m1(lon, lat)\n\n if great_circle == False:\n interp_lons = np.linspace(x1, x2, 100)\n interp_lats = np.linspace(y1, y2, 100)\n interp_x, interp_y = m1(interp_lons, interp_lats)\n\n else:\n great_circle = m1.drawgreatcircle(x1, y1, x2, y2, del_s = 50., alpha = 1.0, color = \"w\", zorder = 102)\n interp_x = great_circle[0].get_data()[0] \n interp_y = great_circle[0].get_data()[1]\n\n output_data = scipy.interpolate.griddata(list(zip(background_x, background_y)), jr1, list(zip(interp_x, interp_y)), method = 'cubic')\n\n output_lon, output_lat = m1(interp_x, interp_y, inverse = True)\n return output_data, output_lon, output_lat", "def interpolate_composite(self, transect):\n\n u = np.copy(self.u_processed_mps)\n v = np.copy(self.v_processed_mps)\n\n valid = np.isnan(u) == False\n\n # Check for valid data\n if np.sum(valid) > 1:\n\n # Compute ensTime\n ens_time = np.nancumsum(transect.date_time.ens_duration_sec)\n\n # Ensure monotonic input\n diff_time = np.diff(ens_time[valid])\n idx = np.where(diff_time == 0)[0]\n mono_array = np.vstack([ens_time[valid], u[valid], v[valid]])\n # Replace non-monotonic times with average values\n for i in idx[::-1]:\n mono_array[1, i] = np.nanmean(mono_array[1, i:i+2])\n mono_array[2, i] = np.nanmean(mono_array[2, i:i + 2])\n mono_array = np.delete(mono_array, i+1, 1)\n # Apply linear interpolation\n # Apply linear interpolation\n self.u_processed_mps = np.interp(ens_time,\n mono_array[0, :],\n mono_array[1, :])\n # Apply linear interpolation\n self.v_processed_mps = np.interp(ens_time,\n mono_array[0, :],\n mono_array[2, :])", "def InterpolateSurfaceVectorsWithLine():\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = 
CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Inserting Centre Line...')\r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertCentreLine(Centroids1,Vectors1,50)\r\n print('Centre Line Inserted \\n Interpolating Centroids...')\r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.1)\r\n print('Interpolation Finished \\n Plotting...')\r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"Normal Surface Vectors With Central axis Line\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/SurfaceLineVectorInterpolation.dat\",Vectors2,header = header,comments='')", "def interpolate_and_average(xs, ys, interop_points=None, confidence_interval=False):\n # Get the xs of shortest curve\n max_min_x = max(x.min() for x in xs)\n min_max_x = min(x.max() for x in xs)\n if interop_points is None:\n # Interop points according to curve with \"least resolution\"\n interop_points = min(x.shape[0] for x in xs)\n\n new_x = np.linspace(max_min_x, min_max_x, interop_points)\n new_ys = []\n\n for old_x, old_y in zip(xs, ys):\n new_ys.append(np.interp(new_x, old_x, old_y))\n\n # Average out\n # atleast_2d for case when we only have one reptition\n new_ys = np.atleast_2d(np.array(new_ys))\n new_y = np.mean(new_ys, axis=0)\n std_y = np.std(new_ys, axis=0)\n\n if confidence_interval:\n interval = 1.96 * (std_y / np.sqrt(len(xs)))\n lower_bound = new_y - interval\n upper_bound = new_y + interval\n return new_x, new_y, std_y, lower_bound, upper_bound\n else:\n return new_x, new_y, std_y", "def solve_i():\r\n x = np.array([ -2.1, -1.45, -1.3, -0.2, 0.1, 0.15, 0.8, 1.1, 1.5, 2.8, 3.8 ])\r\n y = np.array([0.012155, 0.122151, 0.184520, 0.960789, 0.990050, 0.977751,\r\n 0.527292, 0.298197, 0.105399, 3.936690E-4, 5.355348E-7])\r\n # find and plot both interpolations and the oiginal points\r\n plt.figure(1)\r\n cubic_interpol(x,y)\r\n lin_interpol(x,y)\r\n plt.plot(x, y, 'rx', ms = 10, label = 'Points')\r\n # plot settings\r\n plt.title('Cubic & Linear Interpolation Given Points')\r\n plt.xlabel('x',fontsize = 14)\r\n plt.ylabel('y',fontsize = 14)\r\n plt.legend()", "def Interpolate(self, ind, results):\n return _gmat_py.Interpolator_Interpolate(self, ind, results)", "def test_isentropic_pressure_masked_column():\n lev = [100000., 95000.] * units.Pa\n tmp = np.ma.ones((len(lev), 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[:, :, -1] = np.ma.masked\n tmp = units.Quantity(tmp, units.kelvin)\n isentprs = isentropic_interpolation([296.] * units.kelvin, lev, tmp)\n trueprs = np.ones((1, 5, 5)) * (1000. 
* units.hPa)\n trueprs[:, :, -1] = np.nan\n assert isentprs[0].shape == (1, 5, 5)\n assert_almost_equal(isentprs[0], trueprs, 3)", "def test_1d_constant_interpolation_basic(self):\n\n # Define pixel centers along each direction\n x = numpy.array([1.0, 2.0, 4.0])\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n A[i] = linear_function(x[i], 0)\n\n # Then test that interpolated points are always assigned value of\n # closest neighbour\n xis = numpy.linspace(x[0], x[-1], 10)\n points = xis\n\n vals = interpolate1d(x, A, points, mode='constant')\n\n # Find upper neighbours for each interpolation point\n xi = points[:]\n idx = numpy.searchsorted(x, xi, side='left')\n\n # Get the neighbours for each interpolation point\n x0 = x[idx - 1]\n x1 = x[idx]\n\n z0 = A[idx - 1]\n z1 = A[idx]\n\n # Location coefficients\n alpha = (xi - x0) / (x1 - x0)\n\n refs = numpy.zeros(len(vals))\n for i in range(len(refs)):\n if alpha[i] < 0.5:\n refs[i] = z0[i]\n\n if alpha[i] >= 0.5:\n refs[i] = z1[i]\n\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)", "def interpolateSpline(x, y) :\n n = len(x)\n\n dim = 4 * (n - 1)\n b = np.zeros((dim, 1))\n A = np.zeros((dim, dim))\n\n for i in range(n-1):\n x1 = x[i]\n x2 = x[i+1]\n y1 = y[i]\n y2 = y[i+1]\n b[i*4:(i+1)*4, 0] = [y1, y2, 0, 0]\n\n A[i*4, i*4:(i+1)*4] = [pow(x1,3), pow(x1,2), x1, 1] \n A[i*4+1, i*4:(i+1)*4] = [pow(x2,3), pow(x2,2), x2, 1]\n if (i != n-2):\n A[i*4+2, i*4:(i+2)*4] = [3*pow(x2,2), 2 * x2, 1, 0, -3*pow(x2,2), -2 * x2, -1, 0, ]\n A[i*4+3, i*4:(i+2)*4] = [6*x2, 2, 0, 0, -6*x2, -2, 0, 0]\n else: \n A[i*4+2, 0:4] = [6*x[0], 2, 0, 0]\n A[i*4+3, i*4:(i+1)*4] = [6*x2, 2, 0, 0]\n \n # solve linear system for the coefficients of the spline\n coeffs = np.linalg.solve(A, b)\n\n # extract local pieces\n spline = []\n for k in range(n-1):\n spline.append(np.poly1d(coeffs[k*4:(k+1)*4, 0]))\n\n return spline", "def interpolate_hold_last(self):\n\n # Initialize variables\n n_ensembles = len(self.u_mps)\n\n # Get data from object\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan\n\n n_invalid = 0\n # Process data by ensembles\n for n in range(1, n_ensembles):\n # Check if ensemble is invalid and number of consecutive invalids is less than 9\n if (self.valid_data[0, n] == False) and (n_invalid < 9):\n self.u_processed_mps[n] = self.u_processed_mps[n - 1]\n self.v_processed_mps[n] = self.v_processed_mps[n - 1]", "def trilinear_interpolate(point, atom_index, emap, emap_max, emap_min):\n point1 = []\n point0 = []\n dif = []\n for p in point:\n if round(p) == p:\n p += 1E-10\n point0.append(math.floor(p))\n point1.append(math.ceil(p))\n dif.append((p - point0[-1]) / (point1[-1] - point0[-1]))\n\n i000 = energy_map_index(point0, emap_max, emap_min) # (0, 0, 0)\n i100 = energy_map_index([point1[0], point0[1], point0[2]], emap_max, emap_min) # (1, 0, 0)\n i001 = energy_map_index([point0[0], point0[1], point1[2]], emap_max, emap_min) # (0, 0, 1)\n i101 = energy_map_index([point1[0], point0[1], point1[2]], emap_max, emap_min) # (1, 0, 1)\n i010 = energy_map_index([point0[0], point1[1], point0[2]], emap_max, emap_min) # (0, 1, 0)\n i110 = energy_map_index([point1[0], point1[1], point0[2]], emap_max, emap_min) # (1, 1, 0)\n i011 = energy_map_index([point0[0], point1[1], point1[2]], 
emap_max, emap_min) # (0, 1, 1)\n i111 = energy_map_index(point1, emap_max, emap_min) # (1, 1, 1)\n\n c00 = emap[i000][atom_index] * (1 - dif[0]) + emap[i100][atom_index] * dif[0]\n c01 = emap[i001][atom_index] * (1 - dif[0]) + emap[i101][atom_index] * dif[0]\n c10 = emap[i010][atom_index] * (1 - dif[0]) + emap[i110][atom_index] * dif[0]\n c11 = emap[i011][atom_index] * (1 - dif[0]) + emap[i111][atom_index] * dif[0]\n\n c0 = c00 * (1 - dif[1]) + c10 * dif[1]\n c1 = c01 * (1 - dif[1]) + c11 * dif[1]\n\n c = c0 * (1 - dif[2]) + c1 * dif[2]\n\n return c", "def interpolatePeriodicSpline(x, y) :\n n = len(x)\n\n dim = 4 * (n - 1)\n b = np.zeros((dim, 1))\n A = np.zeros((dim, dim))\n\n for i in range(n-1):\n x1 = x[i]\n x2 = x[i+1]\n y1 = y[i]\n y2 = y[i+1]\n b[i*4:(i+1)*4, 0] = [y1, y2, 0, 0]\n\n A[i*4, i*4:(i+1)*4] = [pow(x1,3), pow(x1,2), x1, 1] \n A[i*4+1, i*4:(i+1)*4] = [pow(x2,3), pow(x2,2), x2, 1]\n if (i != n-2):\n A[i*4+2, i*4:(i+2)*4] = [3*pow(x2,2), 2 * x2, 1, 0, -3*pow(x2,2), -2 * x2, -1, 0, ]\n A[i*4+3, i*4:(i+2)*4] = [6*x2, 2, 0, 0, -6*x2, -2, 0, 0]\n else: \n A[i*4+2, 0:4] = [3 * pow(x[0],2), 2 * x[0], 1, 0]\n A[i*4+2, i*4:(i+1)*4] = [-3 * pow(x2,2), -2 * x2, -1, 0]\n A[i*4+3, 0:4] = [6 * x[0], 2, 0, 0]\n A[i*4+3, i*4:(i+1)*4] = [-6 * x2, -2, 0, 0]\n # solve linear system for the coefficients of the spline\n coeffs = np.linalg.solve(A, b)\n\n # extract local pieces\n spline = []\n for k in range(n-1):\n spline.append(np.poly1d(coeffs[k*4:(k+1)*4, 0]))\n\n return spline", "def test_constant_interpolation_basic(self):\n\n # Define pixel centers along each direction\n x = numpy.array([1.0, 2.0, 4.0])\n y = numpy.array([5.0, 9.0])\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Then test that interpolated points are always assigned value of\n # closest neighbour\n xis = numpy.linspace(x[0], x[-1], 10)\n etas = numpy.linspace(y[0], y[-1], 10)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='constant')\n\n # Find upper neighbours for each interpolation point\n xi = points[:, 0]\n eta = points[:, 1]\n idx = numpy.searchsorted(x, xi, side='left')\n idy = numpy.searchsorted(y, eta, side='left')\n\n # Get the four neighbours for each interpolation point\n x0 = x[idx - 1]\n x1 = x[idx]\n y0 = y[idy - 1]\n y1 = y[idy]\n\n z00 = A[idx - 1, idy - 1]\n z01 = A[idx - 1, idy]\n z10 = A[idx, idy - 1]\n z11 = A[idx, idy]\n\n # Location coefficients\n alpha = (xi - x0) / (x1 - x0)\n beta = (eta - y0) / (y1 - y0)\n\n refs = numpy.zeros(len(vals))\n for i in range(len(refs)):\n if alpha[i] < 0.5 and beta[i] < 0.5:\n refs[i] = z00[i]\n\n if alpha[i] >= 0.5 and beta[i] < 0.5:\n refs[i] = z10[i]\n\n if alpha[i] < 0.5 and beta[i] >= 0.5:\n refs[i] = z01[i]\n\n if alpha[i] >= 0.5 and beta[i] >= 0.5:\n refs[i] = z11[i]\n\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)", "def showAsPointsInterpolated(self, lToRRatio = 2.0):\n MonkeyPatchMayaVi()\n import enthought.mayavi.mlab as mlab\n from mayavi import mlab\n \n @mlab.show\n def _showSimple():\n maxInterpolPts = 10\n \n def interpolateSection(section):\n sStart = section.getDistalNPA4()\n sEnd = section.getProximalNPA4()\n length = section.getLength()\n rad = min(section.d_r, section.p_r) \n n = min( max( int( lToRRatio * length / rad ), 1 ), maxInterpolPts)\n jVecSteps = ( sEnd-sStart ) / n\n \n intPts = [ 
sStart + k*jVecSteps for k in range(0,n) ]\n return intPts \n \n lbs = []\n for morph in self.morphs:\n lb = Flatten( ListBuilderSectionVisitor(functor=interpolateSection, morph=morph ) () ) \n lbs.extend( lb )\n \n \n pts = numpy.array( lbs )\n\n x = pts[:, 0]\n y = pts[:, 1]\n z = pts[:, 2]\n s = pts[:, 3]\n \n mlab.points3d(x, y, z, s, colormap=self.colormap, scale_factor=self.scale_factor)\n mlab.outline()\n _showSimple()", "def interpolate(self, x_pivot, f_pivot):\n interpolation = interp1d(x_pivot, f_pivot,\n kind=self.kind, bounds_error=False)\n return interpolation", "def interpolate(series, indices):\r\n not_included = [t for t in indices if t not in series.index.values]\r\n new_indices_series = pd.Series(index=not_included, data=[np.nan] * len(not_included))\r\n with_new_indices = series.append(new_indices_series).sort_index()\r\n interpolated = with_new_indices.interpolate(method=\"linear\")\r\n\r\n # We want the values on the indices before the (originally) first index to be equal to the first value. Similarly,\r\n # we want the values after the (originally) last index to be equal to the last value.\r\n min_index = min(series.index.values)\r\n max_index = max(series.index.values)\r\n start_value = series.values[0]\r\n end_value = series.values[-1]\r\n\r\n new_values = []\r\n for t, p in interpolated.items():\r\n if t < min_index:\r\n new_values.append(start_value)\r\n elif t > max_index:\r\n new_values.append(end_value)\r\n else:\r\n new_values.append(p)\r\n\r\n new_series = pd.Series(data=new_values, index=interpolated.index)\r\n\r\n return new_series", "def interpolate_meridional(self, *interp1d_args, **interp1d_kwargs):\n return self.interp1d_meridional(*interp1d_args, **interp1d_kwargs)(self.lat)", "def test_isentropic_pressure():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290\n tmp[3, :] = 288.\n tmp[:, :, -1] = np.nan\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = np.ones((1, 5, 5)) * (1000. 
* units.hPa)\n trueprs[:, :, -1] = np.nan\n assert isentprs[0].shape == (1, 5, 5)\n assert_almost_equal(isentprs[0], trueprs, 3)", "def interpolate_matrix(matrix):", "def test_linear_interpolation_nan_array(self):\n\n # Define pixel centers along each direction\n x = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]\n y = [4.0, 5.0, 7.0, 9.0, 11.0, 13.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n A[2, 3] = numpy.nan # (x=2.0, y=9.0): NaN\n\n # Then test that interpolated points can contain NaN\n xis = numpy.linspace(x[0], x[-1], 12)\n etas = numpy.linspace(y[0], y[-1], 10)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n\n # Set reference result with expected NaNs and compare\n for i, (xi, eta) in enumerate(points):\n if (1.0 < xi <= 3.0) and (7.0 < eta <= 11.0):\n refs[i] = numpy.nan\n\n assert nanallclose(vals, refs, rtol=1e-12, atol=1e-12)", "def apply_interpolation(self, transect, interpolation_method=None):\n\n # Reset processed data\n if self.u_mps is not None:\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan\n\n # Determine interpolation methods to apply\n if interpolation_method is None:\n interpolation_method = self.interpolate\n else:\n self.interpolate = interpolation_method\n\n # Apply specified interpolation method\n\n if interpolation_method == 'None':\n # Sets invalid data to nan with no interpolation\n self.interpolate_none()\n\n elif interpolation_method == 'ExpandedT':\n # Set interpolate to none as the interpolation done is in the QComp\n self.interpolate_next()\n\n elif interpolation_method == 'Hold9':\n # Interpolates using SonTek method of holding last valid for up to 9 samples\n self.interpolate_hold_9()\n\n elif interpolation_method == 'HoldLast':\n # Interpolates by holding last valid indefinitely\n self.interpolate_hold_last()\n\n elif interpolation_method == 'Linear':\n # Interpolates using linear interpolation\n self.interpolate_linear(transect)\n\n elif interpolation_method == 'Smooth':\n # Interpolates using smooth interpolation\n self.interpolate_smooth(transect)\n\n elif interpolation_method == 'TRDI':\n # TRDI interpolation is done in discharge.\n # For TRDI the interpolation is done on discharge not on velocities\n self.interpolate_none()", "def interpolate(m):\n \n x1 = m[0]\n x2 = m[1]\n x3 = m[2]\n y1 = m[3]\n y2 = m[4]\n y3 = m[5]\n denom = (x1 - x2)*(x1 - x3)*(x2 - x3)\n A = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / denom\n B = (x3**2 * (y1 - y2) + x2**2 * (y3 - y1) + x1**2 * (y2 - y3)) / denom\n C = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / denom\n xext = -B/(2*A)\n yext = A*xext**2 + B*xext + C\n \n return(np.array([xext,yext]))", "def interpolate_na(\n self, method: str = \"nearest\", extrapolate: bool = False, **kwargs\n ):\n dim0 = self.dim0\n kwargs.update(dict(method=method, extrapolate=extrapolate))\n if dim0:\n interp_data = np.empty(self._obj.shape, dtype=self._obj.dtype)\n for i, (_, sub_xds) in enumerate(self._obj.groupby(dim0)):\n interp_data[i, ...] 
= self._interpolate_na(\n sub_xds.load().data, **kwargs\n )\n else:\n interp_data = self._interpolate_na(self._obj.load().data, **kwargs)\n interp_array = xr.DataArray(\n name=self._obj.name,\n dims=self._obj.dims,\n coords=self._obj.coords,\n data=interp_data,\n attrs=self._obj.attrs,\n )\n interp_array.raster.set_nodata(self.nodata)\n interp_array.raster.set_crs(self.crs)\n return interp_array", "def _LinearInterpolate(x0, target, x1, y0, y1):\n if x0 == x1:\n return (y0 + y1) / 2\n return (y1 - y0) * (target - x0) / (x1 - x0) + y0", "def splitting_intensity(self, **kwargs):\n \n if 'pol' not in kwargs:\n raise Exception('pol must be specified')\n \n copy = self.data.copy()\n copy.rotateto(kwargs['pol'])\n copy.x = np.gradient(copy.x)\n rdiff, trans = copy.chopdata()\n s = -2 * np.trapz(trans * rdiff) / np.trapz(rdiff**2)\n return s", "def section_coordinates():\n \n gh_width = 30.0 # in feet\n gh_width_west = gh_width/2.0\n N_x = 100\n dx = gh_width_west/100.0\n gh_length = 48 # in feet\n \n xvalues = np.linspace(0,(N_x)*dx,N_x+1) # array for width\n yvalues = np.linspace(0,gh_length,num=gh_length+1) # array for height\n zvalues_west = np.zeros(N_x+1) # array for height\n \n for i in range(0,len(xvalues)):\n zvalues_west[i] = 7.29944696 + (1.27415518*xvalues[i]) + (-0.0680139854*xvalues[i]**2) + (0.00152035861*xvalues[i]**3)\n i += 1\n \n roof_slopes_west = np.zeros(N_x+1)\n roof_lengths = np.zeros(N_x+1)\n\n total_length_west = 0\n\n for i in range(1,len(xvalues)):\n dz = zvalues_west[i] - zvalues_west[i-1]\n roof_slopes_west[i] = dz/dx\n roof_lengths[i] = (dz**2 + dx**2)**0.5\n total_length_west += roof_lengths[i]\n \n zvalues_east = np.flip(zvalues_west, axis=0)\n zvalues_west = zvalues_west[:-1]\n zvalues = np.concatenate((zvalues_west, zvalues_east), axis=0)\n \n xx, yy = np.meshgrid(xvalues, yvalues) \n \n plt.plot(xx, yy, marker='.', color='k', linestyle='none')\n plt.axis('equal')\n plt.show() \n\n return roof_slopes_west", "def interpolate(self, lon, lat, egy=None):\n raise NotImplementedError(\"MapBase.interpolate()\")", "def test_isentropic_pressure_p_increase_rh_out():\n lev = [85000., 90000., 95000., 100000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 288.\n tmp[1, :] = 290.\n tmp[2, :] = 292.\n tmp[3, :] = 296.\n tmpk = tmp * units.kelvin\n rh = np.ones((4, 5, 5))\n rh[0, :] = 20.\n rh[1, :] = 40.\n rh[2, :] = 80.\n rh[3, :] = 100.\n relh = rh * units.percent\n isentlev = 296. * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, relh)\n truerh = 100. * units.percent\n assert_almost_equal(isentprs[1], truerh, 3)", "def test_isentropic_interpolation_dataarray():\n temp = xr.DataArray([[[296.]], [[292.]], [[290.]], [[288.]]] * units.K,\n dims=('isobaric', 'y', 'x'),\n coords={'isobaric': (('isobaric',), [1000., 950., 900., 850.],\n {'units': 'hPa'}),\n 'time': '2020-01-01T00:00Z'})\n\n rh = xr.DataArray([[[100.]], [[80.]], [[40.]], [[20.]]] * units.percent,\n dims=('isobaric', 'y', 'x'), coords={\n 'isobaric': (('isobaric',), [1000., 950., 900., 850.], {'units': 'hPa'}),\n 'time': '2020-01-01T00:00Z'})\n\n isentlev = [296., 297.] 
* units.kelvin\n press, rh_interp = isentropic_interpolation(isentlev, temp.isobaric, temp, rh)\n\n assert_array_almost_equal(press, np.array([[[1000.]], [[936.213]]]) * units.hPa, 3)\n assert_array_almost_equal(rh_interp, np.array([[[100.]], [[69.19706]]]) * units.percent, 3)", "def interpolate( h, x, y=None, z=None, outOfRangeValue=30 ):\n\n if x != x: return outOfRangeValue\n if x <= h.GetXaxis().GetBinCenter(1) or x >= h.GetXaxis().GetBinCenter(h.GetXaxis().GetNbins()): return outOfRangeValue\n \n if y != None:\n if y != y: return outOfRangeValue\n if y <= h.GetYaxis().GetBinCenter(1) or y >= h.GetYaxis().GetBinCenter(h.GetYaxis().GetNbins()): return outOfRangeValue\n if z != None:\n if z != z: return outOfRangeValue\n if z <= h.GetZaxis().GetBinCenter(1) or z >= h.GetZaxis().GetBinCenter(h.GetZaxis().GetNbins()): return outOfRangeValue\n \n if y != None and z != None: return h.Interpolate( x, y, z )\n if y != None: return h.Interpolate( x, y )\n return h.Interpolate( x )", "def test_isentropic_pressure_tmp_out_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 291.4579 * units.kelvin\n assert_almost_equal(isentprs[1][1], truetmp, 3)", "def interpolate(self, var, time, lat, lon):\n\n # Get the nearest four points in space\n # Check to see if lat/lons are 2d or 1d\n if len(self['lat'].shape) == 2:\n closey, closex, distances = self.nearest_points(lat, lon, npt=4)\n # Distances in km\n# distances = np.array([self.haversine(\n# (self['lat'][y,x].values, self['lon'][y,x].values),\n# (lat, lon)) for y,x in \n# zip(list(closey), list(closex))])\n else:\n closen = self.nearest_points(lat, lon, npt=4)\n closey = closen\n closex = closen\n # Distances in km\n distances = np.array([self.haversine(\n (self['lat'][n].values, self['lon'][n].values),\n (lat, lon)) for n in list(closen)])\n # Check for exact match (within some tolerance)\n spaceweights = np.zeros(distances.shape)\n if (distances < 1.0).sum() > 0:\n spaceweights[distances.argmin()] = 1\n else:\n # Here, inverse distance weighting (for simplicity)\n spaceweights = 1.0 / distances\n spaceweights /= spaceweights.sum()\n # Get weights in time\n #time64 = np.datetime64(time)\n #all the valid times in the ensemble\n valids = self['validtime'].values\n timeweights = np.zeros(valids.shape)\n # Check if we are outside the valid time range\n if (time < valids[0]) or (time > valids[-1]):\n print(\"Interpolation is outside of time range in state!\")\n return None\n # Find where we are in this list\n #index after the time of the observation\n lastdex = (valids >= time).argmax()\n # If we match a particular time value, then\n # this is just an identity\n if valids[lastdex] == time:\n # Just make a one at this time\n timeweights[lastdex] = 1\n else:\n # Linear interpolation\n #often going to be 6 hours, subtracts datetime objects I think\n diff = (valids[lastdex] - valids[lastdex-1])\n #print(valids[lastdex], valids[lastdex-1], diff)\n #often going to be 21600 seconds\n totsec = diff.seconds\n #totsec = np.abs(diff / np.timedelta64(1, 's'))\n #ST\n #calculate time difference between time after and time of observation\n #the abs will make this positive definite, which is okay since\n #the difference will always be negative\n thisdiff = abs(time - valids[lastdex])\n #thissec = 
np.abs(thisdiff / np.timedelta64(1,'s'))\n thissec = thisdiff.seconds\n # Put in appropriate weights\n #ST switched the -1 between the two lines to match up with the positive-\n #definite thisdiff\n timeweights[lastdex-1] = float(thissec) / totsec\n timeweights[lastdex] = 1.0 - (float(thissec)/totsec)\n # Now that we have the weights, do the interpolation\n #ST an ntimes x 4 x nens array\n interp = self.variables[var].values[:,closey,closex,:]\n # Do a dot product with the time weights\n # And with the space weights\n if len(interp.shape) == 3:\n interp = (timeweights[:,None,None] * interp).sum(axis=0)\n else:\n interp = (timeweights[:,None,None,None] * interp).sum(axis=0)\n \n if len(interp.shape) == 3:\n #ST Changed 2nd : to None\n interp = (spaceweights[:,None,None] * interp).sum(axis=1)\n else:\n interp = (spaceweights[:,None] * interp).sum(axis=0)\n # Return estimate from all ensemble members\n return interp", "def linear_interpolate_release(mvi, j):\n set_system_state(mvi, 1)\n lam1 = mvi.system.lambda_()[j]\n set_system_state(mvi, 2)\n lam2 = mvi.system.lambda_()[j]\n\n # If either of the following loops are entered, there are likely going to\n # be problems.\n if (lam1 < 0) and (lam2 < 0):\n #add_constraints(mvi, mvi._state1_releases)\n #print mvi.lambda1c[j]\n #print mvi\n #raise Exception(\"Bad release interpolation.\")\n print 'WARNING: BAD INTERPOLANT'\n return mvi.t1, mvi.q1\n\n if lam1 < 0:\n return mvi.t1, mvi.q1\n\n tr = mvi.t1 - (lam1/(lam2-lam1))*(mvi.t2-mvi.t1)\n frac = (tr-mvi.t1)/(mvi.t2-mvi.t1)\n qr = frac*(mvi.q2-mvi.q1)+mvi.q1\n\n return tr, qr", "def InterpolateSection(self, pointA, pointB, Resolution = 50):\n\t\tif type(pointA) != Point:\n\t\t\traise TypeError()\n\t\tif type(pointB) != Point:\n\t\t\traise TypeError()\n\n\t\tx0, y0, z0 = ToCartesian(pointA.Latitude, pointA.Longitude)\n\t\tx1, y1, z1 = ToCartesian(pointB.Latitude, pointB.Longitude)\n\n\t\tdistance = Distance_LatLongs(pointA.Latitude, pointA.Longitude, pointB.Latitude, pointB.Longitude)\n\n\t\t# distance = math.sqrt(math.pow(x1 - x0, 2) + math.pow(y1 - y0, 2) + math.pow(z1 - z0, 2))\n\n\t\tsegment_size = distance / Resolution\n\n\t\tsegments = []\n\n\t\tfor index in range(Resolution):\n\t\t\tsegment_distance = index * segment_size\n\t\t\tx, y, z = Interpolate_Linear_3Points(x0, y0, z0, x1, y1, z1, segment_distance)\n\n\t\t\tlat, lon = ToGeo(x, y, z)\n\n\t\t\tsegments.append(Point(Latitude = lat, Longitude = lon))\n\n\t\treturn segments", "def filter_interpolated(self, ys, xs):\n return ys, xs", "def interpolated_solution(self):\n # need to reverse the direction of the _solution array\n if self.model.assortativity == 'positive':\n traj = self._solution[::-1]\n else:\n traj = self._solution\n\n xi = np.linspace(traj[0, 0], traj[-1, 0], 10 * traj.shape[0])\n interp_soln = self.ivp.interpolate(traj, xi, k=5, ext=2)\n\n # convert to a data frame\n col_names = ['x', r'$\\hat{\\mu}(x)$', r'$\\hat{\\theta}(x)$', '$\\hat{w}(x)$', r'$\\hat{\\pi}(x)$']\n df = pd.DataFrame(interp_soln, columns=col_names)\n\n return df.set_index('x')", "def computeNormalAndCurvature():\n radius = 50\n for i,j in pts:\n nb_pts = ti.cast(0, ti.f32)\n accu_0 = ti.cast(0, ti.f32)\n accu_1 = ti.cast(0, ti.f32)\n accu_2 = ti.cast(0, ti.f32)\n accu_3 = ti.cast(0, ti.f32)\n accu_4 = ti.cast(0, ti.f32)\n accu_5 = ti.cast(0, ti.f32)\n accu_6 = ti.cast(0, ti.f32)\n accu_7 = ti.cast(0, ti.f32)\n accu_8 = ti.cast(0, ti.f32)\n z = 0\n for x in range(i-radius, i+radius):\n for y in range(j-radius, j+radius):\n if ti.is_active(block1, [x,y]):\n 
accu_0 += x * x\n accu_1 += x * y\n accu_2 += x * z\n accu_3 += y * y\n accu_4 += y * z\n accu_5 += z * z\n accu_6 += x\n accu_7 += y\n accu_8 += z\n nb_pts += 1\n accu_0 /= nb_pts\n accu_1 /= nb_pts\n accu_2 /= nb_pts\n accu_3 /= nb_pts\n accu_4 /= nb_pts\n accu_5 /= nb_pts\n accu_6 /= nb_pts\n accu_7 /= nb_pts\n accu_8 /= nb_pts\n cov_mat_0 = accu_0 - accu_6 * accu_6\n cov_mat_1 = accu_1 - accu_6 * accu_7\n cov_mat_2 = accu_2 - accu_6 * accu_8\n cov_mat_4 = accu_3 - accu_7 * accu_7\n cov_mat_5 = accu_4 - accu_7 * accu_8\n cov_mat_8 = accu_5 - accu_8 * accu_8\n cov_mat_3 = cov_mat_1\n cov_mat_6 = cov_mat_2\n cov_mat_7 = cov_mat_5\n\n # Compute eigen value and eigen vector\n # Make sure in [-1, 1]\n scale = ti.max(1.0, ti.abs(cov_mat_0))\n scale = ti.max(scale, ti.abs(cov_mat_1))\n scale = ti.max(scale, ti.abs(cov_mat_2))\n scale = ti.max(scale, ti.abs(cov_mat_3))\n scale = ti.max(scale, ti.abs(cov_mat_4))\n scale = ti.max(scale, ti.abs(cov_mat_5))\n scale = ti.max(scale, ti.abs(cov_mat_6))\n scale = ti.max(scale, ti.abs(cov_mat_7))\n scale = ti.max(scale, ti.abs(cov_mat_8))\n if scale > 1.0:\n cov_mat_0 /= scale\n cov_mat_1 /= scale\n cov_mat_2 /= scale\n cov_mat_3 /= scale\n cov_mat_4 /= scale\n cov_mat_5 /= scale\n cov_mat_6 /= scale\n cov_mat_7 /= scale\n cov_mat_8 /= scale\n \n # Compute roots\n eigen_val_0 = ti.cast(0, ti.f32)\n eigen_val_1 = ti.cast(0, ti.f32)\n eigen_val_2 = ti.cast(0, ti.f32)\n \n c0 = cov_mat_0 * cov_mat_4 * cov_mat_8 \\\n + 2 * cov_mat_3 * cov_mat_6 * cov_mat_7 \\\n - cov_mat_0 * cov_mat_7 * cov_mat_7 \\\n - cov_mat_4 * cov_mat_6 * cov_mat_6 \\\n - cov_mat_8 * cov_mat_3 * cov_mat_3\n c1 = cov_mat_0 * cov_mat_4 \\\n - cov_mat_3 * cov_mat_3 \\\n + cov_mat_0 * cov_mat_8 \\\n - cov_mat_6 * cov_mat_6 \\\n + cov_mat_4 * cov_mat_8 \\\n - cov_mat_7 * cov_mat_7\n c2 = cov_mat_0 + cov_mat_4 + cov_mat_8\n \n if ti.abs(c0) < 0.00001:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n else:\n s_inv3 = ti.cast(1.0 / 3.0, ti.f32)\n s_sqrt3 = ti.sqrt(3.0)\n c2_over_3 = c2 * s_inv3\n a_over_3 = (c1 - c2 * c2_over_3) * s_inv3\n if a_over_3 > 0:\n a_over_3 = 0\n \n half_b = 0.5 * (c0 + c2_over_3 * (2 * c2_over_3 * c2_over_3 - c1))\n q = half_b * half_b + a_over_3 * a_over_3 * a_over_3\n if q > 0:\n q = 0\n \n rho = ti.sqrt(-a_over_3)\n theta = ti.atan2(ti.sqrt(-q), half_b) * s_inv3\n cos_theta = ti.cos(theta)\n sin_theta = ti.sin(theta)\n eigen_val_0 = c2_over_3 + 2 * rho * cos_theta\n eigen_val_1 = c2_over_3 - rho * (cos_theta + s_sqrt3 * sin_theta)\n eigen_val_2 = c2_over_3 - rho * (cos_theta - s_sqrt3 * sin_theta)\n temp_swap = ti.cast(0, ti.f32)\n \n # Sort in increasing order.\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n if eigen_val_1 >= eigen_val_2:\n temp_swap = eigen_val_2\n eigen_val_2 = eigen_val_1\n eigen_val_1 = temp_swap\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n \n if eigen_val_0 <= 0:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! 
THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n # end of compute roots\n\n eigen_value = eigen_val_1 * scale # eigen value for 2D SDF\n # eigen value for 3D SDF\n #eigen_value = eigen_val_0 * scale\n\n #print(\"eigen_val_0 \", eigen_val_0)\n #print(\"eigen_val_1 \", eigen_val_1)\n #print(\"eigen_val_2 \", eigen_val_2)\n \n # TODO\n #scaledMat.diagonal ().array () -= eigenvalues (0)\n #eigenvector = detail::getLargest3x3Eigenvector<Vector> (scaledMat).vector;\n\n # Compute normal vector (TODO)\n #visual_norm[i,j][0] = eigen_val_0 #eigen_vector[0]\n #visual_norm[i,j][1] = eigen_val_1 #eigen_vector[1]\n #visual_norm[i,j][2] = eigen_val_2 #eigen_vector[2]\n\n # Compute the curvature surface change\n eig_sum = cov_mat_0 + cov_mat_1 + cov_mat_2\n visual_curv[i,j][0] = 0\n if eig_sum != 0:\n visual_curv[i,j][0] = eigen_val_1 # true curvature is: ti.abs(eigen_value / eig_sum)", "def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.01):\n\n # Select the x and y of the points\n n = len(pts)\n \n z = 0.0\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n 
if points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return paint_point", "def different_quadratic_extrpolation_lower(x_interp, x_spline, y_spline):\n index_lower_1 = 0\n index_lower_2 = 1\n x1_lower = x_spline[index_lower_1]\n x2_lower = x_spline[index_lower_2]\n x3_lower = x_spline[index_lower_2 + 1]\n f1_lower = y_spline[index_lower_1]\n\n df1_dx_lower = calc_gradient(x_spline, y_spline, index_lower_1)/(x2_lower - x1_lower)\n df2_dx_lower = calc_gradient(x_spline, y_spline, index_lower_2)/(x3_lower - x2_lower)\n\n # Solve 2ax-b = df_dx for the gradient at point 1 and 2\n # Rearrange both equations to find 'a' and 'b' quadratic coefficients\n a_lower = (df2_dx_lower - df1_dx_lower)/(2.*(x2_lower - x1_lower))\n b_lower = df1_dx_lower - 2.*a_lower*x1_lower\n\n # Find c by solving at the fixed points (f = a x**2 + bx + c) at point 1 for the lower, and point 2 for the upper\n c_lower = f1_lower - a_lower*x1_lower**2 - b_lower*x1_lower\n return a_lower*x_interp**2 + b_lower*x_interp + c_lower", "def interpolate1Image2D( iImage, iCoorX, iCoorY ):\n # pretvori vhodne spremenljivke v np polje\n iImage = np.asarray( iImage ) \n iCoorX = np.asarray( iCoorX )\n iCoorY = np.asarray( iCoorY ) \n # preberi velikost slike in jedra\n dy, dx = iImage.shape\n # ustvari 2d polje koordinat iz 1d vhodnih koordinat (!!!)\n if np.size(iCoorX) != np.size(iCoorY):\n print('Stevilo X in Y koordinat je razlicno!') \n iCoorX, iCoorY = np.meshgrid(iCoorX, iCoorY, sparse=False, indexing='xy')\n #------------------------------- za hitrost delovanja \n return interpn( (np.arange(dy),np.arange(dx)), iImage, \\\n np.dstack((iCoorY,iCoorX)),\\\n method='linear', bounds_error=False)\\\n .astype( iImage.dtype )" ]
[ "0.6352975", "0.62675714", "0.61275345", "0.5952627", "0.5907965", "0.58464473", "0.5796776", "0.578933", "0.57794523", "0.57792443", "0.5757177", "0.5749844", "0.57261205", "0.5726011", "0.56745857", "0.5649165", "0.56255597", "0.5618809", "0.5615824", "0.55846244", "0.5575734", "0.55678564", "0.5558076", "0.5558076", "0.5558076", "0.55468947", "0.55329084", "0.5502345", "0.55007267", "0.54945827", "0.5486501", "0.5486107", "0.5483363", "0.54748726", "0.5470862", "0.54575795", "0.54575104", "0.5454245", "0.5444075", "0.5439287", "0.5439023", "0.54335827", "0.54326206", "0.5429286", "0.5419967", "0.5419636", "0.5413704", "0.5413231", "0.54093695", "0.5405765", "0.5403048", "0.5401799", "0.5399949", "0.53990865", "0.5390334", "0.5383853", "0.5382359", "0.53787386", "0.53770256", "0.5374631", "0.5373034", "0.5371954", "0.536459", "0.5363246", "0.5359254", "0.5355683", "0.534502", "0.5337399", "0.53353155", "0.53341794", "0.5333126", "0.532686", "0.53198457", "0.5315376", "0.53144574", "0.5301394", "0.52952445", "0.52941525", "0.5260467", "0.52507013", "0.52471924", "0.52338666", "0.5231545", "0.5225373", "0.5220493", "0.52130556", "0.52061236", "0.5206067", "0.5205742", "0.5202392", "0.5201588", "0.52007437", "0.5195376", "0.5191441", "0.5181201", "0.5175327", "0.51715255", "0.51670504", "0.51669174", "0.5166572" ]
0.6316739
1
Write a hoc file.
def writeHoc(self): print('Writing output file %s ...' % self.outFile) with open(self.outFile, 'w') as fOut: def createSection(secNum): fOut.write('create section_%i\n' %secNum) fOut.write('section_%i {\n' %secNum) fOut.write('pt3dclear()\n') for node in xrange(len(self.sections[secNum])): fOut.write('pt3dadd(%.6f, %.6f, %.6f, %.6f)\n' \ % (self.sections[secNum][node][0], self.sections[secNum][node][1], self.sections[secNum][node][2], self.secRads[secNum][node])) fOut.write('}\n') def createConnection(): for c in xrange(len(self.connections)): fOut.write('connect section_%i(1), section_%i(0)\n' \ % (self.connections[c][0],self.connections[c][1])) for sec in self.sections.keys(): createSection(sec) createConnection() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_to_file(self, filename: str) -> None:", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write(self, filename, data, hdr):\n pass", "def write(self, fname):\n pass", "def write_file(self):\n file = open(self.__file_path, 'w+')\n file.truncate(0)\n file.write(self.__content)\n file.close()", "def write_file(file, content):\n with open(file, \"w\") as fid:\n fid.write(content)", "def write_to_file(self, overwrite=True):\n t0 = time.time()\n self.hdus.verify()\n if BACKEND == 'astropy':\n self.hdus.writeto(self.filename, overwrite=overwrite)\n elif BACKEND == 'pyfits':\n self.hdus.writeto(self.filename, clobber=overwrite)\n self.logger.debug(\"Took {:.4f} seconds to write to disk\".format(time.time() - t0))", "def openHFile():\r\n global file2write\r\n \r\n file2write=open(\"Core/Inc/songs.h\",'w')", "def write(cls, file, data):\n file.write(data)", "def write_to(self, filepath):\n output = self._generate_output()\n with open(filepath, 'wb') as out:\n out.write(output.encode('utf-8'))\n out.write(b'<!-- handrolled for excellence -->\\n')", "def write_to_file(unit, fobj):\n\n _write_all_headers(unit, fobj)\n _write_all_sections(unit, fobj)", "def write_file(path, data):\n with open_local_or_gcs(path, 'w') as h_dest:\n h_dest.write(data) # pylint: disable=no-member", "def file_write(stuff, file_path):\n with open(file_path, \"wt\") as fo:\n fo.write(stuff)", "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "def filewrite(self, filename):\n io.write(self, filename)", "def write_file(*args, **kwargs): # real signature unknown\n pass", "def writeHedr(self):\n path = os.path.join(self.dir,self.name)\n out = file(path,'r+b')\n out.seek(16) #--Skip to Hedr record data\n self.tes3.hedr.getSize()\n self.tes3.hedr.dump(out)\n out.close()\n #--Done\n self.getHeader()\n self.setMTime()", "def write_data(fh, header, data):\r\n # fhw = open(filename, \"w\")\r\n fh.write(str(header))\r\n fh.write(str(data) + \"\\n\")", "def write(self, file):\n #write header\n self.ID.write(file)\n if (self.write_size): \n self.size.write(file)\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def write_to_file(filename, content):\n with open(filename, 'w') as f:\n f.write(content)", "def write(self, filename):\n bvh_string = self.generate_bvh_string()\n if filename[-4:] == '.bvh':\n filename = filename\n else:\n filename = filename + '.bvh'\n with open(filename, 'w') as outfile:\n outfile.write(bvh_string)", "def write_file(filename, data):\n file = open(filename, \"a\")\n file.write(data)\n file.close()", "def write(self, filename, data):\n owner_rw = 0600\n fd = os.open(filename, os.O_WRONLY | os.O_CREAT, owner_rw)\n # In case file existed already with wrong permissions, fix them.\n os.chmod(filename, owner_rw)\n os.write(fd, data)\n os.close(fd)", "def write_to_file(data, filename):\n fimg = fits.HDUList()\n fimghdu = fits.PrimaryHDU()\n fimghdu.data = data\n fimg.append(fimghdu)\n fimg.writeto(filename, overwrite=True)\n print(' wrote output data to: ', filename)", "def writefile(filename, content):\n with open(Path(os.path.expanduser(filename)), 'w') as outfile:\n outfile.write(content)", "def write(self, filename, data):\n raise NotImplementedError", "def file_writer(path, 
data):\n with open(path, \"a\") as file:\n file.write(data + \"\\n\")", "def writeFile(self, filename):\n\n s = self.asString()\n if os.access(filename, os.F_OK):\n raise RuntimeError(\"file %s already exists -- not overwritten.\" % (filename))\n \n f = file(filename, \"w\")\n f.write(s)\n f.close()", "def write_to_file(self, file, content):\n with open(file, 'a') as report_file:\n report_file.write('{}\\n'.format(content))", "def write(self, instream: typ.BinaryIO, filepath: str,\r\n filename: str = None) -> None:\r\n if filename is not None:\r\n filename = path.basename(filename)\r\n if self.fs_type == 'FAT':\r\n allocator_metadata = self.fs.write(instream, filepath)\r\n self.metadata.add_file(filename, allocator_metadata)\r\n elif self.fs_type == 'NTFS':\r\n allocator_metadata = self.fs.write(instream, filepath)\r\n self.metadata.add_file(filename, allocator_metadata)\r\n else:\r\n raise NotImplementedError()", "def put(self, filename, data, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n file_obj = open(file_path, \"w\")\n file_obj.write(data)", "def write_file(self, filename, contents):\n blob = self.repo.create_blob(contents)\n self.index.add(pygit2.IndexEntry(filename, blob, pygit2.GIT_FILEMODE_BLOB))", "def write_file(filename: str, content: str, mode: str = \"w\") -> IO:\n with open(filename, mode) as file:\n file.write(content)\n return file", "def writefile():\n\n print(\"Writing to file...\")\n\n # Open the heartbeat file in append mode and save the current time.\n with open(settings.ROOT_DIR + \"/heartbeat\", \"a\") as f:\n f.write(str(time()))", "def write (self, file):\n\t\tfile.write (self.pack ())", "def write_file(file_path, contents):\n logger.debug(f'write to file:{file_path}')\n with open(file_path, 'w') as outfile:\n outfile.write(contents)", "def write_file(path, data):\n # opens file\n try:\n os.makedirs(os.path.dirname(path), exist_ok=True)\n f = open(str(path), \"w\")\n f.write(data)\n f.close()\n except Exception as e:\n print(\"Error writing file: \", e)\n sys.exit(1)", "def write_to_file(self):\n self.calibration_directory.mkdir(parents=True, exist_ok=True)\n with self.file_path.open(mode=\"w\") as file:\n \"\"\"\n ------------------\n Data virtual object\n ------------------\n \"\"\"\n file.write(\"Data received from the hololens:\\n\")\n file.write(f'{\"\".join(self.hololens_message)}\\n')\n file.write(\"Position and Rotation received from hololens \\n\")\n file.write(\"Pay attention: Left handed KOS and quaternion with scalar last\\n\")\n # for i in self.calibration_position:\n position = \" \".join([str(x) for x in self.calibration_position])\n file.write(position)\n file.write(\"\\n\")\n rotation = \" \".join([str(x) for x in self.calibration_rotation])\n file.write(rotation)\n file.write(\"\\n\")\n \"\"\"\n ------------------\n Holotracker\n ------------------\n \"\"\"\n file.write(f\"Holotracker Pose: Tracker->LH\\n\")\n file.write(\"x y z\\n\")\n position = \" \".join([str(x) for x in self.holo_tracker.position])\n file.write(f\"{position}\\n\")\n file.write(\"w i j k\\n\")\n rotation = \" \".join([str(x) for x in self.holo_tracker.rotation])\n file.write(f\"{rotation}\\n\")\n file.write(\"Homogenous matrix of Holo Tracker\\n\")\n np.savetxt(file, self.holo_tracker.get_pose_as_hom_matrix())\n file.write(\"\\n\")\n \"\"\"\n ------------------\n Calibrationtracker\n ------------------\n \"\"\"\n file.write(f\"Calibrationtracker Pose: Tracker->LH\\n\")\n file.write(\"x y z\\n\")\n position = \" \".join([str(x) for x in 
self.calibration_tracker.position])\n file.write(f\"{position}\\n\")\n file.write(\"w i j k\\n\")\n rotation = \" \".join([str(x) for x in self.calibration_tracker.rotation])\n file.write(f\"{rotation}\\n\")\n file.write(\"Homogenous matrix of Calibration Tracker\\n\")\n np.savetxt(file, self.calibration_tracker.get_pose_as_hom_matrix())\n file.write(\"\\n\")\n \"\"\"\n ------------------\n Calibration object used\n ------------------\n \"\"\"\n file.write(f\"CalibrationObject used : \\n{self.calibration_object}\")\n file.write(\"\\n\")\n \"\"\"\n ------------------\n Point registration service + reprojection error\n ------------------\n \"\"\"\n file.write(\"\\nMarix LH->Virtual\\n\")\n np.savetxt(file, self.hom_LH_to_virtual,)\n file.write(\"\\nReprojection error\\n\")\n file.write(f\"{self.reprojection_error}\")\n file.write(\"\\n\")\n \"\"\"\n ------------------\n Virtual center to Tracker\n ------------------\n \"\"\"\n file.write(\"\\nMatrix Virtual->Tracker\\n\")\n np.savetxt(file, self.hom_tracker_to_virtual)\n file.write(\"\\n\")\n \"\"\"\n ------------------\n Point Data which was used for matching\n ------------------\n \"\"\"\n file.write(\"POINTS THAT WERE MATCHED\\n\\n\")\n file.write(\"Virtual points. Already transformed into Right Hand KOS \\n\")\n np.savetxt(file, self.virtual_points)\n file.write(\"\\n\")\n file.write(\"Real points\\n\")\n np.savetxt(file, self.real_points)", "def write_file(data, file_path):\n try:\n with open(file_path, \"w\") as file_obj:\n file_obj.write(data)\n\n except OSError:\n writer(f\"\\nwarning: Unable to write backup file {file_path}\\n\", FORMAT[\"WARNING\"])", "def write_file(data, filename):\n file = open(filename, \"wb\")\n file.write(data)\n file.close()", "def write_to_file(filepath, data):\n\n with open(filepath, 'w') as f:\n f.write(str(data))", "def write(self, prefix, path=None):\n\n if path is None:\n path = os.getcwd()\n\n header, source = self.doprint(prefix=prefix)\n\n with open(os.path.join(path, prefix + '.h'), 'w') as f:\n f.write(header)\n\n with open(os.path.join(path, prefix + '.c'), 'w') as f:\n f.write(source)", "def write(self, content):\n if content is None:\n self.hash = None\n else:\n try:\n self.hash = hashlib.sha1(content.encode('utf-8')).hexdigest()\n except AttributeError:\n self.hash = hashlib.sha1(content).hexdigest()\n\n if self._has_tmp_file_path():\n with open(self._get_tmp_file_path(), \"w\") as tmp_file:\n tmp_file.write(content)\n else:\n self._content = content", "def write_file(filename):\r\n if Py3:\r\n return open(filename, \"w\", newline='')\r\n return open(filename, \"wb\")", "def _write_file(output_path: str, file_content: Iterable[str]) -> None:\n with open(output_path, \"w+\", encoding=\"utf-8\") as f:\n f.writelines(file_content)\n\n logging.info(f\"wrote to '{output_path}'\")", "def write(self, filename, data):\n\t\t# create the path if it doesn't exists\n\t\tdir = os.path.dirname(filename)\n\t\tif not os.path.isdir(dir):\n\t\t\tos.mkdir(dir)\n\t\t\n\t\t# write data\n\t\tfile = codecs.open(filename, 'w', 'utf8')\n\t\tfile.write(data)\n\t\tfile.close()", "def write_file(self, contents):\n fd = open(os.path.join(os.path.dirname(__file__),\n 'data', 'test.html'), 'w')\n fd.write(contents)\n fd.close()", "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)", "def _write_output_file(output: str, file_name: str):\n\tfile1 = open(file_name, 'w')\n\tfile1.write(output)\n\tfile1.close()", "def write_file(path, contents, mode=\"w\"):\n with open(path, mode) as f:\n 
f.write(contents)", "def write(self, path, content):\n this_file = open(path, 'w')\n this_file.write(content)\n this_file.close()", "def create_file(self, content=\"\"):\n if (self.exists()):\n raise IOError(\"A file at '{}' already exists.\".format(self.location))\n with open(self.location, 'w') as f:\n f.write(content)", "def write_to_file(entry, file):\n with open(file, \"a\") as f:\n f.write(entry)", "def write(filename):\n print(uc.write(filename))", "def write_file(filename, content):\n codecs.open(filename, \"w\", encoding='utf-8').writelines(content)", "def write_to_file(filename, data):\n with open(filename, \"a\") as file:\n file.writelines(data)", "def write_to_file(filename, data):\n with open(filename, \"a\") as file:\n file.writelines(data)", "def write_to_file(file: Text, data: bytes):\n with open(file, \"wb\") as w:\n w.write(data)\n w.flush()", "def fs_write(obj, file_path):\n try:\n with open(str(file_path), 'w') as f:\n f.write(obj)\n return obj\n except TypeError as e:\n raise e", "def write(self,path,content):\n file_path = os.path.join( self.directory, path)\n with open(file_path, \"w\") as file:\n file.write( content )", "def write_file(filename, content):\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n\n with open(filename, 'wb') as f:\n f.write(content.encode('utf-8'))", "def create_file(path):\n open(path, \"w\").close()", "def write(self, file):\n #write header\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def write(self, content, mode='wb'):\r\n self.localpath.write(content, mode)", "def writable(path):", "def write(self, file_path, content):\n self._set_extension(file_path)\n\n logger.debug(\"writing to %s\", self._file_path)\n\n self._content = content\n\n if self._file_ext == 'json':\n self._write_json()", "def write_to_file(file_name, content):\n with open(file_name, \"w\") as text_file:\n text_file.write(str(content))", "def file_write(filename, dic):\n d = dic \n f = open(filename, 'w') \n f.write(str(d))\n f.close()", "def create_output_file(self):\n if self.options['storage_method'] == 'hdf5':\n try:\n fp = h5py.File(self.file_name, \"w\")\n except IOError:\n print \"Unable to open output file '%s'\" % self.file_name\n sys.exit(1)\n # remember file pointer\n self.file_pointer = fp\n print \"Creating file '%s'\" % self.file_name\n elif self.options['storage_method'] == 'none':\n # save command for later processing\n self.h5commands.append((\"create_file\", self.file_name))", "def _write_file(self, filename, content, mode=None):\n with open(filename, 'w') as fp:\n fp.write(dedent(content).strip())\n fp.write('\\n')\n\n if mode is not None:\n os.chmod(filename, mode)", "def write_file(content, file_path, mode='w', encoding='utf-8'):\n with codecs.open(file_path, mode, encoding=encoding) as fid:\n fid.write(content)", "def WriteFile(path, content, mode='w', atomic=False, makedirs=False):\n write_path = path\n if atomic:\n write_path = path + '.tmp'\n\n if makedirs:\n SafeMakedirs(os.path.dirname(path))\n\n with open(write_path, mode) as f:\n f.writelines(cros_build_lib.iflatten_instance(content))\n\n if not atomic:\n return\n\n try:\n os.rename(write_path, path)\n except EnvironmentError:\n SafeUnlink(write_path)\n raise", "def write_file ( file_name, contents ):\n vlog(4, 'Writing File: %s SIZE=%s' % (file_name, len(contents)))\n with open(file_name, 'w') as file:\n file.write(contents)", "def write(self, data_to_write):\n 
self.single_file.write(data_to_write)\n self.single_file.flush()", "def write_to_file(self, filepath, mode = \"a\"): \n if \"r\" in mode: \n print(\"Only accepts write and append modes\")\n return \n with open(filepath, mode) as f: \n f.write(\"{}\\n\".format(self.title))\n verified, seen, ratio = self.get_verified_ratio()\n f.write(\"Verified Names: {}\\n\".format(str(verified)))\n f.write(\"Names: {}\\n\".format(str(seen)))\n f.write(\"Ratio: {}\\n\".format(str(ratio)))", "def WriteFile(fname, data):\n #self._out.Info(\"Write file '%s' size %d (%#0x)\" %\n #(fname, len(data), len(data)))\n with open(Filename(fname), 'wb') as fd:\n fd.write(data)", "def write_configuration_file(self, content):\n with open(self.configuration_file_path, 'w') as configuration_file:\n configuration_file.write(content)", "def write(self, file):\n pos = file.tell()\n pickle.dump((self.index, self.meta, self.info), file)\n file.seek(0)\n\n # update the header with the position of the content index.\n file.write(struct.pack('<Q', pos))", "def save(self, content, address, file):\n full_address = \"/home/red/WAREHOUSE\" + address\n file_path = full_address + \"/\" + file\n\n try:\n os.makedirs(full_address, 0o777, True)\n except OSError:\n pass\n\n write_binary_file(content, file_path)", "def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)", "def write_id_to_file(file, id):\n output = open(file, 'w')\n output.write(id)\n output.close()", "def write(self, object, content_type, to_file):\n return to_file", "def to_file(self, file_path, smirnoff_data):\n pass", "def filewrite(self, filename, data):\n try:\n filedata = data.decode(\"utf-8\")\n except Exception:\n filedata = data\n lock = FileLock(filename)\n lock.acquire()\n with open(filename, 'w+') as f:\n f.write(filedata)\n lock.release()", "def spew(path, data):\n with open(path, 'w+') as f:\n f.write(data)", "def write_file_content(path, file_name, content):\n if not os.path.exists(path):\n os.makedirs(path)\n f = io.open(path + file_name, \"w\", encoding = 'utf-8')\n f.write(content)\n f.close()", "def to_output_file(self, content):\n self.__log(f'Starting to write response content to output file.')\n if self.output_file_exists() and not self.config['FORCE_OVERWRITE']:\n self.__log(f'Cannot write to file. Selected output file exists and FORCE_OVERWRITE is disabled.', 'error')\n raise FileExistsError\n file = self.config['OUT_FOLDER'] + '/' + self.config['OUTPUT_FOLDER'] + '/' + self.output_filename + '.' 
\\\n + self.options['image_format'].lower()\n with open(file, 'w') as f:\n f.writelines(content)\n self.__log(f'Successfully wrote response content to \"{file}\".', 'success')", "def write(self, output: Any) -> None:\n self._original.write(output)\n self._handler.file_write(self._name, output)", "def write(self, ext_file_action=ExtFileAction.copy_relative_paths):\n if self.simulation_data.auto_set_sizes:\n self._update_size_defs()\n\n # create any folders in path\n package_file_path = self.get_file_path()\n package_folder = os.path.split(package_file_path)[0]\n if package_folder and not os.path.isdir(package_folder):\n os.makedirs(os.path.split(package_file_path)[0])\n\n # open file\n fd = open(package_file_path, \"w\")\n\n # write flopy header\n if self.simulation_data.write_headers:\n dt = datetime.datetime.now()\n header = (\n \"# File generated by Flopy version {} on {} at {}.\"\n \"\\n\".format(\n __version__,\n dt.strftime(\"%m/%d/%Y\"),\n dt.strftime(\"%H:%M:%S\"),\n )\n )\n fd.write(header)\n\n # write blocks\n self._write_blocks(fd, ext_file_action)\n\n fd.close()", "def write_file(self, directory, name, content):\n\n try:\n f = open(os.path.join(directory, name), 'w')\n f.write(content)\n f.close()\n except:\n print \"Content not written to file: %s\" % name", "def save_to_file(self, file_name, format, properties=None,\n halo_properties=None):\n\n directory = '/'.join(file_name.split('/')[:-1])\n import os\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n if format == \"hdf5\":\n import h5py\n\n f = h5py.File(file_name, \"a\")\n\n if properties is None: \n # save every property\n for quantity in self._quantities:\n f.create_dataset(quantity, data=self._quantities[quantity],\n compression=\"gzip\")\n else: \n # save specified properties\n for quantity in properties:\n f.create_dataset(quantity, data=self._quantities[quantity],\n compression=\"gzip\")\n\n if not halo_properties is None:\n # save specified halo properties\n for quantity in halo_properties:\n f.create_dataset(\"halo_\"+quantity, compression=\"gzip\",\n data=self.get_halo(quantity))\n f.close()\n\n elif format == \"fits\":\n from astropy.table import Table\n \n if properties is None:\n # save every property\n t = Table(list(self._quantities.values()), \n names=list(self._quantities.keys()))\n t.write(file_name, format=\"fits\")\n else:\n # save specified properties\n data = [None] * len(properties)\n for i, prop in enumerate(properties):\n data[i] = self._quantities[prop]\n t = Table(data, names=properties)\n t.write(file_name, format=\"fits\")\n\n if not halo_properties is None:\n # save specified halo properties\n data = [None] * len(halo_properties)\n for i, prop in enumerate(halo_properties):\n data[i] = self.get_halo(prop)\n halo_properties[i] = \"halo_\" + halo_properties[i]\n t = Table(data, names=halo_properties)\n t.write(file_name, format=\"fits\")\n\n # can add more file formats...\n\n else:\n raise ValueError(\"Invalid file format\")", "def write(self, file, data):\n if not exists(dirname(file)):\n makedirs(dirname(file))\n\n with open(file + '.json', 'w') as file:\n file.write(dumps(data, indent=2, separators=(',', ': '), sort_keys=True))", "def save_hdf_file(file_path, idata, key_path='entry', overwrite=True):\r\n if (\"\\\\\" in file_path):\r\n raise ValueError(\r\n \"Please use a file path following the Unix convention\")\r\n file_base, file_ext = os.path.splitext(file_path)\r\n if not ((file_ext == '.hdf') or (file_ext == '.h5')):\r\n file_ext = '.hdf'\r\n file_path = file_base + 
file_ext\r\n _create_folder(file_path)\r\n if not overwrite:\r\n file_path = _create_file_name(file_path)\r\n ofile = None\r\n try:\r\n ofile = h5py.File(file_path, 'w')\r\n except IOError:\r\n print((\"Couldn't write file: {}\").format(file_path))\r\n raise\r\n grp = ofile.create_group(key_path)\r\n grp.create_dataset(\"data\", data=idata)\r\n ofile.close()\r\n return file_path", "def to_file(self, filename=None):\n name = None\n if filename is not None:\n name = filename\n elif self.name:\n name = self.name\n\n if name:\n #f = open(self.name, 'w')\n f = codecs.open(name, 'w', encoding='utf-8')\n self.seek(0)\n f.write(self.read())\n f.close()\n else:\n print \"No log_name for this log\"", "def write_to_file(self,\n ofile=\"output.txt\",\n **kwargs):\n with open(file=ofile, mode='a') as ofile:\n for num_line, obj in self.items():\n ofile.write(str(self._construct_output_string(num_line=num_line,\n obj=obj,\n **kwargs)))", "def write_data_2(fh, header, data):\r\n # fhw = open(filename, \"w\")\r\n if len(header) <= 0 or len(data) <= 0:\r\n return\r\n else:\r\n fh.write(str(header + \"\\n\"))\r\n fh.write(str(data) + \"\\n\")\r\n fh.write(\"\\n\")", "def write(self, filename, header=None):\n\n origfile = self._filename\n\n try:\n with open(filename, 'w') as _file:\n self.writestream(_file, header)\n self._filename = filename\n return True\n\n except IOError:\n self._filename = origfile\n return False", "def create_file(path: Path, content: str) -> None:\n path.touch()\n with path.open(\"w\") as f:\n f.write(content)" ]
[ "0.68592376", "0.674406", "0.674406", "0.656854", "0.6566108", "0.6498972", "0.6464658", "0.6464213", "0.6418377", "0.6384356", "0.63767195", "0.637185", "0.63714814", "0.63141376", "0.6300945", "0.63001466", "0.6275614", "0.62744516", "0.6243898", "0.6229664", "0.62277204", "0.61617565", "0.61581826", "0.6146818", "0.612254", "0.61209", "0.6118378", "0.6106481", "0.6097403", "0.6095508", "0.6090492", "0.60687876", "0.60589784", "0.6056844", "0.6056712", "0.6047705", "0.60362005", "0.60181713", "0.60127056", "0.6005693", "0.59956783", "0.5989833", "0.5989538", "0.5980762", "0.5980057", "0.5971517", "0.59677005", "0.59620833", "0.5962006", "0.59559435", "0.5948734", "0.5946951", "0.5932892", "0.5929774", "0.5925201", "0.5919731", "0.59096605", "0.59046036", "0.5897821", "0.58893883", "0.5888516", "0.5886751", "0.58815306", "0.5871647", "0.5866589", "0.5849164", "0.58453715", "0.5843467", "0.58412105", "0.58407396", "0.5833554", "0.5833362", "0.58316284", "0.5830037", "0.5827301", "0.582587", "0.582022", "0.5812739", "0.5801791", "0.57992864", "0.57826763", "0.57733524", "0.57729423", "0.57721883", "0.5769498", "0.5767416", "0.5761557", "0.5760997", "0.5757384", "0.57490283", "0.5741134", "0.5739183", "0.5733576", "0.57297724", "0.57280165", "0.5727656", "0.5726971", "0.57125527", "0.5712195", "0.5710507" ]
0.5741435
90
Loads the data set provided in this repository and returns a list of Decks or FuzzyDecks. The deck list is sorted by archetype so the distance matrix is easier to visualize.
def load_data_set(hero_class: str, fuzzy: bool, filename: str = "data/Decks.json", debug: bool = False) \ -> Union[List[Deck], List[FuzzyDeck]]: if debug: print("### loading dataset...") with open(filename) as f: data = json.load(f) hero_classes = list(data["series"]["metadata"].keys()) if hero_class not in hero_classes and hero_class != "ALL": raise Exception("hero class <" + hero_class + "> not available. " "Consider using one class out of: " + ", ".join(hero_classes)) if debug: for cl in hero_classes: print("" + str(len(data["series"]["data"][cl])) + " played decks for hero class " + cl) played_decks = [] if hero_class == "ALL": for hero_class in hero_classes: for i, deck_data in enumerate(data["series"]["data"][hero_class]): if fuzzy: played_decks.append(FuzzyDeck(deck_data)) else: played_decks.append(Deck(deck_data)) else: for i, deck_data in enumerate(data["series"]["data"][hero_class]): if fuzzy: played_decks.append(FuzzyDeck(deck_data)) else: played_decks.append(Deck(deck_data)) # sort by cluster label for easier visualization of distance matrix played_decks = sorted(played_decks, key=lambda x: x.archetype[0]) return played_decks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_decks(**options):\n graph = bonobo.Graph()\n\n csv_in = bonobo.noop\n\n graph.add_chain(csv_in, in_use_cards, _input=None)\n\n for deck in listdir('decks'):\n deck_path = join('decks', deck)\n if deck == '.gitignore':\n continue\n\n if isfile(deck_path):\n graph.add_chain(bonobo.CsvReader(deck_path), _output=csv_in)\n\n return graph", "def get_decks(self, include_cards=True):\n deck_previews = self.data_source.get_decks(self.user_id,\n not include_cards)\n\n return deck_previews", "def get_deck_list(deckid):\n # Need to know if we're looking at a deckid or deckid tuple\n # TODO: Clean this up a bit (shouldn't need to support deckids or deck)\n # tuples now that I'm using Deck objects.)\n if isinstance(deckid, tuple):\n # The deckid is in deck[0]\n # Format is (deckid, deck_class)\n deckid = deckid[0]\n # http://www.hearthpwn.com/decks/listing/ + /neutral or /class\n url = 'http://www.hearthpwn.com/decks/listing/'\n css = '#cards > tbody > tr > td.col-name'\n\n cards = []\n\n # Class Cards\n pagetree = get_pagetree(url + str(deckid) + '/class')\n elements = get_elements_from_page(pagetree, css)\n for element in elements:\n card = html.tostring(element, method='text', encoding='UTF-8')\n cards.append(card)\n\n # Neutral Cards\n pagetree = get_pagetree(url + str(deckid) + '/neutral')\n elements = get_elements_from_page(pagetree, css)\n for element in elements:\n card = html.tostring(element, method='text', encoding='UTF-8')\n cards.append(card)\n\n regex = re.compile(b'^\\r\\n(.+)\\r\\n\\r\\n\\xc3\\x97 (\\d+)')\n deck = []\n for card in cards:\n match = re.search(regex, card)\n if match:\n cardname = match.group(1).decode('UTF-8')\n amount = int(match.group(2))\n deck.append(Card(cardname, amount))\n\n return deck", "def get_decks(filtering=None, sorting=None, count=None,\n patch=None, classid=None):\n decks_metainfo = get_deck_metainfo(filtering, sorting, count,\n patch, classid)\n decks = [Deck(deck[0], deck[1], get_deck_list(deck[0]))\n for deck in decks_metainfo]\n return decks", "def populatePokerDeck():\r\n #At some point, I may want this function, or a function like it, to read from a txt/json or dat file, \r\n #but for now this suffices.\r\n aDeck =\t[\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Ace\",\r\n \"Type\": \"Face\",\r\n \"Value\": 1\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Two\",\r\n \"Type\": \"Number\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Three\",\r\n \"Type\": \"Number\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Four\",\r\n \"Type\": \"Number\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Five\",\r\n \"Type\": \"Number\",\r\n \"Value\": 5\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Six\",\r\n \"Type\": \"Number\",\r\n \"Value\": 6\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Seven\",\r\n \"Type\": \"Number\",\r\n \"Value\": 7\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Eight\",\r\n \"Type\": \"Number\",\r\n \"Value\": 8\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Nine\",\r\n \"Type\": \"Number\",\r\n \"Value\": 9\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Ten\",\r\n \"Type\": \"Number\",\r\n \"Value\": 10\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Jack\",\r\n \"Type\": \"Face\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Queen\",\r\n \"Type\": \"Face\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"King\",\r\n 
\"Type\": \"Face\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Ace\",\r\n \"Type\": \"Face\",\r\n \"Value\": 1\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Two\",\r\n \"Type\": \"Number\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Three\",\r\n \"Type\": \"Number\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Four\",\r\n \"Type\": \"Number\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Five\",\r\n \"Type\": \"Number\",\r\n \"Value\": 5\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Six\",\r\n \"Type\": \"Number\",\r\n \"Value\": 6\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Seven\",\r\n \"Type\": \"Number\",\r\n \"Value\": 7\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Eight\",\r\n \"Type\": \"Number\",\r\n \"Value\": 8\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Nine\",\r\n \"Type\": \"Number\",\r\n \"Value\": 9\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Ten\",\r\n \"Type\": \"Number\",\r\n \"Value\": 10\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Jack\",\r\n \"Type\": \"Face\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Queen\",\r\n \"Type\": \"Face\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"King\",\r\n \"Type\": \"Face\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Ace\",\r\n \"Type\": \"Face\",\r\n \"Value\": 1\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Two\",\r\n \"Type\": \"Number\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Three\",\r\n \"Type\": \"Number\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Four\",\r\n \"Type\": \"Number\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Five\",\r\n \"Type\": \"Number\",\r\n \"Value\": 5\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Six\",\r\n \"Type\": \"Number\",\r\n \"Value\": 6\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Seven\",\r\n \"Type\": \"Number\",\r\n \"Value\": 7\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Eight\",\r\n \"Type\": \"Number\",\r\n \"Value\": 8\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Nine\",\r\n \"Type\": \"Number\",\r\n \"Value\": 9\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Ten\",\r\n \"Type\": \"Number\",\r\n \"Value\": 10\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Jack\",\r\n \"Type\": \"Face\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Queen\",\r\n \"Type\": \"Face\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"King\",\r\n \"Type\": \"Face\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Ace\",\r\n \"Type\": \"Face\",\r\n \"Value\": 1\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Two\",\r\n \"Type\": \"Number\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Three\",\r\n \"Type\": \"Number\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Four\",\r\n \"Type\": \"Number\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Five\",\r\n \"Type\": \"Number\",\r\n \"Value\": 5\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Six\",\r\n \"Type\": \"Number\",\r\n \"Value\": 6\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Seven\",\r\n \"Type\": \"Number\",\r\n 
\"Value\": 7\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Eight\",\r\n \"Type\": \"Number\",\r\n \"Value\": 8\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Nine\",\r\n \"Type\": \"Number\",\r\n \"Value\": 9\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Ten\",\r\n \"Type\": \"Number\",\r\n \"Value\": 10\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Jack\",\r\n \"Type\": \"Face\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Queen\",\r\n \"Type\": \"Face\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"King\",\r\n \"Type\": \"Face\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Red\",\r\n \"Name\": \"Joker\",\r\n \"Type\": \"Face\",\r\n \"Value\": None\r\n },\r\n {\r\n \"Suite\": \"Black\",\r\n \"Name\": \"Joker\",\r\n \"Type\": \"Face\",\r\n \"Value\": None\r\n }]\r\n \r\n return aDeck", "def get_deck():\n deck = []\n for suit in Suit:\n for rank in Rank:\n deck.append(Card(suit, rank))\n return deck", "def deck(self) -> Iterable[CardIdentifier]:\n # for some reason cards are treated quite different by NS api currently\n # so we cant simply make a shards call. for now we make a direct call\n # to the requester shards_xml method, since it does not insert the\n # `nation=name` parameter\n # this request returns a <CARDS><DECK><CARD/>...</DECK><CARDS> structure,\n # so we immedietly retrieve the DECK node (which contains multiple CARD nodes)\n # with [0]\n deck = as_xml(\n self.requester.shard_request(\n shards=[\"cards\", \"deck\"], nationname=self.nationname\n ).text\n )[0]\n return [CardIdentifier.from_xml(node) for node in deck]", "def prepare_decklists(deck_files, data_path):\n \n deck = {'deckname':[], 'cardname':[], 'card_count':[], 'sideboard':[]}\n for file_name in deck_files:\n companion = 0\n file = open(data_path+file_name, 'r')\n sideboard = False\n for line in file:\n\n items = line.split(\" \")\n if (items[0][:4] == 'Deck'):\n pass\n elif (items[0][:9] == 'Companion'):\n companion = 1\n elif '\\n' in items or items[0][:9] == 'Sideboard':\n if companion == 1:\n companion -= 1\n else:\n sideboard = True\n else:\n try:\n deck['deckname'].append(file_name)\n deck['cardname'].append(\" \".join(items[1:]).replace('\\n',''))\n deck['card_count'].append(int(items[0]))\n deck['sideboard'].append(sideboard)\n except (Exception, ValueError) as error:\n print(f'Unable to process: {error} ; file {filename}')\n return deck", "def generate_deck(self):\n\t\tsuits = [\"hearts\", \"spades\",\"diamonds\",\"clubs\"]\n\t\tcards = []\n\n\t\tfor suit in suits:\n\t\t\tif self.ace_as_eleven:\n\t\t\t\tace = Card(\"Ace\", 11, suit)\n\t\t\telse:\n\t\t\t\tace = Card(\"Ace\", 1, suit)\n\t\t\tcards.append(ace)\n\n\t\t\ttwo = Card(\"Two\", 2, suit)\n\t\t\tcards.append(two)\n\t\t\t\n\t\t\tthree = Card(\"Three\", 3, suit)\n\t\t\tcards.append(three)\n\n\t\t\tfour = Card(\"Four\", 4, suit)\n\t\t\tcards.append(four)\n\n\t\t\tfive = Card(\"Five\", 5, suit)\n\t\t\tcards.append(five)\n\n\t\t\tsix = Card(\"Six\", 6, suit)\n\t\t\tcards.append(six)\n\n\t\t\tseven = Card(\"Seven\", 7, suit)\n\t\t\tcards.append(seven)\n\n\t\t\teight = Card(\"Eight\", 8, suit)\n\t\t\tcards.append(eight)\n\n\t\t\tnine = Card(\"Nine\", 9, suit)\n\t\t\tcards.append(nine)\n\n\t\t\tten = Card(\"Ten\", 10, suit)\n\t\t\tcards.append(ten)\n\n\t\t\tjack = Card(\"Jack\", 10, suit)\n\t\t\tcards.append(jack)\n\n\t\t\tqueen = Card(\"Queen\", 10, suit)\n\t\t\tcards.append(queen)\n\n\t\t\tking = Card(\"King\", 10, suit)\n\t\t\tcards.append(king)\n\n\t\treturn 
cards", "def __init__(self):\n \n self.deck = [Card(suit,rank) for suit in SUITS for rank in RANKS]", "def loadCardDB():\n with open(CARDS_JSON, 'r') as infofile:\n cards = json.load(infofile)\n with open(PILOT_TEXT_JSON, 'r') as infofile:\n pilotTexts = json.load(infofile)\n with open(UPGRADE_TEXT_JSON, 'r') as infofile:\n upgradeTexts = json.load(infofile)\n with open(MODIFICATION_TEXT_JSON, 'r') as infofile:\n modificationTexts = json.load(infofile)\n with open(TITLE_TEXT_JSON, 'r') as infofile:\n titleTexts = json.load(infofile)\n return _createCardDB(cards, pilotTexts, upgradeTexts, modificationTexts, titleTexts)", "def sort(self):\n self.deckcards.sort()", "def get_cards(self):\n return [card.view_model() for card in self._deck.loc]", "def load():\n\n # Path for the cache-file.\n cache_path = os.path.join(data_dir, \"collisions.pkl\")\n\n # If the DataSet-object already exists in a cache-file\n # then load it, otherwise create a new object and save\n # it to the cache-file so it can be loaded the next time.\n dataset = load_cached(cache_path=cache_path,\n in_dir=data_dir)\n\n return dataset", "def shuffle(self):\n\n if self.deck:\n self.deck = deque()\n\n max_decks = self.deck_count + 1 # +1 for range function\n\n for deck in range(1, max_decks):\n for suit in self.suits:\n for num, name in enumerate(self.names, start=1):\n card = PlayingCard()\n card.set_attributes(name, suit, num)\n self.deck.append(card)\n\n for deck_shuffle in range(self.shuffle_count):\n random.shuffle(self.deck)", "def test_list(self):\n\n decks = []\n try:\n decks.extend(scrape_decks())\n except Exception as e:\n self.logger.exception(\n 'Scraper for site TappedOut raised an exception'\n )\n\n print(\"Collected {} decks:\".format(len(decks)))\n for deck in decks:\n print(\"#\", deck.deckType)\n print(\" commander =\", deck.commander)\n print(\" image =\", deck.commander_img)\n print(\" video =\", deck.video)\n if deck.decklist is not None:\n print(\" decklist =\", deck.decklist)\n print()\n\n if os.environ.get(\"JUMBO_WRITE_TO_DB\") is not None:\n self.insert_decks(decks)", "def populate_deck_db(decks, cursor):\n cursor.execute('DROP TABLE IF EXISTS decks')\n cursor.execute('DROP TABLE IF EXISTS deck_lists')\n cursor.execute('''CREATE TABLE IF NOT EXISTS decks\n (deckid integer primary key, class text)\n WITHOUT ROWID''')\n\n cursor.execute('''CREATE TABLE IF NOT EXISTS deck_lists\n (deckid integer, cardname text, amount integer,\n PRIMARY KEY (deckid, cardname))''')\n for deck in decks:\n cursor.execute('INSERT INTO decks VALUES (?, ?)',\n (deck.deckid, deck.playerclass))\n for card in deck.decklist:\n cursor.execute('INSERT INTO deck_lists VALUES (?, ?, ?)',\n (deck.deckid, card.cardname, card.amount))\n return", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)\n loadAdquires(catalog)\n loadNacionalities(catalog)\n load2DArtworks(catalog)\n loadArtistMediumsTags(catalog)\n loadDptments(catalog)\n catalog['artists'] = sortArtists(catalog, 3)\n fillArtistMediums(catalog)\n fillMostUsedMediums(catalog)\n catalog['artists_tags'] = sortArtistTags(catalog, 3)\n sort_dptments(catalog)", "def create_deck(self):\n\n deck = []\n\n # Suits and face values\n suits = ['Clubs', 'Diamonds', 'Hearts', 'Spades']\n face_values = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n\n # Creating deck\n for suit in suits:\n for value in face_values:\n deck.append(Card(suit[0], value))\n\n # Adding jokers\n if self.jokers:\n deck.append(Card('Jk', 0))\n deck.append(Card('Jk', 0))\n\n return 
deck", "def refresh(self):\n self.deck = []\n\n for _suit in Suit:\n for _face in Face:\n self.insert(Card(_suit, _face, self))", "def load(self):\n\t\t# Initialize empty list\n\t\tdata_files = []\n\n\t\t# Append the Drusen files to the list\n\t\tfor single_file in os.listdir(self.data_dir):\n\t\t\tdata_files.append(single_file)\n\t\treturn data_files", "def get_card_sets(self, name: str) -> List:", "def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))", "def shuffle_deck(self):\n deck = [i for i in range(0, 52)]\n shuffle(deck)\n self.deck = [cards[c*2:c*2+2] for c in deck]", "def load(self, source):\n try:\n inputdata = self.__inputmanager.read(source)\n self.__suitables = self.__inputmanager.map(inputdata)\n self.__data = inputdata\n except ValueError as e:\n print (\"Failed to load the dataset: %s\" % e)\n raise\n\n self.__modules = self.import_suitable_visualizations(self.__suitables)\n self.__has_datefields = self.__inputmanager.has_date_points()\n # Converting the datakeys into strings.\n self.__datakeys = [str(i) for i in list(self.__data[0].keys())]\n return self.__suitables", "def parse_decklist(archidekt_id: str) -> tuple[Decklist, bool, list]:\r\n decklist = Decklist()\r\n warnings = []\r\n ok = True\r\n\r\n r = requests.get(f\"https://archidekt.com/api/decks/{archidekt_id}/\")\r\n if r.status_code != 200:\r\n raise (ValueError(f\"Archidekt returned statuscode {r.status_code}\"))\r\n\r\n data = r.json()\r\n\r\n in_deck = {cat[\"name\"] for cat in data[\"categories\"] if cat[\"includedInDeck\"]}\r\n\r\n for item in data[\"cards\"]:\r\n # Extract relevant data\r\n count = item[\"quantity\"]\r\n card_name = item[\"card\"][\"oracleCard\"][\"name\"]\r\n set_id = item[\"card\"][\"edition\"][\"editioncode\"]\r\n collector_number = item[\"card\"][\"collectorNumber\"]\r\n if len(item[\"categories\"]) > 0 and item[\"categories\"][0] not in in_deck:\r\n continue\r\n\r\n # Validate card name\r\n card_name, warnings_name = validate_card_name(card_name)\r\n if card_name is None:\r\n decklist.append_comment(card_name)\r\n warnings.extend([(decklist.entries[-1], level, msg) for level, msg in warnings_name])\r\n ok = False\r\n continue\r\n\r\n # Validate card print\r\n card, warnings_print = validate_print(card_name, set_id, collector_number)\r\n\r\n decklist.append_card(count, card)\r\n warnings.extend([(decklist.entries[-1], level, msg) for level, msg in warnings_name + warnings_print])\r\n\r\n decklist.name = data[\"name\"]\r\n\r\n return decklist, ok, warnings", "def populate(cards_info, sets_file=None, session=Session):\n\n s = session()\n\n with sets_file or 
codecs.open(DEFAULT_SETS_FILE, encoding=\"utf-8\") as file:\n reader = unicode_csv_reader(file, reader=csv.DictReader)\n sets = {}\n for row in reader:\n row[\"released\"] = datetime.datetime.strptime(\n row[\"released\"], u\"%Y/%m/%d\"\n )\n sets[row[\"code\"]] = m.Set(**row)\n\n sts = itertools.chain.from_iterable(types.subtypes.itervalues())\n\n types_ = {type : m.Type(name=type) for type in types.all}\n supertypes = {st : m.Supertype(name=st) for st in types.supertypes}\n subtypes = {st : m.Subtype(name=st) for st in sts}\n\n s.add_all(\n itertools.chain.from_iterable(\n i.itervalues() for i in (sets, types_, supertypes, subtypes)\n )\n )\n\n for card in cards_info:\n # XXX: Split cards / Stupid multiple ability\n if \" // \" in card[u\"name\"] or card[u\"name\"] == u\"Seeds of Strength\":\n continue\n\n t, u, v = (card.pop(k) for k in [u\"supertypes\", u\"types\", u\"subtypes\"])\n\n card[u\"ability_objects\"] = [\n s.query(m.Ability).filter_by(description=d).first() or\n m.Ability(description=d) for d in card.pop(u\"abilities\")\n ]\n\n card[u\"supertype_objects\"] = {supertypes[st] for st in t}\n card[u\"type_objects\"] = {types_[type] for type in u}\n card[u\"subtype_objects\"] = {subtypes[st] for st in v}\n\n appearances = {\n m.SetAppearance(set=sets[set], rarity=rarity)\n for set, rarity in card.pop(u\"appearances\")\n }\n\n card = m.Card(**card)\n card.set_appearances.update(appearances)\n\n s.add(card)\n\n s.commit()", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def shuffle(self):\r\n random.shuffle(self.deck_of_cards)\r\n return self.deck_of_cards", "def shuffle(self):\n random.shuffle(self.deckcards)", "def buildDeck(self, resources):\n for key,value in resources.deckData.items():\n self.deck.append(resources.cards[value[self.playerClass]])\n random.shuffle(self.deck)\n self.HP = self.getHP() # Set HP", "def test_deck_contains_all_cards(self):\n\n # I'm using collections.Counter so that the order is ignored (as in a\n # set) but that multiples are accounted for.\n expected = collections.Counter([\n ('r', 'i'), ('r', 'i'), ('r', 'i'),\n ('r', 2), ('r', 3), ('r', 4), ('r', 5), ('r', 6), \n ('r', 7), ('r', 8), ('r', 9), ('r', 10),\n\n ('g', 'i'), ('g', 'i'), ('g', 'i'),\n ('g', 2), ('g', 3), ('g', 4), ('g', 5), ('g', 6),\n ('g', 7), ('g', 8), ('g', 9), ('g', 10),\n\n ('b', 'i'), ('b', 'i'), ('b', 'i'),\n ('b', 2), ('b', 3), ('b', 4), ('b', 5), ('b', 6),\n ('b', 7), ('b', 8), ('b', 9), ('b', 10),\n\n ('y', 'i'), ('y', 'i'), ('y', 'i'),\n ('y', 2), ('y', 3), ('y', 4), ('y', 5), ('y', 6),\n ('y', 7), ('y', 8), ('y', 9), ('y', 10),\n\n ('w', 'i'), ('w', 'i'), ('w', 'i'),\n ('w', 2), ('w', 3), ('w', 4), ('w', 5), ('w', 6),\n ('w', 7), ('w', 8), ('w', 9), ('w', 10), ])\n\n self.assertEqual(expected, collections.Counter(deck.deck_gen()))", "def __init__(self):\n self.deck = []\n\n for i in SUITS:\n for j in RANKS:\n self.deck.append(Card(i, j))", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n 
data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def play():\n decks = make_decks()\n\n deck_pulls = {deck: [] for deck in decks}\n for i in range(100):\n deck = random.choice(decks)\n deck_pulls[deck].append(deck.pull())\n\n return decks, deck_pulls", "def get_datasets() -> List[Dataset]:\n\n amzn = Dataset(\n id='amzn', name='Amazon Reviews', language='en',\n description=\"This dataset consists of reviews of fine foods from amazon. The data span a period of more than 10 years, including all ~500,000 reviews up to October 2012. Reviews include product and user information, ratings, and a plain text review. It also includes reviews from all other Amazon categories.\")\n\n cnn = Dataset(\n id='cnn_dailymail', name='CNN/ DailyMail', language='en',\n description='The well-known CNN/ DailyMail data set for text summarization (version 3.0.0). The data has been fetched via HuggingFace Datasets')\n\n swisstext = Dataset(\n id='swisstext', name='SwissText 2019', language='de',\n description='The dataset was published for the SwissText conference 2019. 
')\n\n return [amzn, cnn, swisstext]", "def load_data(self):\n if not os.path.isfile(\"{}/OFF_data.json\".format(settings.DIR_PATH)):\n self.request_constructor(settings.R_COLLECTION['category'], 'NULL', 'tags')\n self.crawl_data('category')\n i = 0\n for item in self.categories:\n i += 1\n cat = item.get(\"name\")\n self.request_constructor(settings.R_COLLECTION['product'], cat, 'products')\n self.crawl_data('product')\n\n self.data = {\"categories\": self.categories, \"products\": self.products}\n self.save_data('OFF_data.json')\n else:\n with open(\"{}/OFF_data.json\".format(settings.DIR_PATH), 'r') as f:\n self.data = json.load(f)\n self.categories = self.data[\"categories\"]\n self.products = self.data[\"products\"]\n return self.categories, self.products", "def get_all_desserts():\n return get_data_from_category_name(\"Desserts\")", "def _generate_datasets(self):\n datasets = list()\n for fname in sorted(os.listdir(self.base_dir)):\n if not self._filename_re.match(fname):\n continue\n\n file_path = os.path.join(self.base_dir, fname)\n try:\n fh = self._open_hdf5(file_path)\n\n except (IOError, OSError) as e:\n warnings.warn('Cannot access {}; skipped'.format(file_path))\n print(e)\n continue\n\n for key in fh:\n if self._groupname_re.match(key.lstrip('/')):\n datasets.append(ObjectTableWrapper(fh, key, self._schema))\n continue\n\n warn_msg = 'incorrect group name \"{}\" in {}; skipped this group'\n warnings.warn(warn_msg.format(os.path.basename(file_path), key))\n\n return datasets", "def load_data_from_files(self):\n # separated method to allow mock easier\n logger.info(\"Loading data...\")\n parent = Path(__file__).parent\n path = parent / \"resources\" / \"scores.txt\"\n self.scorer.load_from_file(path)\n path = parent / \"resources\" / \"american-english-large.txt\"\n self.trie.load_from_file(path)\n path = parent / \"resources\" / \"reels.txt\"\n self.reels = Reel.get_from_file(path)\n logger.info(\"Data loaded!\")", "def deckCreated(self):\n deck = self.deckLoader.loadDeck()\n self.assertTrue(all([item in deck for item in self.items]), \"Each item in the deck initializer should be in the deck\")", "def _read_dataset(self):\n import pandas as pd\n\n freesolv_path = get_data_file_path(FREESOLV_PATH)\n\n freesolv_db = pd.read_csv(freesolv_path, delimiter=';',\n skipinitialspace=True,\n skiprows=[0, 1, 2], header=0,\n names=['compound id', 'SMILES',\n 'iupac name',\n 'experimental value',\n 'experimental uncertainty',\n 'calculated value (GAFF)',\n 'calculated uncertainty',\n 'experimental reference',\n 'calculated reference',\n 'notes'])\n\n compound_ids = freesolv_db['compound id'].to_list()\n smiles_tags = freesolv_db['SMILES'].to_list()\n experimental_v = freesolv_db['experimental value'].to_list()\n return compound_ids, smiles_tags, experimental_v", "def get_deck(self, deck_id, include_cards=True):\n deck = self.data_source.get_deck(deck_id, self.user_id, include_cards)\n\n return deck", "async def get_deck_definition(\n self,\n *,\n short_fixed_trash: bool = False,\n ) -> DeckDefinitionV2:\n deck_load_name = SHORT_TRASH_DECK if short_fixed_trash else STANDARD_DECK\n return load_deck(deck_load_name, 2)", "def load_recipes_from_test_set(cls, args):\n cls._recipes = Dataset().load_test(\n use_full_test_set=args.use_full_test_set,\n use_english=args.use_english,\n use_english_intelligible=args.use_english_intelligible,\n use_gold=args.use_gold)\n cls._add_indices_to_recipes()\n cls._initialize_recipes_status()\n logging.info(\"Recipes loaded.\")", "def get_card_list(self):\n 
return self.cards", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def shuffle(self):\r\n random.shuffle(self.deck)", "def createDeck():\n deck = []\n my_file = open(current_file)\n all_the_lines = my_file.readlines()\n deck = []\n for i in all_the_lines:\n deck.append(i.strip())\n my_file.close()\n random.shuffle(deck)\n return deck", "def make_soldier_objects(data_dir: str) -> List[Soldier]:\n soldier_list = []\n \n with open(data_dir, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n print(reader.__next__()) # print header\n for row in reader:\n ################################# YOUR CODE HERE #################################\n typecode, wp, armor, vitality = row\n if typecode not in [\"INF\", \"ARC\", \"CVL\"]:\n continue\n wp, armor, vitality = float(wp), float(armor), float(vitality)\n soldier = Soldier(typecode, wp, armor, vitality)\n soldier_list.append(soldier)\n ##################################################################################\n return soldier_list", "def fill_standard_deck(self):\n for name in [\"ace\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\", \"ten\", \"jack\",\n \"queen\", \"king\"]:\n for suit in [\"hearts\", \"diamonds\", \"spades\", \"clubs\"]:\n self.cards.append(card.Card(name, suit, self.card_values[name]))", "def shuffle():\n deck = []\n # By Baccarat rules, there are 4 aces worth 1 point, 16 face cards and tens\n # worth 0 point, and 32 other cards worth their numerical value.\n # 8 decks are suffled together to create a shoe.\n for n in range(8):\n for i in range (32):\n deck.append((i % 8) + 2)\n \n for i in range (16):\n deck.append(0)\n \n for i in range (4):\n deck.append(1)\n \n random.shuffle(deck)\n\n return deck", "async def test_get_deck_definition(\n deck_type: DeckType,\n expected_definition: DeckDefinitionV3,\n mock_labware_data_provider: LabwareDataProvider,\n) -> None:\n subject = DeckDataProvider(\n deck_type=deck_type, labware_data=mock_labware_data_provider\n )\n result = await subject.get_deck_definition()\n assert result == expected_definition", "def load_data(data_links_list=(\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/raw_data.csv',\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/sample_meta_info.tsv')):\n\n # Reading data sets from the links provided.\n df1 = pd.read_csv(data_links_list[0],\n error_bad_lines=False)\n df2 = pd.read_csv(data_links_list[1],\n sep='\\t')\n df2 = df2.set_index(df2['project'])\n # fill the Nas id df1 as \". 
Makes the groupbys behave better.\n df1.fillna('', inplace=True)\n # repleace 'genus' = 'other' with an empty string to be consistent.\n df1.replace(to_replace='other', value='', inplace=True)\n # Removing duplicate columns.\n del df2['project']\n del df2['ID']\n df1 = df1.set_index(df1['project'])\n # Removing duplicate column.\n del df1['project']\n # Joining the two datasets.\n df = df1.join(df2)\n # Uniformity in non-capitalization of column names.\n df.rename(columns={'Kingdom': 'kingdom', 'Phylum': 'phylum',\n 'Class': 'class', 'Order': 'order',\n 'Family': 'family', 'Genus': 'genus',\n 'Length': 'length'}, inplace=True)\n df.index.names = ['sampleID']\n # Rearranging columns so that abundance is the last column.\n df = df[['kingdom',\t'phylum', 'class', 'order',\n 'family', 'genus', 'length', 'oxygen',\n 'replicate', 'week', 'abundance']]\n assert isinstance(df, pd.DataFrame)\n return df", "def scrape_decklist(deck_url):\r\n soup = hot_soup(deck_url)\r\n card_re = re.compile(r\"(\\d+)\\s(.+)\")\r\n deck_table = soup.find_all(class_='Stable')[1]\r\n deck_headers = deck_table.previous_sibling.previous_sibling.find_all('td')\r\n archtype = deck_headers[2].text.replace('decks', '')\r\n cardlist = deck_table.table.find_all('table')\r\n mainboard, sideboard = [], []\r\n for row in cardlist.pop().find_all('span'):\r\n count, card = card_re.search(row.parent.text).groups()\r\n sideboard.append((int(count), card.strip()))\r\n\r\n for col in cardlist:\r\n for row in col.find_all('span'):\r\n count, card = card_re.search(row.parent.text).groups()\r\n mainboard.append((int(count), card.strip()))\r\n\r\n return mainboard, sideboard, archtype", "def load_data(self, data_set):\n print(\"Started Loading the Data\")\n tagged_tokens = data_set.tagged_words()\n tokens = untag(tagged_tokens)\n\n # Get the list of words that appear less than 5 times in Corpus\n print(\"Get LT5's\")\n tokens = [token.lower() for token in tokens] # Convert to lower case\n freq_dist = FreqDist(tokens) # Compute the freq dist\n tokens_lt_5 = [word for word, count in freq_dist.items() if count < 5]\n\n # Delete words less than 5 and make the corpus insensitive\n print(\"Making data case insensitive\")\n token_range = range(len(tagged_tokens))\n indexed_tokens = OrderedDict(zip(token_range,tagged_tokens))\n updated_tagged_tokens = OrderedDict()\n for tagged_token_id, tagged_token in indexed_tokens.items():\n if tagged_token[0].lower() in tokens_lt_5:\n del indexed_tokens[tagged_token_id]\n else:\n temp = list()\n temp.append(tagged_token[0].lower())\n temp.append(tagged_token[1])\n temp = tuple(temp)\n updated_tagged_tokens[tagged_token_id] = temp\n tagged_tokens = list(updated_tagged_tokens.values())\n\n # Pickle the data for future purpose\n print(\"Pickling the Updated Corpus\")\n if data_set == brown:\n file_name = \"q5_brown_updated.pkl\"\n else:\n file_name = \"q5_treebank_updated.pkl\"\n pkl.dump((tagged_tokens, tokens_lt_5), open(file_name,'wb'))\n\n return tagged_tokens, tokens_lt_5", "def test_list_datasets(self, tmp_path):\n assert qml.data.list_datasets() == {\n \"qspin\": {\"Heisenberg\": {\"closed\": {\"chain\": [\"1x4\"]}}},\n \"qchem\": {\"H2\": {\"6-31G\": [\"0.46\", \"1.0\", \"1.16\"]}},\n }", "def __init__(self):\n self.deck = init_deck()\n self.shuffle()", "def load_dataset(args, corpus_type, shuffle):\n assert corpus_type in [\"train\", \"valid\", \"test\"]\n\n def _lazy_dataset_loader(pt_file, corpus_type):\n dataset = torch.load(pt_file)\n logger.info('Loading %s dataset from %s, number of examples: %d' 
%\n (corpus_type, pt_file, len(dataset)))\n return dataset\n\n # Sort the glob output by file name (by increasing indexes).\n pts = sorted(glob.glob(args.data_path + '.' + corpus_type + '.[0-9]*.pt'))\n if pts:\n if (shuffle):\n random.shuffle(pts)\n\n for pt in pts:\n yield _lazy_dataset_loader(pt, corpus_type)\n else:\n # Only one inputters.*Dataset, simple!\n pt = args.data_path + '.' + corpus_type + '.pt'\n yield _lazy_dataset_loader(pt, corpus_type)", "def main():\n\t\n\tDeck = []\n\tfor suite in range(suites):\n for typecard in range(1, typecard+1):\n cards.append(typecard)", "def mock_data_loader(csv_path):\n file_path = KINGDOM_CSV_PATH\n\n kingdomArr = []\n\n with open(file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n for row in reader:\n kingdomArr.append(Kingdom(row[0], row[1]))\n\n return kingdomArr", "def reshuffle(self):\n self.cards = []\n self.fill_standard_deck()\n self.shuffle()", "def load():\n global WHITE_CARDS\n global BLACK_CARDS\n\n with open('./cards.csv', 'r') as f:\n reader = csv.reader(f)\n for r in reader:\n black = r[0]\n white = r[1]\n if white:\n WHITE_CARDS.append(white)\n if black:\n BLACK_CARDS.append(black)", "def get_datasets(sim_args):\n if len(sim_args.data_folders) == 1 and sim_args.data_folders[0] == 'all':\n data_tags = [\n 'Webscope_C14_Set1',\n 'Webscope_C14_Set2',\n 'MSLR-WEB10k',\n 'NP2003',\n 'NP2004',\n 'HP2003',\n 'HP2004',\n 'TD2003',\n 'TD2004',\n 'MQ2007',\n 'MQ2008',\n 'OHSUMED',\n ]\n elif len(sim_args.data_folders) == 1 and sim_args.data_folders[0] == 'CIKM2017':\n data_tags = [\n 'MSLR-WEB10k',\n 'NP2003',\n 'NP2004',\n 'HP2003',\n 'HP2004',\n 'TD2003',\n 'TD2004',\n 'MQ2007',\n 'MQ2008',\n 'OHSUMED',\n ]\n elif len(sim_args.data_folders) == 1 and sim_args.data_folders[0] == 'letor64':\n data_tags = [\n 'NP2003',\n 'NP2004',\n 'HP2003',\n 'HP2004',\n 'TD2003',\n 'TD2004',\n ]\n # random.shuffle(data_tags)\n else:\n data_tags = sim_args.data_folders\n for data_tag in data_tags:\n assert data_tag in DATASET_COLLECTION, 'Command line input is currently not supported.'\n yield DATASET_COLLECTION[data_tag]", "def __init__(self):\n self.deck = []\n for n in range(1, 14):\n card1 = Card(n, \"diamond\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"spade\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"heart\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"club\")\n self.deck.append(card1)", "def get_cards(self):\n return [Flashcard.from_word(word) for word in self.get_words()]", "def choose_deck():\n deck_prompt = \"What deck would you like to play?\"\n choice = utils.choose_list(utils.DECKLIST, deck_prompt)\n deck = dicts.ALL_DECKS[choice]\n os.system('clear')\n return deck", "def test_loadData():\n \n sys = LVsystem.Ecosystem()\n \n sys.loadSetup('2Prey1Predator')\n \n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')", "def __init__(self, num_decks=1):\r\n\t\tself.reshuffle = False\r\n\t\tself.stack = []\r\n\t\tfor i in range(num_decks):\r\n\t\t\tdeck = Deck()\r\n\t\t\tfor card in deck.cards:\r\n\t\t\t\tself.stack.append(card)\r\n\t\trandom.shuffle(self.stack)\r\n\t\tself.runningcount 
= 0\r\n\t\tself.decksremaining = num_decks\r\n\t\tself.truecount = 0", "def list(self):\n return 'Decks available: \\n{}'.format(\"\\n\".join([\n 'Deck {}: {} ({} cards)'.format(deck['id'], deck['title'], len(deck['cards']))\n for key, deck in self.decks.items()\n ]))", "def load_ck_data(openface_dir, emotion_dir, feature_type='AUs'):\n features = load_OpenFace_features(openface_dir, features=feature_type)\n labels = load_CK_emotions(emotion_dir)\n\n return train_utils.dicts2lists(features, labels)", "def loadData(dataPath):\n\n if not os.path.isdir(dataPath):\n api = KaggleApi()\n api.authenticate()\n api.dataset_download_files(\n dataset='sumanthvrao/daily-climate-time-series-data',\n path=dataPath,\n quiet=True,\n unzip=True\n )\n\n filepath1 = os.path.join(dataPath, 'DailyDelhiClimateTrain.csv')\n df1 = DatasetUtility.sortByDate(pd.read_csv(filepath1, header='infer'))\n\n filepath2 = os.path.join(dataPath, 'DailyDelhiClimateTest.csv')\n df2 = DatasetUtility.sortByDate(pd.read_csv(filepath2, header='infer'))\n\n return pd.concat([df1, df2], axis=0, ignore_index=True)", "def test_get_cards_by_card_types(model: BDF) -> None:\n # setup to remove hackish cards\n card_types = list(model.card_count.keys())\n removed_cards = []\n for card_type in ['ENDDATA', 'INCLUDE', 'JUNK']:\n if card_type in model.card_count:\n removed_cards.append(card_type)\n for removed_card in removed_cards:\n card_types.remove(removed_card)\n\n removed_cards = []\n for card_type in card_types:\n if card_type not in model.cards_to_read:\n try:\n removed_cards.append(card_type)\n #print('removed %s' % card_type)\n except ValueError:\n msg = 'card_type=%s cant be removed' % card_type\n raise ValueError(msg)\n for removed_card in removed_cards:\n card_types.remove(removed_card)\n\n # we now have a list of card types we would like to extract\n # we'll get the associated cards\n card_dict = model.get_cards_by_card_types(card_types,\n reset_type_to_slot_map=False)\n for card_type, cards in card_dict.items():\n for card in cards:\n msg = 'this should never crash here...card_type=%s card.type=%s' % (\n card_type, card.type)\n if card_type != card.type and card_type + '1' != card.type:\n raise RuntimeError(msg)", "def test_should_return_correct_data(self, mocked_get_data_loader):\n\n mocked_get_data_loader.return_value = mock_data_loader\n\n correct_kingdoms = {\n 'SPACE': Kingdom('SPACE', 'Gorilla'),\n 'LAND': Kingdom('LAND', 'Panda'),\n 'WATER': Kingdom('WATER', 'Octopus'),\n 'ICE': Kingdom('ICE', 'Mammoth'),\n 'AIR': Kingdom('AIR', 'Owl'),\n 'FIRE': Kingdom('FIRE', 'Dragon')\n }\n\n result_kingdoms = KingdomRepositoryServiceCsvImpl().get_all_kingdoms()\n\n for i in correct_kingdoms:\n\n self.assertEqual(correct_kingdoms[i], result_kingdoms[i])", "def load_data():\n\n # Load data from categories\n comp = fetch_20newsgroups(subset='all', categories=['comp.graphics', 'comp.sys.mac.hardware', 'comp.windows.x'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n science = fetch_20newsgroups(subset='all', categories=['sci.crypt', 'sci.electronics', 'sci.space'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n politics = fetch_20newsgroups(subset='all', categories=['talk.politics.guns', 'talk.politics.mideast'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n religion = fetch_20newsgroups(subset='all', categories=['alt.atheism', 'soc.religion.christian'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n recreation = 
fetch_20newsgroups(subset='all', categories=['rec.autos', 'rec.sport.baseball', 'rec.sport.hockey'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n\n # Print total number of documents\n data_len = [len(comp.data), len(science.data), len(politics.data), len(recreation.data), len(religion.data)]\n\n # Subsample classes to create a balanced dataset\n sub_k = min(data_len)\n comp.data, comp.target = [list(t) for t in zip(*random.sample(list(zip(comp.data, comp.target)), sub_k))]\n science.data, science.target = [list(t) for t in zip(*random.sample(list(zip(science.data, science.target)), sub_k))]\n politics.data, politics.target = [list(t) for t in zip(*random.sample(list(zip(politics.data, politics.target)), sub_k))]\n religion.data, religion.target = [list(t) for t in zip(*random.sample(list(zip(religion.data, religion.target)), sub_k))]\n recreation.data, recreation.target = [list(t) for t in zip(*random.sample(list(zip(recreation.data, recreation.target)), sub_k))]\n\n # Subcategories labels\n subcat_comp = np.array(comp.target)\n subcat_scien = np.array(science.target) + len(comp.target_names)\n subcat_polit = np.array(politics.target) + len(comp.target_names) + len(science.target_names)\n subcat_rel = np.array(religion.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names)\n subcat_rec = np.array(recreation.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names) + len(religion.target_names)\n\n # Assign labels to train data based on categories\n y_comp = np.ones(len(comp.data))\n y_scien = 2*np.ones(len(science.data))\n y_polit = 3*np.ones(len(politics.data))\n y_rel = 4*np.ones(len(religion.data))\n y_rec = 5*np.ones(len(recreation.data))\n labels = np.concatenate((y_comp,y_scien,y_polit,y_rel,y_rec), axis=None)\n\n # Computers\n train_comp, test_comp, y_train_comp, y_test_comp, subcat_comp_train, subcat_comp_test = train_test_split(comp.data, y_comp, subcat_comp, test_size=0.2, random_state=42)\n train_comp, val_comp, y_train_comp, y_val_comp, subcat_comp_train, subcat_comp_val = train_test_split(train_comp, y_train_comp, subcat_comp_train, test_size=0.25, random_state=42)\n\n # Sciences\n train_scien, test_scien, y_train_scien, y_test_scien, subcat_scien_train, subcat_scien_test = train_test_split(science.data, y_scien, subcat_scien, test_size=0.2, random_state=42)\n train_scien, val_scien, y_train_scien, y_val_scien, subcat_scien_train, subcat_scien_val = train_test_split(train_scien, y_train_scien, subcat_scien_train, test_size=0.25, random_state=42)\n\n # Politics\n train_polit, test_polit, y_train_polit, y_test_polit, subcat_polit_train, subcat_polit_test = train_test_split(politics.data, y_polit, subcat_polit, test_size=0.2, random_state=42)\n train_polit, val_polit, y_train_polit, y_val_polit, subcat_polit_train, subcat_polit_val = train_test_split(train_polit, y_train_polit, subcat_polit_train, test_size=0.25, random_state=42)\n\n # Religion\n train_rel, test_rel, y_train_rel, y_test_rel, subcat_rel_train, subcat_rel_test = train_test_split(religion.data, y_rel, subcat_rel, test_size=0.2, random_state=42)\n train_rel, val_rel, y_train_rel, y_val_rel, subcat_rel_train, subcat_rel_val = train_test_split(train_rel, y_train_rel, subcat_rel_train, test_size=0.25, random_state=42)\n\n # Recreation\n train_rec, test_rec, y_train_rec, y_test_rec, subcat_rec_train, subcat_rec_test = train_test_split(recreation.data, y_rec, subcat_rec, test_size=0.2, random_state=42)\n train_rec, val_rec, 
y_train_rec, y_val_rec, subcat_rec_train, subcat_rec_val = train_test_split(train_rec, y_train_rec, subcat_rec_train, test_size=0.25, random_state=42)\n\n # Corpus from all categories in train set\n newsgroups_train = train_comp + train_scien + train_polit + train_rel + train_rec\n #print(f\"Total number of documents in all categories in the train set is {len(newsgroups_train)}.\")\n train_labels = np.concatenate((y_train_comp,y_train_scien,y_train_polit,y_train_rel,y_train_rec), axis=None)\n #print(train_labels.shape)\n train_subcat = np.concatenate((subcat_comp_train,subcat_scien_train,subcat_polit_train,subcat_rel_train,subcat_rec_train), axis=None)\n #print(train_subcat.shape)\n\n # Corpus from all categories in test set\n newsgroups_test = test_comp + test_scien + test_polit + test_rel + test_rec\n test_labels = np.concatenate((y_test_comp,y_test_scien,y_test_polit,y_test_rel,y_test_rec), axis=None)\n test_subcat = np.concatenate((subcat_comp_test,subcat_scien_test,subcat_polit_test,subcat_rel_test,subcat_rec_test), axis=None)\n\n # Corpus from all categories in validation set\n newsgroups_val = val_comp + val_scien + val_polit + val_rel + val_rec\n val_labels = np.concatenate((y_val_comp,y_val_scien,y_val_polit,y_val_rel,y_val_rec), axis=None)\n val_subcat = np.concatenate((subcat_comp_val,subcat_scien_val,subcat_polit_val,subcat_rel_val,subcat_rec_val), axis=None)\n\n # Data Split\n total = len(test_labels) + len(val_labels) + len(train_labels)\n\n return newsgroups_train, train_labels, newsgroups_test, test_labels, newsgroups_val, val_labels, train_subcat, test_subcat, val_subcat", "def _collect_quizzes():\n data_path = join(dirname(abspath(__file__)), 'data')\n for _, _, filenames in os.walk(data_path):\n for filename in filenames:\n if filename.endswith('.yml'):\n quiz_type = filename.replace('.yml', '').capitalize()\n QUIZ_DICT[quiz_type] = []\n with open(join(data_path, filename), encoding='utf-8') as f:\n data = yaml.load(f)\n for class_name, settings in data.items():\n Q = type(class_name, (Quiz, ), settings)\n QUIZ_DICT[quiz_type].append(Q)\n QUIZ_DICT_FLAT[class_name] = Q", "def evalcards(cardA, cardB, cardC, cardD):\n array = []\n ranks = []\n spadessort = []\n cardsinsuit = 1\n # BASESUIT definitions\n if cardA[-3:] == \"SPA\":\n basesuit = suitspades\n if cardA[-3:] == \"HEA\":\n basesuit = suithearts\n if cardA[-3:] == \"DIA\":\n basesuit = suitdiamonds\n if cardA[-3:] == \"CLB\":\n basesuit = suitclubs\n if cardB in basesuit:\n cardsinsuit += 1\n if cardC in basesuit:\n cardsinsuit += 1\n if cardD in basesuit:\n cardsinsuit += 1\n #BEGIN SORTING CARDS\n cardBBB = cardB\n cardCCC = cardC\n cardDDD = cardD\n if cardB not in basesuit:\n cardBBB = basesuit[12]\n if cardC not in basesuit:\n cardCCC = basesuit[12]\n if cardD not in basesuit:\n cardDDD = basesuit[12]\n array += [str(basesuit.index(cardA))]\n if len(str(basesuit.index(cardA))) == 1:\n del array[0]\n array += [\"0\"+str(basesuit.index(cardA))]\n array += [str(basesuit.index(cardBBB))]\n if len(str(basesuit.index(cardBBB))) == 1:\n del array[1]\n array += [\"0\"+str(basesuit.index(cardBBB))]\n array += [str(basesuit.index(cardCCC))]\n if len(str(basesuit.index(cardCCC))) == 1:\n del array[2]\n array += [\"0\"+str(basesuit.index(cardCCC))]\n array += [str(basesuit.index(cardDDD))]\n if len(str(basesuit.index(cardDDD))) == 1:\n del array[3]\n array += [\"0\"+str(basesuit.index(cardDDD))]\n array.sort()\n for x in range(0,cardsinsuit):\n ranks += [basesuit[int(array[x])]]\n #CHECKING FOR NOT IN SUIT AND FOR 
SPADES\n if cardB not in basesuit:\n if cardB in spades:\n spadessort += [cardB]\n else:\n ranks += [cardB]\n if cardC not in basesuit:\n if cardC in spades:\n if (cardB in spades) and (spades.index(cardC) < spades.index(cardB)):\n spadessort = listinsert(spadessort, 0, cardC)\n elif (cardB in spades) and (spades.index(cardC) > spades.index(cardB)):\n spadessort += [cardC]\n else:\n spadessort += [cardC]\n else:\n ranks += [cardC]\n if cardD not in basesuit:\n if cardD in spades:\n if (cardB in spades) and (cardC in spades):\n if (spades.index(cardD) < spades.index(cardC)) and (spades.index(cardD) < spades.index(cardB)):\n spadessort = listinsert(spadessort, 0, cardD)\n elif ((spades.index(cardD) < spades.index(cardC)) and (spades.index(cardD) > spades.index(cardB))) or ((spades.index(cardD) > spades.index(cardC)) and (spades.index(cardD) < spades.index(cardB))):\n spadessort = listinsert(spadessort, 1, cardD)\n elif (spades.index(cardD) > spades.index(cardC)) and (spades.index(cardD) > spades.index(cardB)):\n spadessort += [cardD]\n elif (cardB in spades) and (cardC not in spades):\n if spades.index(cardD) < spades.index(cardB):\n spadessort = listinsert(spadessort, 0, cardD)\n if spades.index(cardD) > spades.index(cardB):\n spadessort += [cardD]\n elif (cardB not in spades) and (cardC in spades):\n if spades.index(cardD) < spades.index(cardC):\n spadessort = listinsert(spadessort, 0, cardD)\n if spades.index(cardD) > spades.index(cardC):\n spadessort += [cardD]\n else:\n spadessort += [cardD]\n else:\n ranks += [cardD]\n ranks = spadessort + ranks\n return(ranks)", "def parse_decklist(manastack_id: str, zones: list[str] = [\"commander\", \"mainboard\"]):\r\n decklist = Decklist()\r\n warnings = []\r\n ok = True\r\n\r\n r = requests.get(f\"https://manastack.com/api/decklist?format=json&id={manastack_id}\")\r\n if r.status_code != 200:\r\n raise (ValueError(f\"Manastack returned statuscode {r.status_code}\"))\r\n\r\n data = r.json()\r\n for zone in zones:\r\n if len(data[\"list\"][zone]) > 0:\r\n decklist.append_comment(zone.capitalize())\r\n for item in data[\"list\"][zone]:\r\n # Extract relevant data\r\n count = item[\"count\"]\r\n card_name = item[\"card\"][\"name\"]\r\n set_id = item[\"card\"][\"set\"][\"slug\"]\r\n collector_number = item[\"card\"][\"num\"]\r\n\r\n # Validate card name\r\n card_name, warnings_name = validate_card_name(card_name)\r\n if card_name is None:\r\n decklist.append_comment(card_name)\r\n warnings.extend([(decklist.entries[-1], level, msg) for level, msg in warnings_name])\r\n ok = False\r\n continue\r\n\r\n # Validate card print\r\n card, warnings_print = validate_print(card_name, set_id, collector_number)\r\n\r\n decklist.append_card(count, card)\r\n warnings.extend([(decklist.entries[-1], level, msg) for level, msg in warnings_name + warnings_print])\r\n\r\n if zone != zones[-1]:\r\n decklist.append_comment(\"\")\r\n\r\n decklist.name = data[\"info\"][\"name\"]\r\n\r\n return decklist, ok, warnings", "def get_data_loaders(opt):\n return find_dataloader_using_name(opt.dataloader)(opt).load_data()", "def load_data(args, sample_idx):\n # The preference scores are not used for the MetFrag comparison (MetFrag uses LogP values as preference scores\n # and calculates those internally. 
However, due to the implementation of 'load_dataset_EA' we need to specify some\n # preference model.\n pref_model = {\"training_dataset\": \"MEOH_AND_CASMI_JOINT\", \"keep_test_molecules\": False,\n \"estimator\": \"ranksvm\", \"molecule_representation\": \"substructure_count\"}\n\n with sqlite3.connect(\"file:\" + args.database_fn + \"?mode=ro\", uri=True) as db:\n challenges, candidates = load_dataset_EA(\n db, participant=args.participant, prefmodel=pref_model, ion_mode=args.ion_mode,\n max_n_cand=args.max_n_cand, sort_candidates_by_ms2_score=args.sort_candidates_by_ms2_score,\n sample_idx=sample_idx)\n\n return challenges, candidates", "def shuffled_deck(deck):\n random.shuffle(deck)\n return deck", "def get_all_cards(self, filter='open'):\n print('Searching Trello cards..\\n')\n done_sources = []\n for list in self.my_lists:\n for card in list.list_cards(card_filter=filter):\n name = card.name.split()[0]\n done_sources.append(card)\n return done_sources", "async def get_deck_fixed_labware(\n self,\n deck_definition: DeckDefinitionV2\n ) -> List[DeckFixedLabware]:\n labware = []\n\n for fixture in deck_definition[\"locations\"][\"fixtures\"]:\n labware_id = fixture[\"id\"]\n load_name = fixture.get(\"labware\") # type: ignore[misc]\n slot = fixture.get(\"slot\") # type: ignore[misc]\n\n if load_name is not None and slot is not None:\n location = DeckSlotLocation(slot=DeckSlotName.from_primitive(slot))\n definition = await self._labware_data.get_labware_definition(\n load_name=load_name,\n namespace=\"opentrons\",\n version=1,\n )\n\n labware.append(\n DeckFixedLabware(\n labware_id=labware_id,\n definition=definition,\n location=location,\n )\n )\n\n return labware", "def deck_get_decks():\n log_request(request)\n\n data = request.json\n \n if not valid_params(['username', 'session_id'], data):\n logging.debug(\"Missing parameters\")\n return jsonify({'error' : 500})\n \n username = request.json['username']\n session_id = request.json['session_id']\n \n # verify session\n if not user.verify(username, session_id):\n logging.debug(\"Invalid username or session\")\n return jsonify({'error' : 101})\n \n uId = user.id_name(username)\n\n decks = deck.get_decks_by_uId(uId)\n ret = {'decks' : []}\n for d in decks:\n ret['decks'].append({'name' : d[0],\n 'deck_id' : d[1],\n 'description' : d[2]})\n\n ret['error'] = 0\n return jsonify(ret)", "def getAllCards(self):\n return self._cards", "def create_deck(self):\n\n id_already_use, deck, hand = [], [], []\n\n for _ in range(self.number_domino - self.hand_size):\n\n # We generate a domino and keep its id in id_alread_use\n # then we make sure to ony keep new id\n\n id = (randint(0, 6), randint(0, 6))\n while id in id_already_use:\n id = (randint(0, 6), randint(0, 6))\n deck.append(Domino(id[0], id[1]))\n id_already_use.append(id)\n\n for _ in range(self.hand_size):\n id = (randint(0, 6), randint(0, 6))\n while id in id_already_use:\n id = (randint(0, 6), randint(0, 6))\n hand.append(Domino(id[0], id[1]))\n id_already_use.append(id)\n\n return deck, hand", "def load_deck_obj(deckFile, deckObj):\n\n deckObj = []\n\n # open up the deck file, and start adding lines to the deckObj\n with open(deckFile, 'r') as deckItem:\n for deckLine in deckItem:\n # print([deckLine])\n deckObj += [deckLine]\n\n # print the first 4 card names in the deck.\n # for card in deckObj[:4]:\n # print(card)\n\n # print the length of the json cards, and the deck's lines\n # print('deck item lines: {}'.format(len(deckObj)))\n \n return", "def load_data():\n if 
_LOCATIONS_BY_ID:\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID\n\n # We need to read the locations in order of country -> admin level 1 -> admin level 2 -> city.\n # This is so that the higher resolution locations can look up the lower resolution locations\n # that they belong to, and compute the necessary fields.\n countries_by_code = _load_country_data(_DATA_FILES['country'])\n admin1_by_code = _load_admin1_data(_DATA_FILES['admin_1'], countries_by_code)\n admin2_by_code = _load_admin2_data(_DATA_FILES['admin_2'], countries_by_code, admin1_by_code)\n _load_city_data(_DATA_FILES['city'], countries_by_code, admin1_by_code, admin2_by_code)\n _add_alternate_names(_DATA_FILES['alt_wiki_names'])\n _add_estimated_importances(_DATA_FILES['estimated_importance'])\n\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID", "def available_datasets(self) -> List[str]:\n return sorted(self.__by_name.keys())", "def create_deck():\r\n deck = []\r\n faces = [2,3,4,5,6,7,8,9,10,\r\n 'Jack','Queen','King','Ace']\r\n suits = ['Spades', 'Diamonds', 'Clubs', 'Hearts']\r\n for face in faces:\r\n for suit in suits:\r\n # Creates a card-tuple and adds it to the deck.\r\n deck.append((face, suit))\r\n \r\n return deck", "def generate_card_deck() -> [Card]:\n\n card_deck = []\n\n for card_color in CardColor:\n for card_value in CardValue:\n card_deck.append(Card(card_color, card_value))\n\n return Shuffle.__shuffle_card_deck(card_deck * Shuffle._CARD_DECK_MULTIPLIER)", "def _load_raw_datashards(shard_num, nb_collaborators): \n train_obj = torchvision.datasets.CIFAR10('~/.CIFAR10', train=True, download=True) \n test_obj = torchvision.datasets.CIFAR10('~/.CIFAR10', train=False, download=True) \n x_train = train_obj.data\n y_train = np.asarray(train_obj.targets)\n x_test = test_obj.data\n y_test = np.asarray(test_obj.targets)\n # fix the label dimension to be (N,)\n y_train = y_train.reshape(-1)\n y_test = y_test.reshape(-1) \n \n # create the shards\n X_train_shards = x_train[shard_num::nb_collaborators]\n y_train_shards = y_train[shard_num::nb_collaborators]\n \n X_test_shards = x_test[shard_num::nb_collaborators]\n y_test_shards = y_test[shard_num::nb_collaborators]\n return (X_train_shards, y_train_shards), (X_test_shards, y_test_shards)", "def fetch_cifar100_efficient_kd_dataloaders(args):\n\n loaders = {}\n for mode in [\"train\", \"test\"]:\n dataset = CachedKDDataset(mode=mode)\n loaders[mode] = \\\n torch.utils.data.DataLoader(\n dataset,\n batch_size=args.batch_size,\n shuffle=(mode == \"train\"),\n num_workers=4,\n collate_fn=dict_collate\n )\n\n return loaders[\"train\"], loaders[\"test\"]", "def get_deck_as_str_list(self):\n\n ls = []\n for card in self.deck:\n ls.append(card.get_card())\n return ls", "def get_faces_loaders(batch_size=128, test=True, data_path=\"./data/\"):\n\n dat = np.load(data_path + \"rotated_faces_data.npz\")\n train_images = torch.FloatTensor(dat['train_images'])\n train_targets = torch.FloatTensor(dat['train_angles'])\n\n traindata = torch.utils.data.TensorDataset(train_images, train_targets)\n trainloader = torch.utils.data.DataLoader(traindata, batch_size=batch_size,\n shuffle=True)\n\n if test:\n test_images = torch.FloatTensor(dat['test_images'])\n test_targets = torch.FloatTensor(dat['test_angles'])\n\n testdata = torch.utils.data.TensorDataset(test_images, test_targets)\n testloader = torch.utils.data.DataLoader(testdata, batch_size=batch_size)\n\n return trainloader, testloader\n\n return trainloader", "def get_available_datasets():\n files = [file for file in 
glob.glob(os.path.join(MODULE_ROOT, \"datasets/*.json\"))]\n datasets = []\n for file in files:\n with open(file, \"r\") as f:\n dataset_info = json.load(f)\n datasets.append(dataset_info)\n return datasets", "def get_datasets(FIELDS='all'):\n dsinfostr = fork_and_get_output(\"zfs list -H -o {0}\".format(FIELDS).split())\n header = get_zfs_ds_header()\n dsinfo = dsinfostr.splitlines()\n dsobjs = []\n for dsstr in dsinfo:\n dsobjs.append(DataZFS(dsstr, header, 'dataset'))\n return dsobjs", "def fetch_review(self):\n c = self.db.cursor()\n c.execute(\"\"\"SELECT * FROM cards\n WHERE date_last_reviewed < (DATETIME('now', 'localtime', '-8 hours'))\n OR correct = 0\"\"\")\n rows = c.fetchall()\n cards = [\n Card(\n id=id,\n card_type=card_type,\n text=text,\n created=created,\n uri=uri,\n updated=updated,\n difficulty=difficulty,\n days_between=days_between,\n date_last_reviewed=date_last_reviewed,\n correct=correct,\n )\n for id, card_type, text, uri, created, updated, difficulty, days_between, date_last_reviewed, correct in rows\n ]\n cards = filter(lambda card: card.percent_overdue >= 1, cards)\n cards = sorted(cards, key=lambda card: card.percent_overdue)\n\n return cards[:20]", "def get_results(path: str, threshold1: float, threshold2: float, quantiles: int, clear_cache: bool = True):\n create_cache_dirs()\n\n correlation_clustering = load_dataset(path, threshold1, threshold2, quantiles, clear_cache=clear_cache)\n print(\"DATA LOADED\")\n\n correlation_clustering.find_matches()" ]
[ "0.5819562", "0.5731613", "0.544646", "0.53726274", "0.5366942", "0.5298741", "0.5272846", "0.52254647", "0.5140814", "0.50974256", "0.50899714", "0.50865227", "0.5069896", "0.50649506", "0.5054334", "0.50400245", "0.5027452", "0.50256103", "0.50096786", "0.50049436", "0.4990609", "0.49646613", "0.4954556", "0.49482644", "0.49426016", "0.4938606", "0.49299636", "0.4926834", "0.49254796", "0.4924566", "0.49029097", "0.49005836", "0.4890899", "0.48813173", "0.48632264", "0.48506513", "0.48446253", "0.4842137", "0.48375872", "0.4825627", "0.48236677", "0.48153964", "0.48141256", "0.478497", "0.47720465", "0.47677535", "0.47603887", "0.4760373", "0.4759246", "0.4759064", "0.47521466", "0.47501683", "0.47412425", "0.47400504", "0.47339097", "0.47301137", "0.47273877", "0.47133395", "0.47133297", "0.47117725", "0.4707252", "0.46988466", "0.46984452", "0.46865392", "0.46790886", "0.46712542", "0.46705425", "0.4667598", "0.4657107", "0.4652513", "0.46463388", "0.46448675", "0.46294528", "0.46276632", "0.4627134", "0.46239272", "0.46235293", "0.46221435", "0.46193847", "0.46141276", "0.4612455", "0.46118468", "0.46118212", "0.46020073", "0.46011117", "0.4600475", "0.45985457", "0.45974976", "0.4595586", "0.45931476", "0.45914948", "0.45896", "0.45870155", "0.4584769", "0.45845485", "0.4584389", "0.45843363", "0.45831826", "0.45785546", "0.4576486" ]
0.648076
0
Calculates the distance matrix of a list of Deck or FuzzyDeck objects. Returns the condensed (vector-form) distance vector.
def calculate_distance_matrix(played_decks: Union[List[FuzzyDeck], List[Deck]], measure: str):
    deck_data = np.array(played_decks).reshape(len(played_decks), 1)
    if measure == "jaccard":
        dist = pdist(deck_data, lambda u, v: u[0].jaccard_distance(v[0]))
    elif measure == "euclidean":
        dist = pdist(deck_data, lambda u, v: u[0].euclidean_distance(v[0]))
    else:
        raise ValueError("Unknown distance measure {}. ".format(measure) +
                         "Please choose one of the following distance measures ['euclidean','jaccard']")
    return dist
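A minimal usage sketch for the function above (not part of the original dataset row). It assumes `calculate_distance_matrix` and its imports (`numpy`, `scipy.spatial.distance.pdist`, the `FuzzyDeck` class it references) are already defined in the surrounding module; the `Deck` class and card names below are hypothetical stand-ins for illustration. It also shows how the condensed vector returned by `pdist` can be expanded to a full square matrix with `scipy.spatial.distance.squareform`.

import numpy as np
from scipy.spatial.distance import squareform


class Deck:
    """Hypothetical stand-in for the Deck/FuzzyDeck classes referenced above."""

    def __init__(self, cards):
        self.cards = frozenset(cards)

    def jaccard_distance(self, other):
        # 1 - |A intersect B| / |A union B|
        union = self.cards | other.cards
        return 0.0 if not union else 1.0 - len(self.cards & other.cards) / len(union)

    def euclidean_distance(self, other):
        # Euclidean distance between binary card-indicator vectors
        names = sorted(self.cards | other.cards)
        u = np.array([float(n in self.cards) for n in names])
        v = np.array([float(n in other.cards) for n in names])
        return float(np.linalg.norm(u - v))


decks = [Deck({"Fireball", "Frostbolt"}),
         Deck({"Fireball", "Polymorph"}),
         Deck({"Swipe", "Claw"})]

condensed = calculate_distance_matrix(decks, "jaccard")  # vector of length n*(n-1)/2
square = squareform(condensed)                           # symmetric n x n distance matrix
print(square)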
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDistanceMatrix(self):\n v = self.getVectors()\n vLis = v.keys()\n N = len(v.keys())\n D = np.zeros([N, N], dtype=np.float32)\n print(N)\n for i in range(N):\n print(\"%d/%d\" %(i, N))\n D[i, i] = 1\n for j in range(i + 1, N):\n dist = self.cosin_sim_pairs(v[vLis[i]], v[vLis[j]])\n D[i, j] = dist\n D[j, i] = dist\n return D", "def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist", "def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)", "def calculate_distance_matrix(atomlist):\n distlist = []\n for atom in atomlist:\n atomdict = {}\n for partner in atomlist:\n if not str(int(partner[0][1])) in atomdict.keys():\n atomdict[str(int(partner[0][1]))] = []\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n else:\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n atomdict[str(int(partner[0][1]))].sort()\n\n distlist.append(atomdict)\n\n return distlist", "def _distance_matrix(self):\n\n # Log the type of metric being used in Sequencing\n logger.info('Using {} Distance'.format(self.measure))\n\n # Convert the nodal coordinate tuples to a np.array\n coords = np.vstack(map(np.array, self.coords.values()))\n \n if self.measure == 'haversine':\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n haversine = lambda coord: get_hav_distance(coords[:, 0], coords[:, 1], *coord) \n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(haversine, coords))\n\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n euclidean = lambda coord: get_euclidean_dist(coords, coord)\n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(euclidean, coords))", "def DistanceMatrices(self):\r\n return self._dms", "def compute_distances(Ls):\n if not isinstance(Ls, list):\n Ls = [Ls]\n\n dists = []\n for L in Ls:\n N,D = L.shape\n # 1xNxD - Nx1xD (L1 distance)\n dist = (np.abs(L[None,:,:] - L[:,None,:])).sum(axis=2)\n dists.append(dist)\n\n return dists", "def __build_distance_matrix(self):\n for i in range(0, len(self.__corpus)):\n doc_i = self.__corpus[i]\n for j in range(i + 1, len(self.__corpus)):\n doc_j = self.__corpus[j]\n distance = doc_i.calc_distance(doc_j)\n self.__distance_matrix.append(distance)", "def distance(self, features, targets):\n cost_matrix = np.zeros((len(targets), len(features)))\n for i, target in enumerate(targets):\n cost_matrix[i, :] = self._metric(self.samples[target], features)\n return cost_matrix", "def distance(self, features, targets):\n cost_matrix = np.zeros((len(targets), len(features)))\n for i, target in enumerate(targets):\n cost_matrix[i, :] = self._metric(self.samples[target], features)\n return cost_matrix", "def get_distances(self):\n N = len(self.cells) # Number of cells\n distances = np.zeros([N, N]) # distances between cells\n positions = self.position_matrix() # positions of cells \n \n # get distances between cells (exploit symmetry between upper and lower triangular form)\n for i, position in 
enumerate(positions[:-1, :]): # Iterate matrix except the last one\n directions = positions[i+1:, :] - position # direction from i to j > i\n distances[i, i+1:] = np.linalg.norm(directions, axis=1) # length of directions\n \n return distances + distances.T # Add lower triangle of matrix to upper ", "def compute_distances(self, X):\n #print(X.shape, self.Xtr.shape)\n dists = np.zeros((X.shape[0], self.Xtr.shape[0]))\n for i in range(X.shape[0]):\n X_r = np.tile(X[i], (self.Xtr.shape[0], 1))\n dists[i] = np.sqrt(np.sum(np.square(self.Xtr - X_r), axis = 1))\n #print(dists.shape)\n return dists", "def calcDistance(self):\n # Initialize the distance matrix\n arr = np.repeat(0, self.num_col)\n result_mat = np.repeat(arr, self.num_col)\n result_mat = np.reshape(result_mat, (self.num_col, self.num_col))\n trinary_mat = self.df_trinary.values\n for left_val in TRINARY_VALUES:\n left_func = lambda v: 1 if v==left_val else 0\n left_mat = np.transpose(np.vectorize(left_func)(trinary_mat))\n for right_val in TRINARY_VALUES:\n if left_val == right_val:\n continue\n right_func = lambda v: 1 if v==right_val else 0\n right_mat = np.vectorize(right_func)(trinary_mat)\n # Count the number of occurrences of this combination of values\n # by doing a matrix multiply\n new_mat = np.matmul(left_mat, right_mat)\n # Multiply by the squared distance between the values\n squared_distance = (left_val - right_val)**2\n new_mat = new_mat*squared_distance\n # Accumulate the result\n result_mat = result_mat + new_mat\n # Convert to dataframe\n result_mat = np.vectorize(lambda v: np.sqrt(v)) (result_mat)\n self.df_distance = pd.DataFrame(result_mat, columns=self.columns,\n index=self.columns)", "def distance_matrix(data):\n D = numpy.zeros( (data.shape[0], data.shape[0]) )\n for i in xrange(data.shape[0]):\n for j in xrange(i):\n D[i,j] = numpy.linalg.norm(data[i,:]-data[j,:])\n D[j,i] = D[i,j]\n\n return D", "def _calculate_distances(self):\n all_dists = []\n for ref in range(len(self.atoms)):\n if self.atoms[ref].symbol in self.exclude:\n continue\n indices = list(range(ref+1, len(self.atoms)))\n indices = self._filter_excluded(indices)\n if len(indices) == 0:\n continue\n dists = self.atoms.get_distances(ref, indices, mic=True)\n all_dists += list(dists)\n \n # Normalize by the mean distance\n return np.array(all_dists)/np.mean(all_dists)", "def _calc_distance(self, X):\n distances = np.zeros((X.shape[0], self.n_clusters))\n print(distances.shape)\n for i, centroid in enumerate(self.centroids):\n distances[:, i] = np.linalg.norm(X - centroid, axis=1)\n return distances", "def getDistancesWithNames(twoDList):\n matrix = []\n for i in range(0,len(twoDList)):\n for j in range(len(twoDList) - len(twoDList) + i):\n SD = determineIdenticalBases(data[i][1], data[j][1])\n temp = []\n if SD[1] != 0:\n p = calculateP(SD[0]+SD[1], SD[1])\n temp.append(data[i][0])\n temp.append(data[j][0]) \n temp.append(estimateMutationsPerSite(p))\n matrix.append(temp)\n return matrix", "def getDistanceMatrix(self):\n return self.pointcloud.distmat", "def _calc_distance_features(self):\n d = ()\n for dx, dy in DIRECTIONS:\n if dx and dy:\n d += (list(self.__calc_distance(direction_x=dx, direction_y=dy)), )\n elif dx:\n tmp, _, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n elif dy:\n _, tmp, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n\n self.dist_features = d\n\n self.direc_dist = self.__calc_direc_distance()", "def compute_distance(self, transpose=False):\n\n # Calculate distance matrix\n 
if transpose:\n distance_matrix = pdist(self.matrix.T, self.distance)\n else:\n distance_matrix = pdist(self.matrix, self.distance)\n\n # Remove NaNs\n distance_matrix[np.isnan(distance_matrix)] = 1.0\n\n return distance_matrix", "def get_distance_matrix(self, points):\n return points[:, :, np.newaxis, :]-points[:, np.newaxis, :, :]", "def test_distances_with_vector_input(self):\n input_vector = self.vectors['dog.n.01']\n distances = self.vectors.distances(input_vector, ['mammal.n.01', 'dog.n.01'])\n self.assertTrue(np.allclose(distances, [4.5278745, 0]))\n\n distances = self.vectors.distances(input_vector)\n self.assertEqual(len(distances), len(self.vectors.vocab))\n self.assertTrue(np.allclose(distances[-1], 10.04756))", "def norm_dist(face_vectors, f_vector):\n if len(face_vectors) == 0:\n return np.empty((0))\n return np.linalg.norm(face_vectors - f_vector, axis=1)", "def distance_matrix(d1, d2=None):\n if d2 is None:\n dists = np.zeros(shape=(d1.shape[0], d1.shape[0]))\n for i in range(dists.shape[0]):\n dists[i] = (((d1 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n else:\n dists = np.zeros(shape=(d1.shape[0], d2.shape[0]))\n for i in range(d1.shape[0]):\n dists[i] = (((d2 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n return dists", "def distances(self):\n self._sort_measurements()\n return self._distances", "def calcDistanceList(work_list):\n distance_list = []\n for swap in work_list: # for every work item find distance\n distance_list.append(Cluster.calcDistance(*swap))\n return distance_list", "def distances(self):", "def nm_dist_mat(self):\n mat = np.zeros([self.N, self.M])\n for n in range(self.N):\n for m in range(self.M):\n mat[n, m] = distance(self.N_coords[n], self.M_coords[m])\n return mat", "def distance_matrix(sunspots1, sunspots2):\n \n N1 = len(sunspots1)\n N2 = len(sunspots2)\n\n distance_matrix = np.zeros((N1, N2))\n\n for i in list(range(N1)):\n for j in list(range(N2)):\n\n distance_matrix[i, j] = euclidean_dist(sunspots1[i], sunspots2[j])\n\n return distance_matrix", "def DistanceMatrices(self, dms):\r\n if not isinstance(dms, ListType):\r\n raise TypeError(\"The item passed in as the new list was not a \"\r\n \"list data type.\")\r\n if self._num_dms >= 0 and len(dms) != self._num_dms:\r\n raise ValueError(\"Cannot set %d distance matrices. 
Must provide \"\r\n \"exactly %d distance matrices.\" % (len(dms),\r\n self._num_dms))\r\n for dm in dms:\r\n if not isinstance(dm, DistanceMatrix):\r\n raise TypeError(\r\n 'Invalid type (%s); expected DistanceMatrix' %\r\n dm.__class__.__name__)\r\n if self._min_dm_size >= 0 and dm.shape[0] < self._min_dm_size:\r\n raise ValueError(\"Distance matrix of size %dx%d is smaller \"\r\n \"than the minimum allowable distance matrix \"\r\n \"size of %dx%d for this analysis.\" %\r\n (dm.shape[0], dm.shape[0], self._min_dm_size,\r\n self._min_dm_size))\r\n self._dms = dms", "def getDistanceMatrix(self):\n return self.distmat.as_matrix()", "def _derive_euclidean_dm(self, cat_mat, dim):\r\n res_mat = []\r\n\r\n for i in range(dim):\r\n res_mat.append([0 for k in range(dim)])\r\n for j in range(i):\r\n res_mat[i][j] = self._vector_dist(cat_mat[i], cat_mat[j])\r\n res_mat[j][i] = res_mat[i][j]\r\n\r\n return DistanceMatrix(res_mat, self.DistanceMatrices[0].ids)", "def test_distance(self):\n self.assertTrue(np.allclose(self.vectors.distance('dog.n.01', 'mammal.n.01'), 4.5278745))\n self.assertEqual(self.vectors.distance('dog.n.01', 'dog.n.01'), 0)", "def Dvec(self):\n return vec(-self.distance)", "def distance_unsafe(self, *mass_functions):\n #Submethod to get distance between self and one mass function:\n def distance_one_mass(mass_function):\n #Get the jaccard index matrix:\n difference = self.difference_unsafe(mass_function)\n matrix = {}\n for e1 in difference:\n matrix[e1] = {}\n for e2 in difference:\n if (not e1.is_empty()) or (not e2.is_empty()):\n matrix[e1][e2] = e1.conjunction_unsafe(e2).cardinal / e1.disjunction_unsafe(e2).cardinal\n else:\n matrix[e1][e2] = 1\n \n #Compute the distance as sqtr(0.5 * diffT * matrix * diff):\n distance = 0\n temp = {}\n for e1 in difference:\n temp[e1] = 0\n for e2 in difference:\n temp[e1] += difference[e2] * matrix[e1][e2]\n for e1 in difference:\n distance += temp[e1] * difference[e1]\n return math.sqrt(0.5 * distance)\n\n #Get the distance between self and the provided set of mass functions:\n distance = 0\n for mass_function in mass_functions:\n distance += distance_one_mass(mass_function)\n return round(distance / len(mass_functions), 6)", "def compute_distances(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n \n sum_test_square = np.sum(np.square(X), axis=1).reshape(-1, 1)\n sum_train_square = np.sum(np.square(self.X_train), axis=1).reshape(-1, 1)\n product_test_train = X @ self.X_train.T\n \n sum_test_square = np.repeat(sum_test_square, num_train, axis=1)\n sum_train_square = np.repeat(sum_train_square, num_test, axis=1).T\n \n dists_square = sum_test_square - 2 * product_test_train + sum_train_square\n \n dists = np.sqrt(dists_square)\n \n return dists", "def test_distances(self):\n distances = self.vectors.distances('dog.n.01', ['mammal.n.01', 'dog.n.01'])\n self.assertTrue(np.allclose(distances, [4.5278745, 0]))\n\n distances = self.vectors.distances('dog.n.01')\n self.assertEqual(len(distances), len(self.vectors.vocab))\n self.assertTrue(np.allclose(distances[-1], 10.04756))", "def distance_matrix(dnas: Collection[str], metric=hamming_distance, relative=True, as_ndarray=False):\n n = len(dnas)\n result = [[0] * n for _ in range(n)]\n for pair in itertools.combinations(zip(range(n), dnas), r=2):\n (idx1, dna1), (idx2, dna2) = pair\n distance = metric(dna1, dna2)\n distance = distance / max(len(dna1), len(dna2)) if relative else distance\n result[idx1][idx2] = distance\n 
result[idx2][idx1] = distance\n if as_ndarray:\n result = np.asarray(result)\n return result", "def calc_dist_matrix(self,verbose=False):\n\n print(\"Calculating distance matrix.\"); sys.stdout.flush()\n\n nrow = self.data_vector.shape[0]\n self.dist_matrix = np.zeros((nrow, nrow),dtype=float)\n for i in range(nrow):\n if verbose:\n if i % 1000 == 0:\n print(\"Row\",i,\"of\",nrow)\n sys.stdout.flush()\n\n for j in range(i + 1, nrow):\n self.dist_matrix[i,j] = self._pairwise_dist(self.data_vector[i],self.data_vector[j])\n self.dist_matrix[j,i] = self.dist_matrix[i,j]\n \n self.dist_frame = pd.DataFrame(self.dist_matrix,\n index = self.seq_strings,\n columns = self.seq_strings)", "def calc_dist_matrix(self):\n\n self.dist_matrix = spatial.distance.squareform(spatial.distance.pdist(self.data_vector,metric=\"hamming\"))\n\n self.dist_frame = pd.DataFrame(self.dist_matrix,\n index = self.seq_strings,\n columns = self.seq_strings)", "def distance_matrix(cities):\n\n return [[city1.distance(city2) for city2 in cities]\n for city1 in cities]", "def distance(self, *mass_functions):\n #Submethod to get distance between self and one mass function:\n def distance_one_mass(mass_function):\n #Get the jaccard index matrix:\n difference = self.difference(mass_function)\n matrix = {}\n for e1 in difference:\n matrix[e1] = {}\n for e2 in difference:\n if (not e1.is_empty()) or (not e2.is_empty()):\n matrix[e1][e2] = e1.conjunction(e2).cardinal / e1.disjunction(e2).cardinal\n else:\n matrix[e1][e2] = 1\n \n #Compute the distance as sqtr(0.5 * diffT * matrix * diff):\n distance = 0\n temp = {}\n for e1 in difference:\n temp[e1] = 0\n for e2 in difference:\n temp[e1] += difference[e2] * matrix[e1][e2]\n for e1 in difference:\n distance += temp[e1] * difference[e1]\n return math.sqrt(0.5 * distance)\n\n #Get the distance between self and the provided set of mass functions:\n distance = 0\n for mass_function in mass_functions:\n distance += distance_one_mass(mass_function)\n return round(distance / len(mass_functions), 6)", "def get_distance(list1, list2):\n import math\n distance_result = (list1[0] - list2[0]) ** 2 + (list1[1] - list2[1]) ** 2 + (list1[2] - list2[2]) ** 2\n return math.sqrt(abs(distance_result))", "def __surface_distances(result, reference, voxelspacing=None, connectivity=1):\n result = np.atleast_1d(result.astype(np.bool))\n reference = np.atleast_1d(reference.astype(np.bool))\n if voxelspacing is not None:\n voxelspacing = _ni_support._normalize_sequence(voxelspacing, result.ndim)\n voxelspacing = np.asarray(voxelspacing, dtype=np.float64)\n if not voxelspacing.flags.contiguous:\n voxelspacing = voxelspacing.copy()\n \n # binary structure\n footprint = generate_binary_structure(result.ndim, connectivity)\n \n # test for emptiness\n if 0 == np.count_nonzero(result): \n raise RuntimeError('The first supplied array does not contain any binary object.')\n if 0 == np.count_nonzero(reference): \n raise RuntimeError('The second supplied array does not contain any binary object.') \n \n # extract only 1-pixel border line of objects\n result_border = result ^ binary_erosion(result, structure=footprint, iterations=1)\n\n reference_border = reference ^ binary_erosion(reference, structure=footprint, iterations=1)\n\n dt = distance_transform_edt(~reference_border, sampling=voxelspacing)\n # print(dt)\n reference_border = (reference_border + 0).astype(np.float32)\n\n sds = dt[result_border]\n \n return sds", "def test_vector_distance(self):\n\n # Example 1.2\n vector_p = np.array([0.5, 0.0, 0.5])\n 
vector_q = np.array([0.5, 0.5, 0.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n d_ref_nm = np.sqrt(5.0)/4.0\n\n g_ij_nm2 = crystal.gij_nm2\n\n vector_d = vector_p - vector_q\n d_nm = vector.distance(crystal, vector_d, vector_d)\n\n self.assertAlmostEqual(d_ref_nm, d_nm, 5)\n\n #self.fail(\"Test if the testcase is working.\")", "def distance(self, X, Mu):\r\n # Reshape X to N x 1 x D to enable broadcasting\r\n X_reshaped = tf.reshape(X, (-1, 1, X.shape[-1]))\r\n\r\n # subtraction is performed on the last two dimensions of X_reshaped\r\n # X_reshaped(N stack, 1 x D) - Mu(K x D) => (N stack, K x D)\r\n # norm(N stack, K x D) => (N stack, K)\r\n return tf.reduce_sum(tf.square(X_reshaped - Mu), axis=-1)", "def distance_dmc(distances, Ks, points):\n doors = []\n for d in distances:\n dmc = []\n for k in Ks:\n print \"==========================\", k, \"==========================\"\n clusters = create_clusters(25, k)\n\n kmeans(points, clusters)\n # print \"Finished creating kmeans algorithm\"\n\n create_backbone_network(GRAPH, clusters, d)\n # print \"Finished creating backbone network\"\n\n find_all_shortest_paths(clusters, SP_TABLE, GRAPH)\n # print \"Finished finding all shortest paths\"\n\n for clst in clusters:\n clst.inter_cost = inter_cost(clst)\n clst.intra_cost = intra_cost(points, clst)\n clst.dm_cost = door_matt_cost(clusters, clst, SP_TABLE)\n\n ret = total_cost(clusters)\n dmc.append(ret[2])\n doors.append(sum(dmc))\n draw_door_matts(map(lambda d: float(format(d, \".4g\")), distances), doors)", "def distances(self) -> ndarray:\n return self._distances", "def compute_distances_one_loop(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in range(num_test):\n dists[i, :] = np.sqrt(np.sum(np.square(X[i, :] - self.X_train), axis=1)).transpose()\n return dists", "def _calculate_distance(self, ordered_list):\r\n\r\n total_distance = 0\r\n previous_point = None\r\n for point in ordered_list:\r\n if previous_point is not None:\r\n angle, distance = previous_point.angleAndDistanceTo(point) # geodesic distance in meters\r\n total_distance += distance\r\n previous_point = point\r\n\r\n return total_distance", "def compute_distance(self):\n loc = np.extend_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n pos_diff = m-m.transpose(0, 2, 1)\n self.distance = np.linalg.norm(pos_diff, axis=0)", "def compute_derivs_matrices(vecs, adv_vecs, dt):\n return (adv_vecs - vecs)/(1.*dt)", "def dist_matrix(self):\n return self.__dist_matrix", "def get_distances(self, crds):\n self.all_dist = np.zeros((self.natom, self.natom))\n # Loop over upper triangle of atom pairs\n for iat in range(self.natom-1):\n # Get the atom indices\n at_inds = np.arange(len(crds))\n\n # Calc distances between atoms (only upper triangle though)\n at_msk = at_inds > iat\n all_ut_dist = crds[at_msk] - crds[iat]\n all_ut_dist = np.linalg.norm(all_ut_dist, axis=1)\n\n self.all_dist[iat, iat+1:] = all_ut_dist\n\n # Get lower triangle indices\n self.all_dist = self.all_dist + self.all_dist.T", "def _get_distances(self):\n for molecule in self.values():\n molecule.get_distances()\n\n # for atom in self.atoms:\n # atom.get_distances()", "def get_distance_matrix(self):\n names = self.get_named_leaves()\n num_names = len(names)\n dist_mat = np.zeros((num_names, num_names), dtype='float')\n for i, j in itertools.combinations(range(num_names), 2):\n node1, node2 = self.node_names[names[i]], self.node_names[names[j]]\n dist = 
self.node_distance(node1, node2)\n dist_mat[i,j] = dist\n dist_mat[j,i] = dist\n return names, dist_mat", "def get_correct_distance_matrix(L):\n n = len(L)\n D = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n if i != j:\n D[i][j] = get_minor(L, [i, j], [i, j]) / get_minor(L, [i], [i])\n return D", "def test_poincare_distances_batch(self):\n vector_1 = self.vectors['dog.n.01']\n vectors_2 = self.vectors[['mammal.n.01', 'dog.n.01']]\n distances = self.vectors.vector_distance_batch(vector_1, vectors_2)\n self.assertTrue(np.allclose(distances, [4.5278745, 0]))", "def distancematrix(vec1, vec2):\n v1, v2 = np.meshgrid(vec1, vec2)\n return np.abs(v1 - v2)", "def measure_distance(self, mat):\n if len(mat) == 1:\n print(\"chain has only one CAatom\")\n return\n self.dists = []\n for num in range(0, len(mat)):\n if num + 1 <= len(mat) - 1:\n c1 = mat[num]\n c2 = mat[num + 1]\n d = c2 - c1\n self.dists.append(math.sqrt(np.sum(d * d)))\n return self.dists", "def calculateDistances(df):\n return", "def getDistanceMatrix(self, alignedSequences):\n\t\tif not alignedSequences:\n\t\t\traise ValueError(\"alignedSequences must not be empty\")\n\t\tdominantAlignedSequence = alignedSequences[0]\n\t\tsubdominantAlignedSequences = alignedSequences[1:]\n\t\tdistanceMatrix = []\n\t\tfor seq in subdominantAlignedSequences:\n\t\t\tdistanceMatrix.append(len(seq) - self._getNumberOfSpaces(seq) - self._getNumberOfAlignedNucleotides(dominantAlignedSequence, seq))\n\t\treturn distanceMatrix", "def _calc_distances(preds, targets, mask, normalize):\n N, K, _ = preds.shape\n _mask = mask.copy()\n _mask[np.where((normalize == 0).sum(1))[0], :] = False\n distances = np.full((N, K), -1, dtype=np.float32)\n normalize[np.where(normalize <= 0)] = 1000000.0\n distances[_mask] = np.linalg.norm(((preds - targets) / normalize[:, None, :])[_mask], axis=-1)\n return distances.T", "def calcEuclideanDistance(d1, d2):\n #initiate empty list\n result = []\n #for each index in the list, each position in both list minus each other\n #and to the power of two. 
Add this in the result list\n for idx in range(len(d1)):\n result.append((d1[idx]-d2[idx])**2)\n\n #Return the square of the sum of all values in the result list\n return math.sqrt(sum(result))", "def get_distance_metrics():\n\n return [HausdorffDistance(),\n AverageDistance(),\n MahalanobisDistance(),\n VariationOfInformation(),\n GlobalConsistencyError(),\n ProbabilisticDistance()]", "def calculate_distances(train_data, test_datum):\n n = train_data.shape[0]\n dist = []\n for i in range(n):\n distance = np.sqrt(np.sum(np.square(train_data[i]-test_datum)))\n dist.append(distance)\n dist = np.asarray(dist)\n return dist", "def build_homes_matrix(self, homes_list):\n\n # Matrix that contains every home and the duration it takes to get to every other home in the list\n homes_matrix = []\n\n # Loop through every home in the list\n # And create a list of the times between this home and the other homes\n for home in homes_list:\n home_distances = []\n\n # Only let them do driving\n commute_type = CommuteType.objects.get_or_create(commute_type=CommuteType.DRIVING)[0]\n\n # Depending on the accuracy either get the exact commute or the approximate\n if self.commute_accuracy == CommuteAccuracy.EXACT:\n result_distance_wrapper = retrieve_exact_commute_client_scheduler(homes_list, [home], commute_type)\n for commute in result_distance_wrapper:\n time_seconds = commute[0][0]\n distance_meters = commute[0][1]\n if time_seconds is not None and distance_meters is not None:\n home_distances.append(time_seconds)\n else:\n home_distances.append(math.inf)\n else:\n update_commutes_cache_client_scheduler(homes_list,\n home,\n accuracy=CommuteAccuracy.APPROXIMATE,\n commute_type=CommuteType.DRIVING)\n home_distances = retrieve_approximate_commute_client_scheduler(homes_list,\n home,\n commute_type=CommuteType.DRIVING)\n\n homes_matrix.append(home_distances)\n\n return homes_matrix", "def DM(self, masses=None):\n N = len(self.diameters)\n rs = self.rs\n d = self.ndim\n M = np.zeros((d * N, d * N))\n\n for i in range(N):\n sigi = self.diameters[i]\n for j in range(i):\n rijvec = rs[i, :] - rs[j, :]\n rijvec = rijvec - np.around(rijvec)\n rijsq = np.sum(rijvec**2)\n dij = (sigi + self.diameters[j]) / 2\n dijsq = dij**2\n if rijsq < dijsq:\n rij = np.sqrt(rijsq)\n rijouter = np.outer(rijvec, rijvec)\n # U(r) = ½(1 - r/d)²\n # d²U/dxdy = (dr/dx)(dr/dy)/d² - (1 - r/d)(d²r/dxdy)/d\n # dr/dx = x/r\n # d²r/dxdy = -(x y) / r³\n # d²U/dxdy = -(x y)/(r² d²) + (1 - r/d)((x y)/r²)/(d r)\n # d²U/dx² = (dr/dx)²/d² - (1 - r/d)(d²r/dx²)/d\n # d²r/dx² = -x² / r³ + 1/r\n # d²U/dxᵢdxⱼ = -(xᵢ xⱼ)/(r² d²) + (1 - r/d)((xᵢ xⱼ)/r² -\n # δᵢⱼ)/(d r)\n\n Mij1 = -rijouter / rijsq / dijsq\n Mij2 = (1 - rij / dij) * \\\n (rijouter / rijsq - np.eye(d)) / rij / dij\n Mij = Mij1 + Mij2\n\n M[d * i:d * i + d, d * j:d * j + d] = Mij\n M[d * j:d * j + d, d * i:d * i + d] = Mij\n M[d * i:d * i + d, d * i:d * i + d] -= Mij\n M[d * j:d * j + d, d * j:d * j + d] -= Mij\n\n np.divide(M, self.L**2, out=M)\n if masses is None:\n return M\n\n # TODO: is the mass part of this really part of this?\n marr = np.array(masses)\n assert np.shape(masses) == np.shape(self.diameters)\n marr = np.array([masses] * d)\n marr = marr.T.flatten()\n # marr is now [m1,m1,m2,m2,...] 
(in 2D)\n mm = np.eye(d * N)\n np.multiply(mm, marr**-.5, out=mm)\n # mm is now M^-½, where M is the mass matrix\n\n mm.dot(M, out=M)\n M.dot(mm, out=M)\n return M", "def distancePairs(self):\n return spsd.squareform(spsd.pdist(self.coordinates()))", "def face_distance(face_encodings, face_to_compare):\n if len(face_encodings) == 0:\n return np.empty((0))\n\n #return 1/np.linalg.norm(face_encodings - face_to_compare, axis=1)\n return np.sum(face_encodings*face_to_compare,axis=1)", "def compute_distance(X, K_clusters):\n dis = np.linalg.norm((X-K_clusters),2,axis=1)**2\n return dis", "def compute_pairwise_distances(input_vecs: types.Tensor) -> types.Tensor:\n r = tf.reduce_sum(input_vecs * input_vecs, axis=1, keepdims=True)\n pdistance_matrix = (\n r\n - 2 * tf.matmul(input_vecs, input_vecs, transpose_b=True)\n + tf.transpose(r)\n )\n return tf.cast(pdistance_matrix, dtype=tf.float32)", "def F(self, distances):\n return np.sum(self.F_mat(distances), 1)", "def get_distance_vector(catchpoints, closest):\n\n deg_rad = math.pi / 180\n \n dphis = catchpoints[:, 1] - closest[:, 1]\n phims = 0.5 * (catchpoints[:, 1] + closest[:, 1])\n dlams = catchpoints[:,0] - closest[:,0]\n\n k1s = (111.13209 - 0.56605 * numpy.cos(2 * phims * deg_rad) + \n 0.00120 * numpy.cos(4 * phims * deg_rad))\n k2s = (111.41513 * numpy.cos(phims * deg_rad) - 0.09455 * \n numpy.cos(3 * phims * deg_rad) + 0.0012 * \n numpy.cos(5 * phims * deg_rad))\n \n return numpy.sqrt(k1s**2 * dphis**2 + k2s**2 * dlams**2)", "def _location_distances(self, positions) -> torch.Tensor:\n diff = positions[..., None, :, :] - positions[..., None, :]\n distances = torch.norm(diff, dim=3)\n return distances", "def calculate_vectors(self, spectrum_list: List[Spectrum]) -> np.ndarray:\n n_rows = len(spectrum_list)\n reference_vectors = np.empty(\n (n_rows, self.output_vector_dim), dtype=\"float\")\n binned_spectrums = self.model.spectrum_binner.transform(spectrum_list, progress_bar=self.progress_bar)\n for index_reference, reference in enumerate(\n tqdm(binned_spectrums,\n desc='Calculating vectors of reference spectrums',\n disable=(not self.progress_bar))):\n reference_vectors[index_reference, 0:self.output_vector_dim] = \\\n self.model.base.predict(self._create_input_vector(reference), verbose=0)\n return reference_vectors", "def test_euclidean_distance_Ndimension(self):\n\n self.assertEqual(15, euclidean_distance([0, 0, 0], [10, 10, 5]))\n self.assertEqual(15, euclidean_distance([0, 0, 0], [-10, -10, -5]))\n\n self.assertEqual(17, euclidean_distance([0, 0, 0, 0], [10, 10, 8, 5]))\n self.assertEqual(17, euclidean_distance([0, 0, 0, 0], [-10, -10, -8, -5]))\n\n self.assertEqual(8, euclidean_distance([0, 0, 0, 0, 0], [5, 1, 1, 1, 6]))\n self.assertEqual(8, euclidean_distance([0, 0, 0, 0, 0], [-5, -1, -1, -1, -6]))", "def test_distances(self):\n sf = make_classifier_data(n=10, d=2, seed=37)\n sf.remove_column(\"class\", inplace=True)\n\n numeric_features = [\"int0\", \"int1\", \"float0\", \"float1\"]\n array_features = [\"array0\"]\n string_features = [\"str0\"]\n dict_features = [\"dict0\"]\n\n ## Numeric standard distances should work for numeric columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Numeric standard distances should work for array columns\n for d 
in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=array_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## String standard distances should work.\n for d in [\"levenshtein\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Dictionary standard distances should work.\n for d in [\"jaccard\", \"weighted_jaccard\", \"cosine\", \"transformed_dot_product\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n # Nonsensical combinations of feature types and distances should fail.\n with self.assertRaises(ValueError):\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=\"euclidean\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n # If no distance is specified, the automatic distance construction\n # should kick in and be correct.\n correct_dist = [\n [[\"str0\"], \"levenshtein\", 1],\n [[\"str1\"], \"levenshtein\", 1],\n [[\"dict0\"], \"jaccard\", 1],\n [[\"int0\", \"int1\", \"float0\", \"float1\"], \"euclidean\", 1],\n [[\"array0\"], \"euclidean\", 1],\n ]\n\n m = tc.dbscan.create(\n sf, radius=1, distance=None, min_core_neighbors=3, verbose=False\n )\n\n self.assertItemsEqual(m.distance, correct_dist)\n\n m = tc.dbscan.create(\n sf, radius=1, distance=\"auto\", min_core_neighbors=3, verbose=False\n )\n self.assertItemsEqual(m.distance, correct_dist)", "def compute_distance (uVector, uOther):\n ## since each element can be either 0 or 1,\n ## no need for square roots and pow\n d = 0\n for i in range (len(uVector)):\n d = d + math.pow((int(uVector [i]) - int(uOther [i])), 2)\n\n return d", "def distance(self,m,n):\n if type(m) == np.ndarray:\n return np.linalg.norm( self.positions[m] - self.positions[n] , axis = 1)\n \n return np.linalg.norm(self.positions[m]-self.positions[n])", "def getDistanceM(self, test, train):\n p = 2 # TUNE currently euclidian distance\n distanceM = pd.DataFrame(index=test.index.values, columns=train.index.values)\n for testrow, testing in test.iterrows():\n for trainrow, training in train.iterrows():\n tot = 0\n for indexc, column in test.iteritems():\n #print(indexc)\n if indexc in self.discrete: # need to reference VDM\n datapoint = self.VDMdict.get(indexc)\n dif = datapoint[testing[indexc]][training[indexc]]\n elif indexc != \"class\": #get distance beween 2 points\n dif = abs(float(testing[indexc]) - float(training[indexc]))\n\n tot += dif ** p\n distance = tot ** (1 / p) #distance is calculated\n distanceM.at[testrow, trainrow] = distance #put in distance matrix\n return(distanceM)", "def get_dist_mat(self):\n n_site = self.status.give(keyword=\"n_site\")\n sites = self.status.give(keyword=\"sites\")\n dist_mat = [[0.0 for j in xrange(n_site)] for 
i in xrange(n_site)]\n for i in xrange(n_site):\n for j in xrange(n_site):\n ri = sites[i].pos\n rj = sites[j].pos\n dist_mat[i][j] = np.linalg.norm(ri-rj)\n # print ri, rj\n return dist_mat", "def get_distance(positions: np.ndarray, position: np.ndarray, unit_factor: np.ndarray = None):\n\n if unit_factor is None:\n unit_factor = np.array([1, 1, 1])\n\n distances = np.sqrt(np.sum(((positions - position) * unit_factor.reshape(1, 3)) ** 2, axis=1))\n\n return distances", "def face_distance(face_encodings, face_to_compare):\n if len(face_encodings) == 0:\n return np.empty((0))\n\n return np.linalg.norm(face_encodings - face_to_compare, axis=1)", "def squared_distance_matrix(X, augmented=False):\n XX = np.dot(X,X.T)\n D = np.outer(np.diag(XX), np.ones(len(X)))-2*XX+np.outer(np.ones(len(X)),\n np.diag(XX))\n if augmented == True:\n n = len(D)\n zeros_v = np.zeros((n,1))\n zeros_h = np.zeros((1,n+1))\n D = np.bmat('D zeros_v; zeros_h')\n return D", "def F_mat(self, distances):\n distances_norm2 = norm2(distances)\n distances_norm = np.sqrt(distances_norm2)\n isColliding = self.isColliding(distances_norm)[:, :, :, None]\n\n # Repulsion force when a collision happens\n f_colliding = (2/self.d_coll**2)*isColliding\n \n # Interaction force\n ident = np.identity(np.shape(distances)[1])[None, :, :]\n d = (ident+distances_norm)\n dn = (d**self.n)[:, :, :, None]\n d2 = (ident+distances_norm2)\n d2n = (d2**(self.n+1))[:, :, :, None]\n\n f_interact = self.n*self.d_attr**self.n*(self.d_attr**self.n-dn)/(d2n + 10e-50)*(1-isColliding)\n\n # Total Force\n f = (f_colliding + f_interact)*distances\n\n # Remove self-interaction\n diag = np.einsum('ijj->ij', f[:, :, :, 0])\n diag[:, :] = 0\n\n diag2 = np.einsum('ijj->ij', f[:, :, :, 1])\n diag2[:, :] = 0\n\n return f", "def test_cmatrix_list(self):\n\n test_dtraj = [np.array([0, 1, 1, 0, 0, 0, 1, 1, 1, 1]), \\\n np.array([0,1,1,1,0])]\n\n cmatrix_compare = np.array([[2., 3.], [2., 6.]])\n cmatrix_computed = cmatrix(test_dtraj)\n self.assertTrue(np.allclose(cmatrix_compare, cmatrix_computed))", "def cal_dis(data,Cen,dis=\"Euclidean\"):\n dis = []\n for i in range(len(data)):\n dis.append([])\n for j in range(len(Cen)):\n dd = math.sqrt(sum(map(lambda x:x**2,data[i]-Cen[j])))\n dis[i].append(dd)\n \n return dis", "def _computeDistances(self) -> None:\n length = len(self.data)\n for i, sequenceOne in enumerate(self.data):\n print(f\"[SeqCluBaselineOffline] Computing distances is at iteration {i} of {length}.\")\n for j, sequenceTwo in enumerate(self.data):\n if i == j:\n self.distances[i][j] = 0\n continue\n distance = self.distanceMeasure.calculateDistance(sequenceOne, sequenceTwo)\n self.distances[i][j] = distance\n self.distances[j][i] = distance", "def _object_distance(self, object1, object2):\n return np.linalg.norm(np.array(object1) - np.array(object2))", "def test_distance(self):\n for emb_vals, point, dist_gt in self.DISTANCE_EXAMPLES:\n print(emb_vals, point, dist_gt)\n emb = to_emb(emb_vals)\n dist = emb.distance(point)\n assert np.allclose(dist, dist_gt), \\\n (\"Wrong distance for point {}: expected {} but was {};\"\n \"\\nembedding:\\n{}\").format(point, dist_gt, dist, str(emb))", "def compute_distance(df):\n pass", "def get_distance ( self, X: np.ndarray, x0: np.ndarray ):\n \n # Apply distance_function across rows of X\n return [ self.distance_function ( row, x0 ) for row in X ]\n # End get_distance()", "def distance(self, vector1, vector2):\n\t\tsum_sq = 0\n\t\tfor i in range(28):\n\t\t\tfor j in range(28):\n\t\t\t\tsum_sq += (vector1[i][j] - 
vector2[i][j])**2\n\t\treturn math.sqrt(sum_sq)", "def return_energy_mat(grid, objects_list, choice_parallelization):\n assert len(grid.shape) == 1, \"energy distance can only be applied on 1D objects\"\n\n n_obj = len(objects_list)\n n_pix = len(grid)\n distance_matrix = numpy.zeros((n_obj, n_obj))\n\n if choice_parallelization == True:\n master_index_list = []\n for i in range(n_obj):\n for j in range(i+1, n_obj):\n master_index_list.append([i, j])\n\n num_cores = multiprocessing.cpu_count()\n results = Parallel(n_jobs=num_cores)(delayed(energy_simple_1D)(grid, objects_list, i) for i in master_index_list)\n\n for i_pair in range(len(results)):\n i = master_index_list[i_pair][0]\n j = master_index_list[i_pair][1]\n distance_matrix[i, j] = results[i_pair]\n distance_matrix[j, i] = results[i_pair]\n\n if choice_parallelization == False:\n for i in range(n_obj):\n for j in range(i+1, n_obj):\n obj_i = objects_list[i]\n obj_j = objects_list[j]\n energy_val = energy_distance(grid, grid, u_weights=obj_i, v_weights=obj_j)\n distance_matrix[i, j] = energy_val\n distance_matrix[j, i] = energy_val\n\n return distance_matrix", "def calculate_distances(drives):\n for d in drives:\n d.set_distance()", "def eval_mean_distance(played_decks, clustering_data: List, fuzzy: bool, debug: bool = False):\n\n for alg_dict in clustering_data:\n decks = np.array(played_decks)\n clusters = []\n for label in set(alg_dict[\"labels\"]):\n indices = np.where(alg_dict[\"labels\"] == label)\n if fuzzy:\n clusters.append(FuzzyDeckCluster(decks[indices]))\n else:\n clusters.append(DeckCluster(decks[indices]))\n\n if fuzzy:\n clustering = FuzzyDeckClustering(clusters)\n else:\n clustering = DeckClustering(clusters)\n\n sum_of_squared_distances_centroid = 0\n sum_of_squared_distances_core = 0\n\n for cluster in clustering.deck_clusters:\n centroid = cluster.centroid()\n core = cluster.core()\n for deck in cluster.decks:\n sum_of_squared_distances_centroid += (deck.jaccard_distance(centroid))**2\n sum_of_squared_distances_core += (deck.jaccard_distance(core))**2\n alg_dict[\"sse_centroid\"] = sum_of_squared_distances_centroid\n alg_dict[\"sse_core\"] = sum_of_squared_distances_core\n\n if debug:\n print(\"Alg: \" + alg_dict[\"name\"] + \"; \\t sse = \" + str(alg_dict[\"sse_centroid\"]))\n print(\"Alg: \" + alg_dict[\"name\"] + \"; \\t sse = \" + str(alg_dict[\"sse_core\"]))", "def compute_distances_two_loops(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in range(num_test):\n for j in range(num_train):\n dists[i, j] = np.sqrt(np.sum(np.square(X[i] - self.X_train[j])))\n return dists", "def transform(self, samples):\n check_is_fitted(self, [\"cluster_centers\"])\n\n distance_mat = self.get_distance(samples, self.cluster_centers)\n return distance_mat", "def getDistances(trainingSet, testInstance, distances):\n # Empty list to store distances of between testInstance and each trainSet item\n # Number of dimensions to check\n length=len(testInstance) - 1\n # Iterate through all items in trainingSet and compute the distance, then append to the distances list\n for x in range(len(trainingSet)):\n dist=calculateDistance(testInstance, trainingSet[x], length)\n distances.append((trainingSet[x], dist))\n return distances" ]
[ "0.6267775", "0.60056895", "0.59760046", "0.5957225", "0.5952635", "0.5888199", "0.5881407", "0.5870848", "0.58344764", "0.58344764", "0.5777284", "0.5726229", "0.5702075", "0.5700626", "0.5683218", "0.5678633", "0.56485", "0.55838466", "0.5582898", "0.55652297", "0.5557965", "0.5538048", "0.5524559", "0.5520437", "0.55093014", "0.5492023", "0.5476906", "0.54740274", "0.5468652", "0.54592836", "0.5457917", "0.5448652", "0.54400367", "0.5436454", "0.54340523", "0.5427078", "0.54269075", "0.5416064", "0.54154843", "0.5410674", "0.540545", "0.53949946", "0.53861356", "0.5383494", "0.53687376", "0.5355188", "0.53515697", "0.533828", "0.533723", "0.53245854", "0.5311616", "0.52939904", "0.52919304", "0.52798796", "0.5272237", "0.52686805", "0.524248", "0.5241534", "0.52353513", "0.52337164", "0.5231434", "0.5225982", "0.5225211", "0.5213618", "0.5211022", "0.51966524", "0.5191735", "0.5175747", "0.51739544", "0.5166391", "0.51469094", "0.51397663", "0.51294327", "0.5124239", "0.51191765", "0.50992894", "0.5096712", "0.50916415", "0.50812113", "0.50792336", "0.5076106", "0.50724345", "0.50662065", "0.5057851", "0.5038595", "0.50378203", "0.50354904", "0.5029184", "0.5028512", "0.5026596", "0.5024021", "0.5016911", "0.5014636", "0.50143015", "0.5012521", "0.5010736", "0.49971828", "0.49853188", "0.49781188", "0.49730885" ]
0.68410903
0
Calculates v-measure, homogeneity, and completeness for each clustering algorithm stored in clustering_alg and adds them to each algorithm's dictionary.
def eval_v_measure_homogeneity_completeness(clustering_alg: List, sdist_euclidean, sdist_jaccard, labels_true, debug: bool = False): for i, alg_dict in enumerate(clustering_alg): if "alg" in alg_dict: if alg_dict["distance"] == "euclidean": clustering = alg_dict["alg"].fit(sdist_euclidean) elif alg_dict["distance"] == "jaccard": clustering = alg_dict["alg"].fit(sdist_jaccard) else: raise ValueError("Unknown distance measure {}. ".format(alg_dict["distance"]) + "Please choose one of the following distance measures ['euclidean','jaccard']") labels_predicted = clustering.labels_ alg_dict["labels"] = labels_predicted else: labels_predicted = alg_dict["labels"] alg_dict["homogeneity"], alg_dict["completeness"], alg_dict["v-measure"] = \ homogeneity_completeness_v_measure(labels_true, labels_predicted) if debug: print("Alg: " + alg_dict["name"] + "; \t v-measure = " + str(alg_dict["v-measure"]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_clustering_methods(methods):\r\n results = {}\r\n for m in methods:\r\n res = results[m['name']] = {}\r\n prec = 3\r\n res['Adjusted Rand Score'] = round(sklearn.metrics.adjusted_rand_score(m['target'], m['clustering']),prec)\r\n res['Normalized Mutual Information'] = round(sklearn.metrics.normalized_mutual_info_score(m['target'], m['clustering']),prec)\r\n res['Adjusted Mutual Information'] = round(sklearn.metrics.adjusted_mutual_info_score(m['target'], m['clustering']),prec)\r\n return np.transpose(results)", "def __init__(self, dictAlg):\n\n # values of dict dictAlg are DataSetList which should have only one\n # element which will be assigned as values in the following lines.\n d = set()\n f = set()\n for i in dictAlg.values():\n d |= set(j.dim for j in i)\n f |= set(j.funcId for j in i)\n\n if len(f) > 1 or len(d) > 1:\n Usage('Expect the data of algorithms for only one function and '\n 'one dimension.')\n\n f = f.pop()\n d = d.pop()\n\n dictMaxEvals = {}\n dictFinalFunVals = {}\n tmpdictAlg = {}\n for alg, i in dictAlg.iteritems():\n if len(i) == 0:\n warnings.warn('Algorithm %s was not tested on f%d %d-D.'\n % (alg, f, d))\n continue\n elif len(i) > 1:\n warnings.warn('Algorithm %s has a problem on f%d %d-D.'\n % (alg, f, d))\n continue\n\n tmpdictAlg[alg] = i[0] # Assign ONLY the first element as value\n dictMaxEvals[alg] = i[0].maxevals\n dictFinalFunVals[alg] = i[0].finalfunvals\n\n dictAlg = tmpdictAlg\n\n sortedAlgs = dictAlg.keys()\n # algorithms will be sorted along sortedAlgs which is now a fixed list\n\n # Align ERT\n erts = list(np.transpose(np.vstack([dictAlg[i].target, dictAlg[i].ert]))\n for i in sortedAlgs)\n res = readalign.alignArrayData(readalign.HArrayMultiReader(erts))\n\n resalgs = []\n reserts = []\n # For each function value\n for i in res:\n # Find best algorithm\n curerts = i[1:]\n assert len((np.isnan(curerts) == False)) > 0\n currentbestert = np.inf\n currentbestalg = ''\n for j, tmpert in enumerate(curerts):\n if np.isnan(tmpert):\n continue # TODO: don't disregard these entries\n if tmpert == currentbestert:\n # TODO: what do we do in case of ties?\n # look at function values corresponding to the ERT?\n # Look at the function evaluations? 
the success ratio?\n pass\n elif tmpert < currentbestert:\n currentbestert = tmpert\n currentbestalg = sortedAlgs[j]\n reserts.append(currentbestert)\n resalgs.append(currentbestalg)\n\n dictiter = {}\n dictcurLine = {}\n resDataSet = []\n\n # write down the #fevals to reach the function value.\n for funval, alg in zip(res[:, 0], resalgs):\n it = dictiter.setdefault(alg, iter(dictAlg[alg].evals))\n curLine = dictcurLine.setdefault(alg, np.array([np.inf, 0]))\n while curLine[0] > funval:\n try:\n curLine = it.next()\n except StopIteration:\n break\n dictcurLine[alg] = curLine.copy()\n tmp = curLine.copy()\n tmp[0] = funval\n resDataSet.append(tmp)\n\n setalgs = set(resalgs)\n dictFunValsNoFail = {}\n for alg in setalgs:\n for curline in dictAlg[alg].funvals:\n if (curline[1:] == dictAlg[alg].finalfunvals).any():\n # only works because the funvals are monotonous\n break\n dictFunValsNoFail[alg] = curline.copy()\n\n self.evals = resDataSet\n # evals is not a np array but a list of arrays because they may not\n # all be of the same size.\n self.maxevals = dict((i, dictMaxEvals[i]) for i in setalgs)\n self.finalfunvals = dict((i, dictFinalFunVals[i]) for i in setalgs)\n self.funvalsnofail = dictFunValsNoFail\n self.dim = d\n self.funcId = f\n self.algs = resalgs\n self.algId = 'Virtual Best Algorithm'\n self.comment = 'Combination of ' + ', '.join(sortedAlgs)\n self.ert = np.array(reserts)\n self.target = res[:, 0]\n\n bestfinalfunvals = np.array([np.inf])\n for alg in sortedAlgs:\n if np.median(dictAlg[alg].finalfunvals) < np.median(bestfinalfunvals):\n bestfinalfunvals = dictAlg[alg].finalfunvals\n algbestfinalfunvals = alg\n self.bestfinalfunvals = bestfinalfunvals\n self.algbestfinalfunvals = algbestfinalfunvals", "def get_clustering_algorithm_class(cls):\n return {\n \"spectral\": SpectralClusteringAlgorithm,\n \"dbscan\": DBSCANAlgorithm,\n \"gromos\": GromosAlgorithm,\n \"kmedoids\": KMedoidsAlgorithm,\n \"random\": RandomClusteringAlgorithm,\n \"hierarchical\": HierarchicalClusteringAlgorithm\n }", "def cluster_analysis(\n clusterers: list,\n hyperparameter_grids: list,\n eval_metrics_grid: list,\n eval_metrics_params: dict,\n word_embeddings: np.ndarray,\n words_vocabulary: list,\n word_to_int: dict,\n word_embeddings_normalized: np.ndarray = None,\n compute_pairwise_word_distances: bool = False,\n compute_pairwise_word_distances_normalized: bool = False,\n return_word_vectors: bool = False,\n save_result_to_disk: bool = False,\n output_dir: Optional[str] = None,\n model_name: Optional[str] = None,\n dataset_name: Optional[str] = None,\n output_filepath_suffix: Optional[str] = None,\n) -> Union[dict, tuple]:\n # Create word vectors from given words/vocabulary\n word_vectors = words_to_vectors(\n words_vocabulary=words_vocabulary,\n word_to_int=word_to_int,\n word_embeddings=word_embeddings,\n )\n\n # Create normalized word vectors from given words/vocabulary if specified.\n word_vectors_normalized = None\n if word_embeddings_normalized is not None:\n word_vectors_normalized = words_to_vectors(\n words_vocabulary=words_vocabulary,\n word_to_int=word_to_int,\n word_embeddings=word_embeddings_normalized,\n )\n\n if compute_pairwise_word_distances:\n word_vectors_pairwise_distances = pairwise_cosine_distances(word_vectors)\n if (\n compute_pairwise_word_distances_normalized\n and word_vectors_normalized is not None\n ):\n normalized_word_vectors_pairwise_distances = euclidean_distances(\n word_vectors_normalized\n )\n\n # Perform cluster analysis\n clusterers_result = {}\n 
unique_cluster_metrics = set()\n for clusterer_tuple, hyperparameter_grid, eval_metrics in zip(\n clusterers, hyperparameter_grids, eval_metrics_grid\n ):\n if len(clusterer_tuple) == 3:\n (clusterer_name, clusterer_cls, clusterer_use_normalized) = clusterer_tuple\n else:\n clusterer_use_normalized = False\n (clusterer_name, clusterer_cls) = clusterer_tuple\n print(f\"-- Clustering using {clusterer_name} --\")\n clusterers_result[clusterer_name] = {\n \"cluster_labels\": [],\n \"cluster_params\": [],\n \"cluster_metrics\": {},\n }\n\n # Do clustering for each set of hyperparameters\n param_grid = ParameterGrid(hyperparameter_grid)\n for params_idx, params in enumerate(tqdm(param_grid)):\n clusterers_result[clusterer_name][\"cluster_params\"].append(params)\n\n # Add exception for ward linkage clustering.\n if (\n clusterer_cls is AgglomerativeClustering\n and params.get(\"linkage\") == \"ward\"\n and word_vectors_normalized is not None\n ):\n params = {**params, \"affinity\": \"euclidean\"}\n clusterer_instance = clusterer_cls(**params)\n fit_predict_X = word_vectors_normalized\n else:\n clusterer_instance = clusterer_cls(**params)\n if (\n params.get(\"affinity\") == \"precomputed\"\n or params.get(\"metric\") == \"precomputed\"\n ):\n if (\n clusterer_use_normalized\n and compute_pairwise_word_distances_normalized\n ):\n fit_predict_X = normalized_word_vectors_pairwise_distances\n elif compute_pairwise_word_distances:\n fit_predict_X = word_vectors_pairwise_distances\n else:\n if clusterer_use_normalized and word_vectors_normalized is not None:\n fit_predict_X = word_vectors_normalized\n else:\n fit_predict_X = word_vectors\n\n # Use fit_predict if it is available.\n if getattr(clusterer_instance, \"fit_predict\", None) is not None:\n predicted_labels = clusterer_instance.fit_predict(fit_predict_X)\n else:\n clusterer_instance.fit(fit_predict_X)\n predicted_labels = clusterer_instance.predict(fit_predict_X)\n\n # Separate noise labels into clusters\n if clusterer_cls is HDBSCAN:\n predicted_labels = separate_noise_labels_into_clusters(predicted_labels)\n\n clusterers_result[clusterer_name][\"cluster_labels\"].append(predicted_labels)\n\n # Evaluate predicted cluster labels using internal evaluation metrics\n for eval_metric_tuple in eval_metrics:\n if len(eval_metric_tuple) == 3:\n (\n eval_metric_key,\n eval_metric,\n eval_metric_use_normalized,\n ) = eval_metric_tuple\n else:\n eval_metric_use_normalized = False\n (eval_metric_key, eval_metric) = eval_metric_tuple\n eval_metric_params = eval_metrics_params.get(eval_metric_key, {})\n if (\n compute_pairwise_word_distances\n and eval_metric_params.get(\"metric\") == \"precomputed\"\n ):\n if (\n eval_metric_use_normalized\n and compute_pairwise_word_distances_normalized\n ):\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=normalized_word_vectors_pairwise_distances,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n else:\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=word_vectors_pairwise_distances,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n else:\n if (\n eval_metric_use_normalized\n and word_vectors_normalized is not None\n ):\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=word_vectors_normalized,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n else:\n metric_name, metric_score, metric_obj_max = 
eval_metric(\n word_embeddings=word_vectors,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n unique_cluster_metrics.add(metric_name)\n\n # Initialize metric result\n if (\n metric_name\n not in clusterers_result[clusterer_name][\"cluster_metrics\"]\n ):\n clusterers_result[clusterer_name][\"cluster_metrics\"][\n metric_name\n ] = {\n \"metric_scores\": [],\n \"metric_obj_max\": metric_obj_max,\n \"best_metric_score_indices\": [],\n }\n\n clusterers_result[clusterer_name][\"cluster_metrics\"][metric_name][\n \"metric_scores\"\n ].append(metric_score)\n\n # Set best metric score indices\n if params_idx == len(param_grid) - 1:\n best_metric_score_indices = np.argsort(\n clusterers_result[clusterer_name][\"cluster_metrics\"][\n metric_name\n ][\"metric_scores\"]\n )\n if metric_obj_max:\n best_metric_score_indices = best_metric_score_indices[::-1]\n clusterers_result[clusterer_name][\"cluster_metrics\"][metric_name][\n \"best_metric_score_indices\"\n ] = best_metric_score_indices\n\n # Find preferred clusterers for each cluster metric (from best to worst)\n metric_preferred_clusterers = {}\n for cluster_metric_name in unique_cluster_metrics:\n metric_obj_max = None\n metric_best_scores = []\n clusterer_names = []\n for clusterer_name, clusterer_result in clusterers_result.items():\n if cluster_metric_name in clusterer_result[\"cluster_metrics\"]:\n clusterer_names.append(clusterer_name)\n metric_result = clusterer_result[\"cluster_metrics\"][cluster_metric_name]\n if metric_obj_max is None:\n metric_obj_max = metric_result[\"metric_obj_max\"]\n best_metric_score = metric_result[\"metric_scores\"][\n metric_result[\"best_metric_score_indices\"][0]\n ]\n metric_best_scores.append(best_metric_score)\n clusterer_names = np.array(clusterer_names)\n metric_best_scores = np.array(metric_best_scores)\n\n metric_best_scores_sorted_indices = np.argsort(metric_best_scores)\n if metric_obj_max:\n metric_best_scores_sorted_indices = metric_best_scores_sorted_indices[::-1]\n metric_preferred_clusterers[cluster_metric_name] = {\n \"clusterer_names\": clusterer_names[metric_best_scores_sorted_indices],\n \"best_metric_scores\": metric_best_scores[metric_best_scores_sorted_indices],\n }\n\n # Return result as dictionary\n cluster_analysis_result = {\n \"clusterers\": clusterers_result,\n \"metric_preferred_clusterers\": metric_preferred_clusterers,\n }\n\n if return_word_vectors:\n if compute_pairwise_word_distances:\n cluster_analysis_result = (\n cluster_analysis_result,\n word_vectors,\n word_vectors_pairwise_distances,\n )\n else:\n cluster_analysis_result = (cluster_analysis_result, word_vectors)\n\n # Save result to disk\n if save_result_to_disk:\n save_cluster_result_to_disk(\n cluster_result=cluster_analysis_result,\n output_dir=output_dir,\n model_name=model_name,\n dataset_name=dataset_name,\n output_filepath_suffix=output_filepath_suffix,\n )\n\n return cluster_analysis_result", "def eval_cluster_contingency(clustering_alg: List, labels_true, sdist):\n for (alg_name, alg_dict) in clustering_alg:\n if \"alg\" in alg_dict:\n clustering = alg_dict[\"alg\"].fit(sdist)\n labels_pred = clustering.labels_\n alg_dict[\"labels\"] = labels_pred\n else:\n labels_pred = alg_dict[\"labels\"]\n\n pred_label_dict, new_labels = normalize_labels(labels_pred)\n\n alg_dict[\"cm\"] = contingency_matrix(labels_true, new_labels)", "def _eval_clustering(self, gen_reviews, clusters, embedding_model, clustering):\n result = []\n preds = self.predict_gen(gen_reviews, 
embedding_model, clustering)\n\n acc = accuracy_score(np.array(clusters), np.array(preds))\n conf = confusion_matrix(np.array(clusters), np.array(preds))\n\n return acc, conf", "def generate_clustering_info(self, algorithm_type, clustering_parameters, clusterings = []):\n clustering_info = {}\n for i, running_parameters in enumerate(clustering_parameters):\n\n clustering_id = \"clustering_%04d\"%(self.current_clustering_id)\n self.current_clustering_id += 1\n clustering_info[clustering_id] = {\n \"type\":algorithm_type,\n \"clustering\": None,\n \"parameters\": running_parameters\n }\n\n if clusterings != []:\n clustering_info[clustering_id][\"clustering\"] = clusterings[i]\n\n return clustering_info", "def run_algorithm(algorithm, algorithm_kwargs, clustering_id):\n clustering = algorithm.perform_clustering(algorithm_kwargs)\n return (clustering_id, clustering)", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-t\", \"--test\", dest=\"test\", type=\"string\",\n help=\"supply help\")\n\n parser.add_option(\"--method\", dest=\"method\", type=\"choice\",\n choices=(\"metrics\", \"summary\", \"module_summary\"),\n help=\"method to summarise clustering\")\n\n parser.add_option(\"--ref-gtf-files\", dest=\"ref_gtf\", type=\"string\",\n help=\"comma separated list of reference gtf files\")\n\n # add common options (-h/--help, ...) and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.method == \"metrics\":\n infile = argv[-1]\n E.info(\"loading input file: %s\" % infile)\n assert infile\n\n df = pd.read_table(infile,\n sep=\"\\t\",\n header=None,\n index_col=0)\n\n df = df.ix[:, :50]\n cluster_combs = (x for x in itertools.combinations(df.columns,\n 2))\n genes = df.index\n results_dict = {}\n all_clusts = {}\n\n E.info(\"setting up cluster containers\")\n for i in df.columns:\n clusters = set(df[i].values.tolist())\n cluster_dict = {}\n for clust in clusters:\n cluster_dict[clust] = []\n for gene in genes:\n cluster_dict[df[i][gene]].append(gene)\n\n for col in clusters:\n col_set = set()\n clust_col = cluster_dict[col]\n gene_members = itertools.combinations(clust_col,\n 2)\n col_set.update(gene_members)\n cluster_dict[col] = col_set\n all_clusts[i] = cluster_dict\n E.info(\"generating all pair-wise cluster comparisons\")\n E.info(\"calculating adjusted mutual information\")\n for k in cluster_combs:\n clusters1 = all_clusts[k[0]]\n clusters2 = all_clusts[k[1]]\n metric_dict = {}\n metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,\n clusters2)\n results_dict[k] = metric_dict\n\n res_frame = pd.DataFrame(results_dict).T\n res_frame = res_frame.reset_index()\n res_frame.drop(['level_0'], inplace=True, axis=1)\n res_frame.drop(['level_1'], inplace=True, axis=1)\n\n # flatten rand indices and add to output dataframe\n rand_arrays = TS.randIndexes(df)\n flat_adj_rand = TS.unravel_arrays(rand_arrays[0])\n flat_rand = TS.unravel_arrays(rand_arrays[1])\n res_frame['Rand_Index'] = flat_rand\n res_frame['Adjusted_Rand_Index'] = flat_adj_rand\n E.info(\"aggregating results\")\n\n res_frame.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"summary\":\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n file_dict = {}\n for fle in list_of_files:\n fname = fle.split(\"/\")[-1]\n condition = fname.split(\"-\")[0]\n ref = fname.split(\"-\")[1]\n df_ = 
pd.read_table(fle,\n sep=\"\\t\",\n header=0,\n index_col=0)\n df_.columns = ['gene_id', 'cluster']\n clust_dict = {}\n for idx in df_.index:\n cluster = df_.loc[idx]['cluster']\n gene = df_.loc[idx]['gene_id']\n try:\n clust_dict[cluster] += 1\n except KeyError:\n clust_dict[cluster] = 1\n med_size = np.median(clust_dict.values())\n file_dict[fname] = {'condition': condition,\n 'reference': ref,\n 'median_cluster_size': med_size}\n\n outframe = pd.DataFrame(file_dict).T\n outframe.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"module_summary\":\n # get lncRNA/gene lengths from reference gtfs\n ref_gtfs = options.ref_gtf.split(\",\")\n length_dict = {}\n for ref in ref_gtfs:\n oref = IOTools.openFile(ref, \"rb\")\n git = GTF.transcript_iterator(GTF.iterator(oref))\n for gene in git:\n for trans in gene:\n length = trans.end - trans.start\n try:\n length_dict[trans.gene_id] += length\n except KeyError:\n length_dict[trans.gene_id] = length\n oref.close()\n\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n fdfs = []\n for fle in list_of_files:\n cond = fle.split(\"/\")[-1].split(\"-\")[0]\n refer = fle.split(\"/\")[-1].split(\"-\")[1]\n _df = pd.read_table(fle, sep=\"\\t\",\n header=0, index_col=0)\n _df.columns = ['gene_id', 'cluster']\n clusters = set(_df['cluster'])\n c_dict = {}\n # summarize over each cluster\n for clust in clusters:\n lengths = []\n c_df = _df[_df['cluster'] == clust]\n for lid in c_df['gene_id']:\n lengths.append(length_dict[lid])\n c_dict[clust] = {'cluster_size': len(c_df['gene_id']),\n 'mean_length': np.mean(lengths),\n 'index': (cond, refer),\n 'module': clust}\n cdf = pd.DataFrame(c_dict).T\n # use a multindex for hierarchical indexing\n midx = pd.MultiIndex.from_tuples(cdf['index'])\n cdf.index = midx\n cdf.drop(['index'], inplace=True, axis=1)\n fdfs.append(cdf)\n\n # generate a single output df\n s_df = fdfs[0]\n fdfs.pop(0)\n for df in fdfs:\n s_df = s_df.append(df)\n\n s_df.to_csv(options.stdout,\n index_label=(\"condition\", \"reference\"),\n sep=\"\\t\")\n\n # write footer and output benchmark information.\n E.Stop()", "def interpret_clusters(self, split=0.7, all_demos=None, num_clusters=None, \n max_depth=CLUSTER_DEPTH, data=None, labels=None, verbose=True):\n all_demos = self.all_data if all_demos is None else all_demos\n clusters = self.get_ordered_clusters(labels, num_clusters)\n data = self.demos if data is None else data\n labels = self.labels if labels is None else labels\n\n cluster_formulas = []\n counter = 0\n sep = \"\\n \"\n for c in clusters:\n counter += 1\n res = self.sample_from_clusters(num_samples=split,\n all_data=all_demos,\n pos_validation=True, \n neg_validation=True,\n which_cluster=counter)\n positive_samples, val_positive_samples = res[0], res[1]\n negative_samples, val_negative_samples = res[2], res[3]\n z = 0\n for d in positive_samples:\n if d[1] == 0: z += 1\n\n cluster_data = {'pos': positive_samples,\n 'neg': negative_samples}\n val_cluster_data = {'pos': val_positive_samples,\n 'neg': val_negative_samples}\n\n if verbose: print(sep +\"Checking formulas \" + \\\n \"with max depth {}\\n\".format(max_depth))\n\n cluster_formula, value_formula = wrapper_train(max_depth,\n cluster_data, \n val_cluster_data,\n verbose=verbose,\n pred_data=[self.pipeline_X,\n self.pipeline_y])\n if cluster_formula is not None:\n print(cluster_formula)\n\n cluster_formulas.append((c, cluster_formula, value_formula))\n self.reset_pipeline()\n\n return cluster_formulas", "def run_evaluation(self, 
n_runs=1, n_points=1000, n_iterations=1, min_n_components=2, max_n_components=25,\n\t\t\t\t\t n_splits=3, save_data=False, file_label='',n_microstates=None, all_methods=True,\n\t\t\t\t\t assign_transition_points=True):\n\n\t\tif self.presampled_data is not None:\n\t\t\tsampled_data = self.presampled_data[0]\n\t\t\ttrue_clustering = self.presampled_data[1]\n\t\t\tn_runs = sampled_data.shape[0]\n\n\t\tself.cluster_score_ami_kmeans_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_AW_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_spectral_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_density_peaks_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_GMM_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_GMM_FE_min_ = np.zeros(n_runs)\n\n\t\tself.cluster_score_fm_kmeans_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_AW_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_spectral_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_density_peaks_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_GMM_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_GMM_FE_min_ = np.zeros(n_runs)\n\n\t\tself.cluster_score_vm_kmeans_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_AW_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_spectral_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_density_peaks_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_GMM_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_GMM_FE_min_ = np.zeros(n_runs)\n\n\t\tdata = self.toy_model_.sample(3)\n\n\t\t# Create free energy estimators\n\t\tgmm_FE = GMM_FE.FreeEnergyClustering(data, min_n_components=min_n_components, max_n_components=max_n_components,\n\t\t\t\t\t\t\t\t\t x_lims=self.x_lims_, n_grids=self.n_grids_, stack_landscapes=False,\n\t\t\t\t\t\t\t\t\t n_splits=n_splits, n_iterations=n_iterations,convergence_tol=self.convergence_tol_,\n\t\t\t\t\t\t\t\t\t verbose=self.verbose_)\n\n\t\tkm = kmc.KMeansCluster(min_n_components, max_n_components)\n\t\taw = awc.AWCluster(min_n_components, max_n_components)\n\t\tspectral = sc.SpectralCluster(min_n_components, max_n_components)\n\n\t\tall_data = []\n\t\tfor i_run in range(n_runs):\n\t\t\tprint(\"Run: \"+str(i_run+1)+'/'+str(n_runs))\n\n\t\t\tif self.presampled_data is None:\n\t\t\t\t# Sample data\n\t\t\t\tdata = self.toy_model_.sample(n_points)\n\t\t\telse:\n\t\t\t\tdata = sampled_data[i_run]\n\t\t\t\n\t\t\tall_data.append(data)\n\n\t\t\tprint('Shape data: ' + str(data.shape))\n\n\t\t\t# Set data in model and estimate GMM density\n\t\t\tgmm_FE.data_ = data\n\t\t\tcoords, est_FE_landsc, FE_points = gmm_FE.landscape()\n\n\t\t\t# Get true cluster labels\n\t\t\tif self.presampled_data is None:\n\t\t\t\tif hasattr(self.toy_model_, \"assign_cluster_labels\"):\n\t\t\t\t\tself.true_labels_ = self.toy_model_.assign_cluster_labels(data)\n\t\t\t\telse:\n\t\t\t\t\tprint('Setting true labels.')\n\t\t\t\t\tself.true_labels_, _ = self.true_FE_.cluster(data, np.zeros(data.shape[0]))\n\t\t\telse:\n\t\t\t\tself.true_labels_ = true_clustering[i_run]\n\t\t\t\n\t\t\t# Cluster data with different methods\n\t\t\tif n_microstates is None:\n\t\t\t\tself.FE_min_labels, _ = gmm_FE.cluster(data, FE_points, assign_transition_points=assign_transition_points)\n\t\t\telse:\n\t\t\t\tkmea = KMeans(n_clusters=n_microstates).fit(data[::2])\n\t\t\t\tmicrostate_centers = kmea.cluster_centers_\n\t\t\t\tself.FE_min_labels, _ = gmm_FE.cluster(microstate_centers, FE_points, data, assign_transition_points=assign_transition_points, unravel_grid=False)\n\n\t\t\tif all_methods:\n\t\t\t\tself.km_labels = km.cluster(data)\n\t\t\t\tself.aw_labels = 
aw.cluster(data)\n\t\t\t\tself.spectral_labels = spectral.cluster(data)\n\n\t\t\t# Score clustering using different scoring metrics\n\t\t\t# V-measure score\n\t\t\tself.cluster_score_vm_GMM_FE_min_[i_run] = self._score_clustering(self.FE_min_labels,'vm')\n\t\t\tprint(self.cluster_score_vm_GMM_FE_min_[i_run])\n\t\t\tif all_methods:\n\t\t\t\tself.cluster_score_vm_GMM_[i_run] = self._score_clustering(gmm_FE.density_est_.predict(data),'vm')\n\t\t\t\tself.cluster_score_vm_kmeans_[i_run] = self._score_clustering(self.km_labels,'vm')\n\t\t\t\tself.cluster_score_vm_AW_[i_run] = self._score_clustering(self.aw_labels,'vm')\n\t\t\t\tself.cluster_score_vm_spectral_[i_run] = self._score_clustering(self.spectral_labels,'vm')\n\n\t\t\t\t# Adjusted MI\n\t\t\t\tself.cluster_score_ami_GMM_FE_min_[i_run] = self._score_clustering(self.FE_min_labels,'ami')\n\t\t\t\tself.cluster_score_ami_GMM_[i_run] = self._score_clustering(gmm_FE.density_est_.predict(data),'ami')\n\t\t\t\tself.cluster_score_ami_kmeans_[i_run] = self._score_clustering(self.km_labels,'ami')\n\t\t\t\tself.cluster_score_ami_AW_[i_run] = self._score_clustering(self.aw_labels,'ami')\n\t\t\t\tself.cluster_score_ami_spectral_[i_run] = self._score_clustering(self.spectral_labels,'ami')\n\n\t\t\t\t# Fowlkes Mallows\n\t\t\t\tself.cluster_score_fm_GMM_FE_min_[i_run] = self._score_clustering(self.FE_min_labels,'fm')\n\t\t\t\tself.cluster_score_fm_GMM_[i_run] = self._score_clustering(gmm_FE.density_est_.predict(data),'fm')\n\t\t\t\tself.cluster_score_fm_kmeans_[i_run] = self._score_clustering(self.km_labels,'fm')\n\t\t\t\tself.cluster_score_fm_AW_[i_run] = self._score_clustering(self.aw_labels,'fm')\n\t\t\t\tself.cluster_score_fm_spectral_[i_run] = self._score_clustering(self.spectral_labels,'fm')\n\t\t\n\t\tif save_data:\n\t\t\tif self.presampled_data is None:\n\t\t\t\tnp.save('data_out/sampled_data_'+self.toy_model_.name+file_label+'.npy',all_data)\n\n\t\t\tif False:\n\t\t\t\tnp.save('data_out/cluster_score_fm_FE_min_'+self.toy_model_.name+file_label+'.npy',self.cluster_score_fm_GMM_FE_min_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_GMM_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_fm_GMM_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_kmeans_' + self.toy_model_.name +file_label +'.npy', self.cluster_score_fm_kmeans_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_AW_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_fm_AW_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_spectral_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_fm_spectral_)\n\n\t\t\t\tnp.save('data_out/cluster_score_ami_FE_min_'+self.toy_model_.name+file_label+'.npy',self.cluster_score_ami_GMM_FE_min_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_GMM_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_GMM_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_kmeans_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_kmeans_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_AW_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_AW_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_spectral_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_spectral_)\n\n\t\t\tnp.save('data_out/cluster_score_vm_FE_min_'+self.toy_model_.name+file_label+'.npy',self.cluster_score_vm_GMM_FE_min_)\n\t\t\tif all_methods:\n\t\t\t\tnp.save('data_out/cluster_score_vm_GMM_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_GMM_)\n\t\t\t\tnp.save('data_out/cluster_score_vm_kmeans_' + self.toy_model_.name 
+ file_label+'.npy', self.cluster_score_vm_kmeans_)\n\t\t\t\tnp.save('data_out/cluster_score_vm_AW_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_AW_)\n\t\t\t\tnp.save('data_out/cluster_score_vm_spectral_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_spectral_)\n\t\treturn", "def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return", "def getAllContributingAlgorithmsToBest(algnamelist, target_lb=1e-8, \n target_ub=1e2):\n \n print \"Generating best algorithm data from given algorithm list...\\n\", \n customgenerate(algnamelist)\n \n bestalgfilepath = 'bestCustomAlg'\n picklefilename = os.path.join(bestalgfilepath, 'bestalg.pickle')\n fid = open(picklefilename, 'r')\n bestalgentries = pickle.load(fid)\n fid.close()\n print 'loading of best algorithm data done.'\n \n countsperalgorithm = {}\n for (d, f) in bestalgentries:\n print 'dimension:', d, ', function:', f\n print f\n setofalgs = set(bestalgentries[d,f].algs)\n # pre-processing data to only look at targets >= target_lb:\n correctedbestalgentries = []\n for i in range(0,len(bestalgentries[d,f].target)):\n if ((bestalgentries[d,f].target[i] >= target_lb) and\n (bestalgentries[d,f].target[i] <= target_ub)):\n \n correctedbestalgentries.append(bestalgentries[d,f].algs[i])\n print len(correctedbestalgentries)\n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += correctedbestalgentries.count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" 
done.\"", "def gen_cluster_accuracies():\n accuracies = {}\n with Parallel(n_jobs=morphs.parallel.N_JOBS) as parallel:\n for block_path in morphs.paths.blocks():\n print(block_path)\n spikes = morphs.load.ephys_data(block_path, collapse_endpoints=True)\n\n if len(spikes[\"recording\"].unique()) >= 1:\n template_spikes = spikes[spikes[\"stim_id\"].isin(list(\"abcdefgh\"))]\n assert len(template_spikes) > 0\n cluster_groups = template_spikes.groupby(\"cluster\")\n\n morph_dims = spikes.morph_dim.unique()\n morph_dims = morph_dims[~pd.isnull(morph_dims)]\n morph_dims.sort()\n\n max_num_reps = np.max(\n [\n len(stim_group.groupby(by=[\"recording\", \"stim_presentation\"]))\n for stim_id, stim_group in template_spikes.groupby(\"stim_id\")\n ]\n )\n\n accuracies_list = parallel(\n delayed(cluster_accuracy)(\n cluster, cluster_group, morph_dims, max_num_reps\n )\n for (cluster, cluster_group) in cluster_groups\n )\n\n accuracies[block_path] = pd.concat(accuracies_list)\n\n morphs.paths.PROCESSED_DIR.mkdir(parents=True, exist_ok=True)\n with open(morphs.paths.ACCURACIES_PKL.as_posix(), \"wb\") as f:\n pickle.dump(accuracies, f)", "def calculate_all_metrcis(self):\n self.calculate_gc_metrcis()\n self.calculate_sam_metrics()\n self.calculate_classification_metrics()\n self.calculate_losses()", "def run(\n self,\n number_of_clusters=None,\n max_K=8,\n method_clustering=\"pam\",\n init_clustering=\"random\",\n max_iter_clustering=100,\n discart_value_JI=0.6,\n bootstraps_JI=100,\n bootstraps_p_value=100,\n n_jobs=1,\n verbose=1,\n ):\n\n if number_of_clusters is None:\n self.k = optimizer.optimizeK(\n self.distance_matrix,\n self.y.to_numpy(),\n self.model_type,\n max_K,\n method_clustering,\n init_clustering,\n max_iter_clustering,\n discart_value_JI,\n bootstraps_JI,\n self.random_state,\n n_jobs,\n verbose,\n )\n\n if self.k == 1:\n warnings.warn(\"No stable clusters were found!\")\n return\n\n print(f\"Optimal number of cluster is: {self.k}\")\n\n else:\n self.k = number_of_clusters\n print(f\"Use {self.k} as number of cluster\")\n\n self.cluster_labels = (\n kmedoids.KMedoids(\n n_clusters=self.k,\n method=method_clustering,\n init=init_clustering,\n metric=\"precomputed\",\n max_iter=max_iter_clustering,\n random_state=self.random_state,\n )\n .fit(self.distance_matrix)\n .labels_\n )\n\n (\n self._data_clustering_ranked,\n self.p_value_of_features,\n ) = stats.calculate_global_feature_importance(\n self.X, self.y, self.cluster_labels, self.model_type\n )\n self._p_value_of_features_per_cluster = (\n stats.calculate_local_feature_importance(\n self._data_clustering_ranked, bootstraps_p_value\n )\n )", "def get_sklearn_algorithms(verbose=False):\n from collections import defaultdict\n import importlib\n import sklearn\n algos = defaultdict(list)\n if verbose: print(dir(sklearn))\n for nom_module in dir(sklearn):\n if verbose: print(nom_module)\n try:\n to_import = \"sklearn.%s\" % nom_module\n module = importlib.import_module(to_import)\n for nom_fonction in dir(module):\n fonction = getattr(module, nom_fonction)\n if hasattr(fonction, \"fit\"):\n if verbose: print(\" nom algorithme = \", nom_fonction)\n algos[nom_module].append(fonction)\n except Exception as e:\n if verbose: print(e)\n if verbose: print(\"=\" * 30)\n return algos", "def build_algorithm(self, algorithm_type):\n distance_matrix = self.matrix_handler.distance_matrix\n algorithm_execution_parameters = {}\n if algorithm_type == \"spectral\":\n # We need to set number of clusters for performance and we get sigma if defined\n 
algorithm_execution_parameters[\"max_clusters\"] = self.evaluation_parameters[\"maximum_clusters\"]\n if \"sigma\" in self.clustering_parameters[\"algorithms\"][\"spectral\"]:\n algorithm_execution_parameters[\"sigma_sq\"] = self.clustering_parameters[\"algorithms\"][\"spectral\"][\"sigma\"]\n # else it calculates its own sigma\n\n if algorithm_type in [\"spectral\",\"dbscan\",\"gromos\",\"kmedoids\",\"random\",\"hierarchical\"] :\n return ClusteringExplorer.get_clustering_algorithm_class()[algorithm_type](distance_matrix, **algorithm_execution_parameters)\n else:\n print \"[ERROR][ClusteringExplorer::build_algorithms] Not known algorithm type ( %s )\"%(algorithm_type)\n self.notify(\"SHUTDOWN\", \"Not known algorithm type ( %s )\"%(algorithm_type))\n exit()", "def calculate_all_scores(best_phenotype, clustering_algorithm, dataset, y):\n samples_dist_matrix = distance.squareform(distance.pdist(dataset.values))\n allowed_fitness = list(DICT_ALLOWED_FITNESSES.keys())\n scores = [(fitness_name, fitness_value) for fitness_name, fitness_value in\n zip(allowed_fitness,\n eval_multiple(dataset.values, clustering_algorithm, allowed_fitness, samples_dist_matrix, y,\n best_phenotype))]\n scores = dict(scores)\n return scores", "def measureAll(authors_texts,sectorialized_agents):\n authors_texts=P.text.aux.textFromAuthors(authors_texts,self.topm_dict[\"sectorialized_agents\"])\n authors_measures={}\n # análise de cada mensagem e de cada autor\n for author in authors_texts:\n authors_measures[author]={}\n texts=authors_texts[author]\n authors_measures[author][\"raw_strings\"]=P.text.raw.analyseAll(texts)\n authors_measures[author][\"pos\"]= P.text.pos.analyseAll(authors_analysis[author][\"raw_analysis\"])\n authors_measures[author][ \"wordnet\" ]=P.text.wordnet.analyseAll(authors_analysis[author][\"pos_analysis\"])\n authors_measures[author][\"tfIdf\"]=P.text.tfIdf.analyseAll(texts) # tfIdf de cada texto e do autor, numeric: mean e std das distancias\n # análise de cada setor e da estrutura toda\n# sectors_texts=P.text.aux.textFromSectors(authors_text,sectorialized_agents)\n sectors_measures={}\n for sector in sectorialized_agents:\n sectors_measures[sector][\"raw_strings\"]=P.text.raw.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n sectors_measures[sector][\"pos\"]= P.text.pos.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n sectors_measures[sector][\"wordnet\"]= P.text.wordnet.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n # tfIdf de cada texto e de cada autor, numeric: mean e std das distancias por texto e por autor, e media e etd dos autores\n sectors_measures[sector][\"tfIdf\"]= P.text.tfIdf.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n\n# texts=[sectors_texts[i] for i in (\"peripherals\",\"intermediaries\",\"hubs\")]\n# sectors_analysis[\"raw_strings\"]=P.text.raw.analyseAll(texts)\n# sectors_analysis[\"pos\"]= P.text.pos.analyseAll(sectors_analysis[\"raw_analysis\"])\n# sectors_analysis[ \"wordnet\" ]=P.text.wordnet.analyseAll(sectors_analysis[\"pos_analysis\"])\n# sectors_analysis[\"tfIdf\"]=P.text.tfIdf.tfIdf(texts)\n\n overall_measures[\"raw_strings\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n overall_measures[\"pos\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n overall_measures[\"wordnet\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por 
setor\n # tfIdf measurespor texto, autor e setor, numeric: media e desvio das distancias por cada grupo, media e desvio dos setores e dos autores\n overall_measures[\"tfIdf\"]=P.text.tfIdf.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n\n del authors_texts,sectorialized_agents,author, sector\n return locals()", "def evaluate(self, clustering):\n # Pca for each one of the clusters\n pca_mean_val = 0.;\n MAX_ELEMENTS = 1000\n for c in clustering.clusters:\n # Pick the coordinates (ensuring that we are copying them)\n element_indexes = c.all_elements\n ###################\n # Performance hack\n ###################\n # As it can be very slow for big clusters (i.e. > 3k elements) we'll compress this clusters \n # before calculating PCA. It should increase variance but will allow calculations.\n # It should use the kmedoids compressor\n if len(c.all_elements) > MAX_ELEMENTS:\n element_indexes = c.get_random_sample(MAX_ELEMENTS)\n print \"[PCA] Random sampling too big cluster to improve performance (%d elements -> %d elements).\"%(len(c.all_elements),MAX_ELEMENTS)\n ###################\n \n fitting_coordinates_of_this_cluster = self.fitting_coordinates[element_indexes]\n \n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster)\n \n if self.calculation_coordinates is not None:\n calculation_coordinates_of_this_cluster = self.calculation_coordinates[element_indexes]\n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster,\n calculationCoordsets = calculation_coordinates_of_this_cluster)\n \n # Make an iterative superposition (to get the minimum RMSD of all with respect to a mean conformation)\n calculator.iterativeSuperposition()\n\n # Calculate the covariance matrix\n if self.calculation_coordinates is None:\n covariance_matrix = PCAMetric.create_covariance_matrix(fitting_coordinates_of_this_cluster)\n else:\n covariance_matrix = PCAMetric.create_covariance_matrix(calculation_coordinates_of_this_cluster)\n \n # And then the eigenvalue we are interested in\n pca_mean_val += PCAMetric.calculate_biggest_eigenvalue(covariance_matrix)\n print \"PCA finished\"\n return pca_mean_val /clustering.total_number_of_elements", "def _compute_util_data(self):\n\n print(\"Computing PCA of document vectors.\")\n self.pca = PCA(n_components = 3)\n\n print(\"Computing document clusters in PCA basis.\")\n inferred_vecs = np.array([self.model.infer_vector(doc.words) for doc in self.tagged_docs])\n self.pca_reduced_vecs = self.pca.fit_transform(inferred_vecs)\n n_clusters = 25 # TODO find way to determine approx cluster size\n self.kmeans = KMeans(init = 'k-means++', n_clusters = n_clusters, random_state = 0)\n self.kmeans_preds = self.kmeans.fit_predict(self.pca_reduced_vecs)", "def compute_feature_properties(self):\n\n self.valuecounts = {}\n self.unique_values = {}\n self.missing_ratios = {}\n self.counts = {}\n self.codemaps = {}\n for f in self.features:\n # Compute various things\n all_values = [self.data[l].get(f,\"?\") for l in self.data]\n missing_data_ratio = all_values.count(\"?\") / (1.0*len(all_values))\n non_q_values = [v for v in all_values if v != \"?\"]\n counts = {}\n for v in non_q_values:\n counts[v] = non_q_values.count(v)\n unique_values = list(set(non_q_values))\n # Sort unique_values carefully.\n # Possibly all feature values are numeric strings, e.g. 
\"1\", \"2\", \"3\".\n # If we sort these as strings then we get weird things like \"10\" < \"2\".\n # This can actually matter for things like ordinal models.\n # So convert these to ints first...\n if all([v.isdigit() for v in unique_values]):\n unique_values = list(map(int, unique_values))\n unique_values.sort()\n unique_values = list(map(str, unique_values))\n # ...otherwise, just sort normally\n else:\n unique_values.sort()\n self.unique_values[f] = unique_values\n\n N = len(unique_values)\n self.valuecounts[f] = N\n self.missing_ratios[f] = missing_data_ratio\n self.counts[f] = counts\n self.codemaps[f] = self.build_codemap(unique_values)", "def compute_statistics(self):\n for i in range(len(self.wine_matrix[0, :])):\n feature = self.wine_matrix[:, i]\n self.wine_stats['feature ' + str(i)] = {}\n if i == 11: # results column\n self.wine_stats['feature ' + str(i)]['positive_class_ratio'] = (feature == 1).sum() / len(feature)\n null, self.wine_stats['feature ' + str(i)]['pvalue'] = stats.normaltest(feature)\n\n # plot\n # pyplot.hist(feature, bins=50)\n # pyplot.show()\n\n for i in range(len(self.cancer_matrix[0, :])):\n feature = self.cancer_matrix[:, i]\n self.cancer_stats['feature ' + str(i)] = {}\n if i == 10: # results column\n self.cancer_stats['feature ' + str(i)]['positive_class_ratio'] = (feature == 1).sum() / len(feature)\n null, self.cancer_stats['feature ' + str(i)]['pvalue'] = stats.normaltest(feature)\n\n # plot\n # pyplot.hist(feature, bins=50)\n # pyplot.show()", "def aggregate_results(results):\n\n for (config,con,dec),folds in results.iteritems():\n m = MODEL_PATTERN.match(config)\n if m:\n mode = m.groupdict()['mode'] # mle, rl, mrt, ...\n model = m.groupdict()['model'] # haem, hacm, hard, ...\n align = m.groupdict()['align'] # crp, cls ...\n else:\n mode, model, align = '', '', ''\n # mean accuracies across seeds for each fold\n foldaccuracies = []\n # we count number of models over folds and seeds\n num_individual_models = 0\n\n for foldname,fold in folds.items():\n if 'Q' in options.mode:\n seedaccurracies = fold.values()[:1] if fold.values() else [] # pick one\n# SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n else:\n seedaccurracies = []\n for seed_acc in fold.values():\n seedaccurracies.append(seed_acc)\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n # aggregate on fold level\n fold['__MEAN__'] = float(np.mean(seedaccurracies))\n fold['__SD__'] = float(np.std(seedaccurracies))\n l = len(seedaccurracies)\n num_individual_models += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__MEAN__')] += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__SD__')] += l\n\n # statistics over seeds for this fold\n fold['__STATS__'] = fold['__MEAN__'], fold['__SD__'], l\n foldaccuracies.append(fold['__MEAN__'])\n # aggregate on (config, condition, decoding) level\n folds['__MEAN__'] = float(np.mean(foldaccuracies))\n folds['__SD__'] = float(np.std(foldaccuracies))\n # statistics over folds for this (config, condition, decoding)\n folds['__STATS__'] = folds['__MEAN__'], folds['__SD__'], num_individual_models", "def compute_metrics(\n Phi, optimal_subspace\n):\n feature_norm = jnp.linalg.norm(Phi) / Phi.shape[0]\n cosine_similarity = compute_cosine_similarity(Phi, optimal_subspace)\n\n metrics = {\n 'cosine_similarity': cosine_similarity,\n 'feature_norm': feature_norm,\n 'eigengame_subspace_distance': eigengame_subspace_distance(\n Phi, optimal_subspace\n ),\n }\n\n _, d = Phi.shape\n if d > 1:\n grassman_distance = 
compute_grassman_distance(Phi, optimal_subspace)\n metrics |= {'grassman_distance': grassman_distance}\n elif d == 1:\n dot_product = compute_normalized_dot_product(Phi, optimal_subspace)\n metrics |= {'dot_product': dot_product}\n\n return metrics", "def extractBestAlgorithms(args = algs2009, f_factor=2,\n target_lb=1e-8, target_ub=1e22):\n\n # TODO: use pproc.TargetValues class as input target values\n # default target values:\n targets = pproc.TargetValues(\n 10**np.arange(np.log10(max((1e-8, target_lb))),\n np.log10(target_ub) + 1e-9, 0.2))\n # there should be a simpler way to express this to become the\n # interface of this function\n\n print 'Loading algorithm data from given algorithm list...\\n' \n\n verbose = True\n dsList, sortedAlgs, dictAlg = pproc.processInputArgs(args, verbose=verbose)\n\n print 'This may take a while (depending on the number of algorithms)'\n\n selectedAlgsPerProblem = {}\n for f, i in pproc.dictAlgByFun(dictAlg).iteritems():\n for d, j in pproc.dictAlgByDim(i).iteritems():\n selectedAlgsPerProblemDF = []\n best = BestAlgSet(j)\n \n for i in range(0, len(best.target)):\n t = best.target[i]\n # if ((t <= target_ub) and (t >= target_lb)):\n if toolsstats.in_approximately(t,\n targets((f, d), discretize=True)):\n # add best for this target:\n selectedAlgsPerProblemDF.append(best.algs[i])\n \n # add second best or all algorithms that have an ERT\n # within a factor of f_factor of the best:\n secondbest_ERT = np.infty\n secondbest_str = ''\n secondbest_included = False \n for astring in j:\n currdictalg = dictAlg[astring].dictByDim()\n if currdictalg.has_key(d):\n curralgdata = currdictalg[d][f-1] \n currERT = curralgdata.detERT([t])[0]\n if (astring != best.algs[i]):\n if (currERT < secondbest_ERT):\n secondbest_ERT = currERT\n secondbest_str = astring\n if (currERT <= best.detERT([t])[0] * f_factor):\n selectedAlgsPerProblemDF.append(astring)\n secondbest_included = True\n if not (secondbest_included) and (secondbest_str != ''):\n selectedAlgsPerProblemDF.append(secondbest_str)\n \n if len(selectedAlgsPerProblemDF) > 0:\n selectedAlgsPerProblem[(d, f)] = selectedAlgsPerProblemDF\n \n print 'pre-processing of function', f, 'done.' 
\n \n print 'loading of best algorithm(s) data done.'\n \n countsperalgorithm = {}\n for (d, f) in selectedAlgsPerProblem:\n print 'dimension:', d, ', function:', f\n setofalgs = set(selectedAlgsPerProblem[d,f])\n \n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += selectedAlgsPerProblem[d,f].count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"\n \n return selectedalgsperdimension", "def get_clusters(ensemble, grouping, clustering):\n\n\t# Prevent SQL injected since column names cannot be parameterized.\n\tif \";\" in ensemble or \";\" in grouping or \";\" in clustering:\n\t\treturn None\n\n\tensemble = ensemble.replace('EnsEns','Ens')\n\tdf = None;\n\n\tif grouping in ['annotation','cluster']:\n\t\tgroupingu = ensemble+\".\"+grouping+\"_\"+clustering\n\telif grouping in ['NeuN']:\n\t\tgroupingu = \"CONCAT('NeuN',cells.\"+grouping+\")\"\n\telse:\n\t\tgroupingu = \"cells.\"+grouping\n\n\t# Get methylation info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snmC' as modality, \\\n\t\t%(groupingu)s as groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'groupingu': groupingu,\n\t\t\t\t\t'clustering': clustering}\n\ttry:\n\t\tdf = pd.read_sql(query, db.get_engine(current_app, 'methylation_data'))\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\t\t# return None\n\n\t# Get snATAC info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snATAC' AS modality, %(ensemble)s.cluster_ATAC groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_atac = pd.read_sql(query, db.get_engine(current_app, 'snATAC_data'))\n\t\tdf=df.append(df_atac)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\n\t# Get snRNA info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'RNA' AS modality, %(ensemble)s.cluster_RNA groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_rna = pd.read_sql(query, db.get_engine(current_app, 'RNA_data'))\n\t\tdf=df.append(df_rna)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\treturn df", "def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n 
self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n 
total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")", "def clustering_metrics(clusts, node_assn, node_pred):\n pred_vox = cluster_to_voxel_label(clusts, node_pred)\n true_vox = cluster_to_voxel_label(clusts, node_assn)\n ari = ARI(pred_vox, true_vox)\n ami = AMI(pred_vox, true_vox)\n sbd = SBD(pred_vox, true_vox)\n pur, eff = purity_efficiency(pred_vox, true_vox)\n return ari, ami, sbd, pur, eff", "def get_combinations(classes_folder='./data/CASIA1_classes_by_unbalanced_kmeans/', \n originals='./data/CASIA1_originals', fakes_ela='./data/CASIA1_fakes_ela'):\n classes_ = []\n for i in range(20):\n classes_.append('{}{}' 
.format(classes_folder, i+1))\n\n medians_ = [0,3,5,7,9,11,13,15,17,19]\n\n iterations_ = []\n for i in range(21):\n iterations_.append(i)\n\n threshold_ = []\n for i in range(40):\n threshold_.append(i)\n\n for i, item in enumerate(classes_):\n fakes_list = os.listdir(item)\n fakes = load_fakes(fakes_list, item, originals)\n\n best = 0\n best_median_filter_size = 0\n best_number_of_iterations = 0\n best_thresh = 0\n for x, median_filter_size in enumerate(medians_):\n for y, number_of_iterations in enumerate(iterations_):\n for t, thresh in enumerate(threshold_):\n whole_score = 0\n for e, elem in enumerate(fakes):\n image = cv2.imread(os.path.join(fakes_ela, elem.path.split('\\\\')[-1]))\n\n if thresh > 0:\n image_ = pywt.threshold(image, thresh, 'soft')\n image = cv2.normalize(image_, image, 0, 1, cv2.NORM_MINMAX)\n image = 255 * image\n image = image.astype(np.uint8)\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n \n image = cv2.inRange(image, np.array([0,0,0]), np.array([180,255,60]))\n image = cv2.bitwise_not(image)\n\n if median_filter_size > 0:\n image = cv2.medianBlur(image, median_filter_size)\n\n kernel = np.ones((3, 3), np.uint8)\n image = cv2.morphologyEx(image, cv2.MORPH_GRADIENT, kernel, iterations=number_of_iterations)\n\n cnts = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n\n max_idx = 0\n max_pnts = 0\n for u, ulem in enumerate(cnts):\n if cv2.contourArea(ulem) < max_pnts:\n continue\n else:\n max_idx = u\n max_pnts = cv2.contourArea(ulem)\n\n if len(cnts) > 0:\n (x, y, w, h) = cv2.boundingRect(cnts[max_idx])\n pred = {\n \"x\": x,\n \"y\": y,\n \"w\": w,\n \"h\": h\n }\n else:\n pred = None\n\n whole_score += evaluate_augmentation_fit(pred, elem)\n if best < whole_score:\n best = whole_score\n best_median_filter_size = median_filter_size\n best_number_of_iterations = number_of_iterations\n best_thresh = thresh\n print(\"Class: {}; MedianFilterSize: {}; Iterations: {}; Thresh: {}; Score: {}\" .format(item, median_filter_size, number_of_iterations, thresh, round(whole_score, 2)))\n print(\"###########\")\n print(\"Best: {} -> {} % ({}, {}, {})\" .format(round(best, 2), round((best/len(fakes)), 2), best_median_filter_size, best_number_of_iterations, best_thresh))\n print(\"###########\")", "def calculate_clusters(study_id):\n\n with current_app.app_context():\n cur = conn.cursor()\n\n cur.execute(\"\"\"SELECT * FROM STATS WHERE STUDY_ID=%s\"\"\", (str(study_id),))\n study = fetchoneClean(cur)\n clusters_calculating = study[4]\n clusters_changed = study[5]\n if clusters_changed:\n if clusters_calculating:\n return {'message': 'calculating'}\n cur.execute(\"\"\"UPDATE STATS SET CLUSTERS_CALCULATING = TRUE WHERE STUDY_ID = %s\"\"\", (str(study_id),))\n conn.commit()\n\n distance = study[7]['matrix']\n card_names = study[7]['cardNames']\n cur.execute(\"\"\"SELECT COUNT(ID) FROM PARTICIPANT WHERE STUDY_ID = %s\"\"\", (str(study_id),))\n total_participants = fetchoneClean(cur)[0]\n\n distance_matrix = calculate_square_form(distance, total_participants)\n distArray = ssd.squareform(distance_matrix)\n\n try:\n clusters = hierarchy.linkage(distArray, method='average')\n except ValueError:\n return {'message': 'not enough data'}\n\n tree = hierarchy.to_tree(clusters, rd=False)\n # TODO Distance 0 on root\n dendro = dict(children=[], hierarchy=0, distance=100)\n add_node(tree, dendro, card_names)\n\n cur.execute(\"\"\"UPDATE STATS SET CLUSTERS = %s WHERE STUDY_ID = %s\"\"\", (json.dumps(dendro), str(study_id),))\n 
cur.execute(\"\"\"UPDATE STATS SET CLUSTERS_CALCULATING = FALSE WHERE STUDY_ID = %s\"\"\", (str(study_id),))\n cur.execute(\"\"\"UPDATE STATS SET CLUSTERS_CHANGED = FALSE WHERE STUDY_ID = %s\"\"\", (str(study_id),))\n conn.commit()\n else:\n dendro = study[6]\n\n return dendro", "def apply_algorithms(x: np.ndarray, label_true, params, components, database_name):\n names = ['Original dataset', 'Our PCA results', 'KMeans with previous our PCA reduction',\n 'KMeans without previous reduction (PCA)', 'KMeans without previous reduction (T-SNE)']\n\n datasets = []\n labels = []\n reduct = []\n\n # get the representation of the original matrix splitted to be plotted\n partial_x = split_db_original(x, components)\n datasets.append(partial_x)\n labels.append(label_true)\n reduct.append(None)\n\n # get our PCA\n pca = OPCA(n_components=params['n_components'])\n our_pca = pca.fit_transform(x)\n datasets.append(our_pca)\n labels.append(label_true)\n reduct.append(None)\n\n # get PCA and IPCA from sklearn\n sk_pca = pca_sklearn(x, params['db_name'], params['n_components'])\n sk_ipca = ipca_sklearn(x, params['db_name'], params['n_components'])\n\n # compare the three PCA algorithms\n name = ['Our PCA', 'SK_PCA', 'SK_IPCA', 'original_data']\n pca_data = [our_pca, sk_pca['db'], sk_ipca['db'], x]\n apply_evaluation(pca_data, label_true, params, name, database_name)\n\n # KMeans with PCA reduction\n algorithm = KMeans(k=params['k'], seed=params['seed'], max_it=params['max_it'], tol=params['tol'])\n labels_kmeans = algorithm.fit_predict(our_pca)\n datasets.append(our_pca)\n labels.append(labels_kmeans)\n reduct.append(None)\n\n # KMeans without PCA reduction\n algorithm = KMeans(k=params['k'], seed=params['seed'], max_it=params['max_it'], tol=params['tol'])\n labels_kmeans = algorithm.fit_predict(x)\n datasets.append(x)\n labels.append(labels_kmeans)\n reduct.append('pca')\n datasets.append(x)\n labels.append(labels_kmeans)\n reduct.append('tsne')\n\n # selection number of dimensions of plot\n if type(params['n_components']) == int:\n if params['n_components'] == 2:\n nd = 2\n if params['n_components'] > 2:\n nd = 3\n elif type(params['n_components']) == float:\n if our_pca.shape[1] == 2:\n nd = 2\n if our_pca.shape[1] > 2:\n nd = 3\n else:\n nd = 3\n\n if nd == 2:\n pca_names = ['PCA Component 1', 'PCA Component 2']\n plot_names = [components[0], pca_names, pca_names, pca_names, ['TSNE 1', 'T-SNE 2']]\n plot2d(datasets, labels, names, plot_names, reduct)\n elif nd == 3:\n pca_names = ['PCA Component 1', 'PCA Component 2', 'PCA Component 3']\n plot_names = [components[0], pca_names, pca_names, pca_names, ['TSNE 1', 'T-SNE 2', 'T-SNE 3']]\n plot3d(datasets, labels, names, plot_names, reduct)", "def evaluate(self):\n results = dict()\n for metric in self.metrics:\n print('Evaluating clustering with metric %s' % metric)\n if metric in LABEL_METRICS.keys():\n results[metric] = LABEL_METRICS[metric](self.X, self.model.labels_)\n results['adjusted_rand_score'] = SCORE_METRICS['adjusted_rand_score'](self.Y[:, 0], self.model.labels_)\n self.results = results\n return results", "def __tool_classification__(self,task_id,shape,aggregations):\n\n print \"tool classification - more than one tool could create \" +str(shape) + \"s in task \" + str(task_id)\n\n if aggregations == {}:\n print \"warning - empty classifications\"\n return {}\n\n # only go through the \"uncertain\" shapes\n tool_classifications = {}\n\n for subject_id in aggregations:\n # look at the individual points in the cluster\n\n for 
cluster_index,cluster in aggregations[subject_id][task_id][shape+ \" clusters\"].items():\n # all_users just gives us a list of all of the users who have seen this subject\n # not relevant here\n if cluster_index == \"all_users\":\n continue\n\n # which users marked this cluster\n users = cluster[\"users\"]\n # which tool each individual user used\n tools = cluster[\"tools\"]\n assert len(tools) == len(users)\n\n # in this case, we want to \"vote\" on the tools\n ballots = zip(users,tools)\n\n tool_classifications[(subject_id,cluster_index)] = ballots\n\n # classify\n print \"tool results classification\"\n tool_results = self.__task_aggregation__(tool_classifications,task_id,{})\n assert isinstance(tool_results,dict)\n\n for subject_id,cluster_index in tool_results:\n\n new_results = tool_results[(subject_id,cluster_index)][task_id]\n # the clustering results already exist so we are just adding more data to it\n aggregations[subject_id][task_id][shape + \" clusters\"][cluster_index][\"tool_classification\"] = new_results\n\n return aggregations", "def algorithm_parameters(alg):\n if alg in list(SEM_TYPE.keys()):\n return simulate_parameters(alg)\n\n param_dict = dict()\n\n param = getfullargspec(INLINE_ALGORITHMS[alg.upper()].__init__)\n if param is not None:\n param_len = len(param.args)\n if param.defaults:\n if 'input_dim' in param.args:\n param_dict.update({'input_dim': None})\n for index, value in enumerate(reversed(param.defaults)):\n if not isfunction(value) and (value is not None):\n param_dict.update(\n {param.args[param_len - index - 1]: value})\n param = getfullargspec(INLINE_ALGORITHMS[alg.upper()].learn)\n if param is not None:\n param_len = len(param.args)\n if param_len > 2:\n if 'rank' in param.args:\n param_dict.update({'rank': None})\n return param_dict", "def clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n 
clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)", "def compute_cluster_similarities(emb_clusters1, emb_clusters2, compare, order, clmethod, plot):\n def compute_sim(e, e1, cls, cls1):\n sims = np.empty((20, 20))\n xticks, yticks = [], []\n for i, c in enumerate(cls):\n yticks.append(', '.join(c[1]) + (f' {round(c[3], 5)}' if order == 'avgfreq' else ''))\n for j, c1 in enumerate(cls1):\n if len(xticks) < 20:\n xticks.append(', '.join(c1[1]) + (f' {round(c1[3], 5)}' if order == 'avgfreq' else ''))\n sims[i, j] = jaccard_similarity_score(c[2], c1[2])\n jaccard_similarities[f'{e}-{e1}'] = sims\n\n if plot:\n if order == 'clustermap':\n similarity_clustermap(sims, xticks, yticks, f'{e}-{e1}_{clmethod}')\n elif order == 'default' or order == 'avgfreq':\n similarity_heatmap(sims, xticks, yticks, f'{e}-{e1}_{clmethod}', order)\n else:\n pass\n\n jaccard_similarities = {}\n if compare == 'cross':\n for ie, (e, cls) in enumerate(emb_clusters1.items()):\n for ie1, (e1, cls1) in enumerate(emb_clusters2.items()):\n if ie < ie1:\n compute_sim(e, e1, cls, cls1)\n elif compare == 'dot':\n for (e, cls), (e1, cls1) in zip(emb_clusters1.items(), emb_clusters2.items()):\n compute_sim(e, e1, cls, cls1)\n\n return jaccard_similarities", "def integrated_clustering(t_all,y_all,num_of_days=500,period = 1440,trim=10,min_n_clusters = 4, max_n_clusters=10,hierarchical=0):\n\n\n\n all_seg_april = initial_disaggregate(t_all,y_all,num_of_days,period = period)\n \n ''' '''\n all_seg_april_normalized = [np.array(x[0])-np.mean(x[1]) for x in all_seg_april if len(x[1])==3]\n \n ''' filter the empty segments'''\n 
all_seg_april_normalized = [x for x in all_seg_april_normalized if len(x)>0]\n \n ''' clustering in different ranges will probably have a better result'''\n if hierarchical == 0:\n pass\n elif hierarchical ==1:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()>1000]\n else:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()<1000]\n \n ''' filter out the positive segments'''\n all_positive_seg_april_normalized = [x for x in all_seg_april_normalized if x.min()>0]\n \n \n all_seg_april_normalized_trim50 = extract_first_n(all_positive_seg_april_normalized, trim)\n cluster_average = []\n \n # find optimal clustering number using silhouette score\n \n optimal_dict = {}\n \n for n_clusters in range(min_n_clusters,max_n_clusters):\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n\n # sihouette score\n cluster_labels = y_pred\n sample_silhouette_values = silhouette_samples(all_seg_april_normalized_trim50, cluster_labels)\n \n silhouette_avg = silhouette_score(pd.DataFrame(all_seg_april_normalized_trim50), cluster_labels)\n\n optimal_dict[n_clusters] = silhouette_avg +(sample_silhouette_values.min()+sample_silhouette_values.max())/2\n \n # n_clusters will give us the optimal number of clusters\n n_clusters = max(optimal_dict.iteritems(), key=operator.itemgetter(1))[0]\n\n #print n_clusters\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n \n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n cluster_average_rank = np.argsort(cluster_average)[::-1]\n rank_map = {cluster_average_rank[i_cluster]:i_cluster for i_cluster in range(n_clusters)} # old index:new index\n\n y_pred_old = y_pred\n y_pred = [rank_map[x] for x in y_pred]\n all_seg_per_cluster = [[] for i in range(n_clusters) ]\n for i_seg in range(len(all_seg_april_normalized_trim50)):\n all_seg_per_cluster[y_pred[i_seg]].append(all_seg_april_normalized_trim50[i_seg])\n \n cluster_mean = [[] for i in range(n_clusters) ]\n cluster_std = [[] for i in range(n_clusters) ]\n for i_cluster in range(n_clusters):\n cluster_mean[ i_cluster ] = np.mean(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n cluster_std[ i_cluster ] = np.std(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n \n \n \n \n #cluster_mean_2 = cluster_mean[5:6]\n \n return cluster_mean,cluster_std,n_clusters,all_seg_per_cluster", "def __existence_classification__(self,task_id,shape,aggregations):\n\n # aggregations = {}\n\n # raw_classifications and clustering_results have different hierarchy orderings- raw_classifications\n # is better for processing data and clustering_results is better for showing the end result\n # technically we only need to look at the data from clustering_results right now but its\n # hierarchy is really inefficient so use raw_classifications to help\n\n # each shape is done independently\n\n # set - so if multiple tools create the same shape - we only do that shape once\n # for shape in set(marking_tasks[task_id]):\n\n\n # pretentious name but basically whether each person who has seen a subject thinks it is a true\n # positive or not\n existence_classification = 
{\"param\":\"subject_id\"}\n\n global_cluster_index = 0\n # clusters_per_subject = []\n\n # look at the individual points in the cluster\n for subject_id in aggregations.keys():\n if subject_id == \"param\":\n continue\n\n # gold standard pts may not match up perfectly with the given clusters -\n # for example, we could have a gold penguin at 10,10 but the users' cluster\n # is centered at 10.1,9.8 - same penguin though\n # so as we go through the clusters, we need to see which ones match up more closely\n # with the gold standard\n # if subject_id in gold_standard_clustering[0]:\n # # closest cluster and distance\n # gold_to_cluster = {pt:(None,float(\"inf\")) for pt in gold_standard_clustering[0][subject_id]}\n # else:\n # gold_to_cluster = None\n\n\n # clusters_per_subject.append([])\n\n # # in either case probably an empty image\n # if subject_id not in clustering_results:\n # continue\n # if task_id not in clustering_results[subject_id]:\n # continue\n\n if (shape+ \" clusters\") not in aggregations[subject_id][task_id]:\n # if none of the relevant markings were made on this subject, skip it\n continue\n\n all_users = aggregations[subject_id][task_id][shape+ \" clusters\"][\"all_users\"]\n\n for local_cluster_index in aggregations[subject_id][task_id][shape+ \" clusters\"]:\n if local_cluster_index == \"all_users\":\n continue\n\n # extract the users who marked this cluster\n cluster = aggregations[subject_id][task_id][shape+ \" clusters\"][local_cluster_index]\n\n # todo - put this back when we support gold standard clustering\n # # is this user cluster close to any gold standard pt?\n # if subject_id in gold_standard_clustering[0]:\n # x,y = cluster[\"center\"]\n # for (gold_x,gold_y) in gold_to_cluster:\n # dist = math.sqrt((x-gold_x)**2+(y-gold_y)**2)\n # if dist < gold_to_cluster[(gold_x,gold_y)][1]:\n # gold_to_cluster[(gold_x,gold_y)] = local_cluster_index,dist\n #\n # # now repeat for negative gold standards\n # if subject_id in gold_standard_clustering[1]:\n # x,y = cluster[\"center\"]\n # min_dist = float(\"inf\")\n # closest= None\n # for x2,y2 in gold_standard_clustering[1][subject_id]:\n # dist = math.sqrt((x-x2)**2+(y-y2)**2)\n # if dist < min_dist:\n # min_dist = min(dist,min_dist)\n # closest = (x2,y2)\n # if min_dist == 0.:\n # assert (x,y) == closest\n # mapped_gold_standard[(subject_id,local_cluster_index)] = 0\n\n users = cluster[\"users\"]\n\n ballots = []\n\n # todo - the 15 hard coded value - might want to change that at some point\n for u in all_users:\n if u in users:\n ballots.append((u,1))\n else:\n ballots.append((u,0))\n\n existence_classification[(subject_id,local_cluster_index)] = ballots\n # clusters_per_subject[-1].append(global_cluster_index)\n # global_cluster_index += 1\n\n # # note we don't care about why a cluster corresponds to a gold standard pt - that is\n # # it could be really close to given gold standards - the point is that it is close\n # # to at least one of them\n # if gold_to_cluster is not None:\n # for (local_cluster_index,dist) in gold_to_cluster.values():\n # # arbitrary threshold but seems reasonable\n # if dist < 1:\n # mapped_gold_standard[(subject_id,local_cluster_index)] = 1\n\n existence_results = self.__task_aggregation__(existence_classification,task_id,{})#,mapped_gold_standard)\n assert isinstance(existence_results,dict)\n\n for subject_id,cluster_index in existence_results:\n new_results = existence_results[(subject_id,cluster_index)][task_id]\n # new_agg = {subject_id: {task_id: {shape + \" clusters\": {cluster_index: 
{\"existence\": new_results}}}}}\n # aggregations = self.__merge_results__(aggregations,new_agg)\n aggregations[subject_id][task_id][shape + \" clusters\"][cluster_index][\"existence\"] = new_results\n # if subject_id not in aggregations:\n # aggregations[subject_id] = {}\n # if task_id not in aggregations[subject_id]:\n # aggregations[subject_id][task_id] = {}\n # if (shape + \" clusters\") not in aggregations[subject_id][task_id]:\n # aggregations[subject_id][task_id][shape+ \" clusters\"] = {}\n # # this part is probably redundant\n # if cluster_index not in aggregations[subject_id][task_id][shape+ \" clusters\"]:\n # aggregations[subject_id][task_id][shape+ \" clusters\"][cluster_index] = {}\n #\n # aggregations[subject_id][task_id][shape+ \" clusters\"][cluster_index][\"existence\"] = existence_results[(subject_id,cluster_index)]\n\n return aggregations", "def clustering(self): \n clusterOfFiles=self.getClusters()\n \n #group files based on the hash of their contents\n self.keyingMethod=md5Hash\n [self.addFile(afile) for acluster in clusterOfFiles for afile in acluster]\n clusterOfFiles=self.getClusters()\n self.showClusters(clusterOfFiles)", "def test(dist_param, picker_param, iters):\n orig = '/home/zby/MAGISTERKA/MGR/results/oryginal.clustered.t'\n cl_orig = read_clustered(orig)\n name_tag = ''\n ndist = dist_param[1:]\n npick = picker_param[1:]\n for index in drange(4, 20, 0.5):\n name_tag = \"{}_{}_{}\".format(index, npick, ndist)\n tf_conf = configs.TfidfConfig(\n root_name('all_merged.txt', None),\n tfidf_name('merged.stem{}.stop', name_tag),\n tfidf_name('merged.stem{}.stop.txt', name_tag),\n None,\n tfidf_name('merged.stem{}.tfidf', name_tag),\n 10,\n 0,\n None)\n execute(tf_conf)\n tf_conf = configs.TfidfConfig(\n root_name('all_merged.txt', None),\n None,\n tfidf_name('merged.stem{}.stop.txt', name_tag),\n tfidf_name('merged.stem{}.stop', name_tag),\n tfidf_name('merged.stem{}.stop.tfidf', name_tag),\n None,\n None,\n None)\n execute(tf_conf)\n #input, out, picker, distance, iterations,\n clust_cfg = configs.ClusteringConfig(\n tfidf_name('merged.stem{}.stop.tfidf', name_tag),\n tfidf_name('merged.stem{}.stop.clustered.t', name_tag),\n picker_param,\n dist_param,\n iters,\n None\n )\n execute(clust_cfg)\n clust2 = read_clustered(tfidf_name('merged.stem{}.stop.clustered.t', name_tag))\n var, norm = variation_of_information(cl_orig, clust2)\n print(\"**** FOR var {} VOI is {}\".format(name_tag, norm))", "def get_feature(glcm, featurelist=['contrast']):\n measure_list = dict(max_prob=0, contrast=0, dissimilarity=0, homogeneity=0, ASM=0, energy=0, entropy=0,\n correlation=0, cluster_shade=0, variance_i=0, variance_j=0, mean_i=0, mean_j=0)\n\n M, N = glcm.shape\n\n np.seterr(divide='ignore', invalid='ignore')\n\n flat_glcm = glcm.flatten()\n index_i = np.arange(0, M) # row index\n index_j = np.arange(0, N) # column index = row\n\n sum_v = np.sum(glcm, axis=0) # sum column[] , vertical\n sum_h = np.sum(glcm, axis=1) # sum row[] , horizontal\n\n max_prob = np.max(flat_glcm)\n mean_i = np.dot(index_i, sum_h.flatten())\n mean_j = np.dot(index_j, sum_v.flatten())\n var_i = np.dot((index_i - mean_i) ** 2, sum_h.flatten())\n var_j = np.dot((index_j - mean_j) ** 2, sum_v.flatten())\n\n measure_list['max_prob'] = max_prob\n measure_list['variance_i'] = var_i\n measure_list['variance_j'] = var_j\n measure_list['mean_i'] = mean_i\n measure_list['mean_j'] = mean_j\n\n for name in featurelist:\n if name in measure_list.keys():\n if name is 'max_prob':\n measure_list[name] = 
np.max(flat_glcm)\n elif name is 'ASM':\n measure_list[name] = np.dot(flat_glcm, flat_glcm)\n elif name is 'energy':\n ASM = np.dot(flat_glcm, flat_glcm)\n measure_list[name] = np.sqrt(ASM)\n elif name is 'cluster_shade':\n cluster_weights = np.zeros([M, N])\n for i in range(M):\n for j in range(N):\n cluster_weights[i, j] = (i + j - mean_i - mean_j) ** 3\n measure_list[name] = np.dot(flat_glcm, cluster_weights.flatten())\n elif name is 'correlation':\n stdev_i = np.sqrt(var_i)\n stdev_j = np.sqrt(var_j)\n correl_weights = np.outer((index_i - mean_i), (index_j - mean_j)) / (stdev_i * stdev_j)\n measure_list[name] = np.dot(flat_glcm, correl_weights.flatten())\n elif name is 'contrast':\n contrast_weights = np.zeros([M, N])\n for i in range(M):\n for j in range(N):\n contrast_weights[i, j] = (i - j) ** 2\n measure_list[name] = np.dot(flat_glcm, contrast_weights.flatten())\n elif name is 'entropy':\n # ln = np.log(flat_glcm) here, log(0) = -inf, will have some problem, using np.ma.log instead\n # np.ma.log(0) = -- : not -inf. ? can pass\n ln = np.ma.log(flat_glcm)\n measure_list[name] = -np.dot(flat_glcm, ln)\n elif name is 'dissimilarity':\n dissi_weights = np.zeros([M, N])\n for i in range(M):\n for j in range(N):\n dissi_weights[i, j] = abs(i - j)\n measure_list[name] = np.dot(flat_glcm, dissi_weights.flatten())\n elif name is 'homogeneity':\n homo_weights = np.zeros([M, N])\n for i in range(M):\n for j in range(N):\n homo_weights[i, j] = 1 / (1 + (i - j) ** 2)\n measure_list[name] = np.dot(flat_glcm, homo_weights.flatten())\n\n return measure_list", "def perform_calculations(collector):\n result = {}\n try:\n radius, mass = Calculator.calculate_radius_mass(collector)\n result['radius'] = radius\n result['mass'] = mass\n average_density = Calculator.calculate_average_density(radius,\n mass)\n result['average_density'] = average_density\n escape_velocity = Calculator.calculate_escape_velocity(radius,\n mass)\n result['escape_velocity'] = escape_velocity\n earth_similarity_index = Calculator.calculate_esi_index(\n radius, mass, collector.get_average_temperature())\n result['earth_similarity_index'] = earth_similarity_index\n except NoDataError:\n pass\n\n try:\n avg_atm_molar_mass = Calculator.calculate_molar_mass(collector)\n except NoDataError:\n avg_atm_molar_mass = None\n if avg_atm_molar_mass is not None and avg_atm_molar_mass <= 0:\n logging.getLogger('Analyzer').debug('Molar mass <= 0: %d',\n avg_atm_molar_mass)\n avg_atm_molar_mass = None\n\n if avg_atm_molar_mass is not None:\n result['avg_atm_molar_mass'] = avg_atm_molar_mass\n avg_molecule_mass = avg_atm_molar_mass / Calculator.A\n result['avg_molecule_mass'] = avg_molecule_mass\n specific_gas_const = Calculator.R / avg_atm_molar_mass\n result['specific_gas_const'] = specific_gas_const\n\n try:\n speed_of_sound = Kundt.speed_of_sound(collector.kundt)\n result['speed_of_sound'] = speed_of_sound\n\n if avg_atm_molar_mass is None:\n # All further calculations require valid molar mass\n return result\n\n # Since calculate_molar_mass already uses get_average_temperature\n # and get_ground_pressure, it's safe to use these functions here\n # without worrying about NoDataError\n adiabatic_index = Calculator.calculate_adiabatic_index(\n collector, speed_of_sound, avg_atm_molar_mass)\n result['adiabatic_index'] = adiabatic_index\n\n atmosphere_density = (adiabatic_index *\n collector.get_ground_pressure() /\n speed_of_sound ** 2)\n result['atmosphere_density'] = atmosphere_density\n\n refractive_index = (3 * avg_atm_molar_mass *\n 
collector.get_ground_pressure() /\n atmosphere_density / Calculator.R /\n collector.get_average_temperature() - 2) ** 0.5\n result['refractive_index'] = refractive_index\n\n molar_refractivity = (avg_atm_molar_mass /\n atmosphere_density *\n (refractive_index ** 2 - 1) /\n (refractive_index ** 2 + 2))\n result['molar_refractivity'] = molar_refractivity\n\n atm_speed_of_light = Calculator.C / refractive_index\n result['atm_speed_of_light'] = atm_speed_of_light\n except NoDataError:\n pass\n\n return result", "def calculate3(pred_ccm, pred_ad, truth_ccm, truth_ad, method=\"sym_pseudoV\", weights=None, verbose=False, pseudo_counts=True, full_matrix=True, in_mat=2):\n larger_is_worse_methods = ['sym_pseudoV_nc', 'sym_pseudoV', 'pseudoV_nc', 'pseudoV', \"simpleKL_nc\", 'simpleKL'] # methods where a larger score is worse\n\n \n pc_pred_ccm, pc_pred_ad, pc_truth_ccm, pc_truth_ad = pred_ccm, pred_ad, truth_ccm, truth_ad\n y = np.array(pc_pred_ad.shape)[1]\n nssms = int(np.ceil(0.5 * (2*y + 1) - 0.5 * np.sqrt(4*y + 1)))\n\n if isinstance(method, list):\n res = [calculate3_onemetric(pc_pred_ccm, pc_pred_ad, pc_truth_ccm, pc_truth_ad,\n method=m, verbose=verbose, in_mat=in_mat) for m in method] # calculate the score for each method\n\n # normalize the scores to be between (worst of NCluster score and OneCluster score) and (Truth score)\n ncluster_ccm, ncluster_ad = add_pseudo_counts(mb.get_ccm('NClusterOneLineage', nssms=nssms), mb.get_ad('NClusterOneLineage', nssms=nssms))\n ncluster_score = [calculate3_onemetric(ncluster_ccm, ncluster_ad, pc_truth_ccm, pc_truth_ad,\n method=m, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat) for m in method]\n del ncluster_ccm, ncluster_ad\n onecluster_ccm, onecluster_ad = add_pseudo_counts(mb.get_ccm('OneCluster', nssms=nssms), mb.get_ad('OneCluster', nssms=nssms))\n onecluster_score = [calculate3_onemetric(onecluster_ccm, onecluster_ad, pc_truth_ccm, pc_truth_ad,\n method=m, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat) for m in method]\n del onecluster_ccm, onecluster_ad\n for i in range(len(method)):\n if method[i] in larger_is_worse_methods: # normalization for methods where a larger score is worse\n worst_score = max(ncluster_score[i], onecluster_score[i]) # worst of NCluster and OneCluster scores\n res[i] = 1 - (res[i] / worst_score) # normalize the score\n else: # normalization for methods where a smaller score is worse\n worst_score = min(ncluster_score[i], onecluster_score[i])\n res[i] = (res[i] - worst_score) / (1 - worst_score)\n\n\n if weights is None: # if weights are not specified or if they cannot be normalized then default to equal weights\n weights = [1] * len(method)\n elif sum(weights) == 0:\n Warning('Weights sum to zero so they are invalid, defaulting to equal weights')\n weights = [1] * len(method)\n\n weights = np.array(weights) / float(sum(weights)) # normalize the weights\n score = sum(np.multiply(res, weights))\n else:\n \n score = calculate3_onemetric(pc_pred_ccm, pc_pred_ad, pc_truth_ccm, pc_truth_ad,\n method=method, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat)\n del pc_pred_ccm\n del pc_pred_ad\n # normalize the score to be between (worst of NCluster score and OneCluster score) and (Truth score) - similar to above\n ncluster_ccm, ncluster_ad = add_pseudo_counts(mb.get_ccm('NClusterOneLineage', nssms=nssms), mb.get_ad('NClusterOneLineage', nssms=nssms))\n ncluster_score = calculate3_onemetric(ncluster_ccm, ncluster_ad, pc_truth_ccm, pc_truth_ad,\n method=method, verbose=verbose, full_matrix=full_matrix, 
in_mat=in_mat)\n del ncluster_ccm, ncluster_ad\n onecluster_ccm, onecluster_ad = add_pseudo_counts(mb.get_ccm('OneCluster', nssms=nssms), mb.get_ad('OneCluster', nssms=nssms))\n \n onecluster_score = calculate3_onemetric(onecluster_ccm, onecluster_ad, pc_truth_ccm, pc_truth_ad,\n method=method, verbose=verbose, full_matrix=full_matrix, in_mat=in_mat)\n del onecluster_ccm, onecluster_ad\n\n #print score, ncluster_score, onecluster_score\n if method in larger_is_worse_methods:\n worst_score = max(ncluster_score, onecluster_score)\n score = 1 - (score / worst_score)\n else:\n worst_score = min(ncluster_score, onecluster_score)\n score = (score - worst_score) / (1 - worst_score)\n return score", "def algorithms():\n algorith_paradigms = ['Divide-and-conquer', 'Backtrackig', 'Greedy-Algorithms', 'Dynamic-programming']\n return algorith_paradigms", "def main():\r\n algos = [merge_sort, quick_sort, heap_sort, radix_sort, bucket_sort_general]\r\n array_sizes = [5000, 10000, 15000, 20000, 50000, 75000, 100000, 150000]\r\n results = {algo.__name__: [] for algo in algos}\r\n for algo in algos:\r\n result = []\r\n for size in array_sizes:\r\n time = test(algo, size)\r\n result.append(time)\r\n results[algo.__name__] = result\r\n\r\n display_results(results, array_sizes)", "def __all_Algs_ ( self ) :\n _algs = self.algorithms()\n\n algs = []\n for _a in _algs :\n algs += [ self.algorithm ( _a ) ]\n return algs", "def __all_Algs_ ( self ) :\n _algs = self.algorithms()\n\n algs = []\n for _a in _algs :\n algs += [ self.algorithm ( _a ) ]\n return algs", "def all_means(runtimes):\n tmp = {}\n for name in runtimes:\n tmp[name] = compute_means(runtimes[name])\n return tmp", "def generate(dictalg):\n\n # dsList, sortedAlgs, dictAlg = processInputArgs(args, verbose=verbose)\n res = {}\n for f, i in pproc.dictAlgByFun(dictalg).iteritems():\n for d, j in pproc.dictAlgByDim(i).iteritems():\n tmp = BestAlgSet(j)\n res[(d, f)] = tmp\n return res", "def compute_statistics(self):", "def _calculate_cluster_measures(\n arr4d,\n threshold,\n bin_struct,\n two_sided_test=False,\n):\n n_regressors = arr4d.shape[3]\n\n max_sizes = np.zeros(n_regressors, int)\n max_masses = np.zeros(n_regressors, float)\n\n for i_regressor in range(n_regressors):\n arr3d = arr4d[..., i_regressor].copy()\n\n if two_sided_test:\n arr3d[np.abs(arr3d) <= threshold] = 0\n else:\n arr3d[arr3d <= threshold] = 0\n\n labeled_arr3d, _ = label(arr3d > 0, bin_struct)\n\n if two_sided_test:\n # Label positive and negative clusters separately\n n_positive_clusters = np.max(labeled_arr3d)\n temp_labeled_arr3d, _ = label(\n arr3d < 0,\n bin_struct,\n )\n temp_labeled_arr3d[temp_labeled_arr3d > 0] += n_positive_clusters\n labeled_arr3d = labeled_arr3d + temp_labeled_arr3d\n del temp_labeled_arr3d\n\n clust_vals, clust_sizes = np.unique(labeled_arr3d, return_counts=True)\n assert clust_vals[0] == 0\n\n clust_vals = clust_vals[1:] # First cluster is zeros in matrix\n clust_sizes = clust_sizes[1:]\n\n # Cluster mass-based inference\n max_mass = 0\n for unique_val in clust_vals:\n ss_vals = np.abs(arr3d[labeled_arr3d == unique_val]) - threshold\n max_mass = np.maximum(max_mass, np.sum(ss_vals))\n\n # Cluster size-based inference\n max_size = 0\n if clust_sizes.size:\n max_size = np.max(clust_sizes)\n\n max_sizes[i_regressor], max_masses[i_regressor] = max_size, max_mass\n\n return max_sizes, max_masses", "def _calculate_metrics(self):\n metrics = {}\n precision, recall = self.calc_precision_recall()\n metrics[\"precision\"] = precision\n metrics[\"recall\"] = 
recall\n metrics[\"entropy\"] = self.calc_entropy()\n metrics[\"component_entropy\"] = self.calc_component_entropy()\n metrics[\"num_comps\"] = len(self.get_components())\n metrics[\"num_diagnoses\"] = len(self.diagnoses)\n metrics[\"distinct_diagnoses_scores\"] = len(Counter(list(map(lambda x: x.probability, self.diagnoses))))\n metrics[\"num_tests\"] = len(self.get_tests())\n metrics[\"num_distinct_traces\"] = len(self.get_distinct_traces())\n metrics[\"num_failed_tests\"] = len(self._get_tests_by_error(1))\n metrics[\"num_passed_tests\"] = len(self._get_tests_by_error(0))\n passed_comps = set(self._get_components_by_error(0))\n failed_comps = set(self.get_components_in_failed_tests())\n metrics[\"num_failed_comps\"] = len(failed_comps)\n metrics[\"only_failed_comps\"] = len(failed_comps - passed_comps)\n metrics[\"only_passed_comps\"] = len(passed_comps - failed_comps)\n metrics[\"num_bugs\"] = len(self.get_bugs())\n metrics[\"wasted\"] = self.calc_wasted_components()\n metrics[\"top_k\"] = self.calc_top_k()\n metrics[\"num_comps_in_diagnoses\"] = len(self._get_comps_in_diagnoses())\n metrics[\"bugs_cover_ratio\"] = self._get_bugs_cover_ratio()\n metrics[\"average_trace_size\"] = self._get_average_trace_size()\n metrics[\"average_component_activity\"] = self._get_average_component_activity()\n metrics[\"average_diagnosis_size\"] = self._get_average_diagnosis_size()\n metrics[\"bugs_scores_average\"], metrics[\"bugs_scores_std\"], metrics[\"bugs_scores_entropy\"] = self._get_bugs_scores()\n metrics[\"non_bugs_scores_average\"], metrics[\"non_bugs_scores_std\"], metrics[\"non_bugs_scores_entropy\"] = self._get_non_bugs_scores()\n metrics.update(self.cardinality())\n # metrics[\"ochiai\"] = self.calc_ochiai_values()\n return metrics", "def compute_metrics(self):\n overall_ret = OrderedDict()\n for ap_iou_thresh in self.ap_iou_thresh:\n ret_dict = OrderedDict()\n rec, prec, ap = eval_det_multiprocessing(self.pred_map_cls, self.gt_map_cls, ovthresh=ap_iou_thresh)\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n ret_dict[\"%s Average Precision\" % (clsname)] = ap[key]\n ap_vals = np.array(list(ap.values()), dtype=np.float32)\n ap_vals[np.isnan(ap_vals)] = 0\n ret_dict[\"mAP\"] = ap_vals.mean()\n rec_list = []\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n try:\n ret_dict[\"%s Recall\" % (clsname)] = rec[key][-1]\n rec_list.append(rec[key][-1])\n except:\n ret_dict[\"%s Recall\" % (clsname)] = 0\n rec_list.append(0)\n ret_dict[\"AR\"] = np.mean(rec_list)\n overall_ret[ap_iou_thresh] = ret_dict\n return overall_ret", "def eval_mean_distance(played_decks, clustering_data: List, fuzzy: bool, debug: bool = False):\n\n for alg_dict in clustering_data:\n decks = np.array(played_decks)\n clusters = []\n for label in set(alg_dict[\"labels\"]):\n indices = np.where(alg_dict[\"labels\"] == label)\n if fuzzy:\n clusters.append(FuzzyDeckCluster(decks[indices]))\n else:\n clusters.append(DeckCluster(decks[indices]))\n\n if fuzzy:\n clustering = FuzzyDeckClustering(clusters)\n else:\n clustering = DeckClustering(clusters)\n\n sum_of_squared_distances_centroid = 0\n sum_of_squared_distances_core = 0\n\n for cluster in clustering.deck_clusters:\n centroid = cluster.centroid()\n core = cluster.core()\n for deck in cluster.decks:\n sum_of_squared_distances_centroid += (deck.jaccard_distance(centroid))**2\n sum_of_squared_distances_core += (deck.jaccard_distance(core))**2\n 
alg_dict[\"sse_centroid\"] = sum_of_squared_distances_centroid\n alg_dict[\"sse_core\"] = sum_of_squared_distances_core\n\n if debug:\n print(\"Alg: \" + alg_dict[\"name\"] + \"; \\t sse = \" + str(alg_dict[\"sse_centroid\"]))\n print(\"Alg: \" + alg_dict[\"name\"] + \"; \\t sse = \" + str(alg_dict[\"sse_core\"]))", "def __init__(self, conn, args, data, split_type, num_clusters):\n\n self.conn = conn\n self.args = args\n self.data = data\n self.split_type = split_type\n\n self.pca_model = None\n self.cluster_model = None\n self.algorithm = args['cluster_algorithm']\n\n # http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n affinity_propagation = cluster.AffinityPropagation()\n ms = cluster.MeanShift(bin_seeding=True)\n spectral = cluster.SpectralClustering(n_clusters=num_clusters, \n eigen_solver='arpack',\n affinity=\"nearest_neighbors\", \n random_state=self.args['seed'])\n ward = cluster.AgglomerativeClustering(n_clusters=num_clusters, \n linkage='ward')\n birch = cluster.Birch(n_clusters=num_clusters)\n two_means = cluster.MiniBatchKMeans(n_clusters=num_clusters,\n random_state=self.args['seed'])\n average_linkage = cluster.AgglomerativeClustering(linkage=\"average\", \n n_clusters=num_clusters)\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n kmeans = cluster.KMeans(n_clusters=num_clusters, random_state=self.args['seed'])\n dbscan = cluster.DBSCAN()\n \n self.clustering_algorithms = {\n 'MiniBatchKMeans': two_means,\n 'AffinityPropagation': affinity_propagation,\n 'MeanShift': ms,\n 'SpectralClustering': spectral,\n 'Ward': ward,\n 'AgglomerativeClustering': average_linkage,\n 'DBSCAN': dbscan,\n 'Birch': birch,\n 'HDBSCAN': hdbsc,\n 'KMeans': kmeans\n }", "def Clustering(typeVector, behaviour, sampleType):\r\n ProduceVector.produceVector(typeVector, behaviour, sampleType)\r\n vector = ProduceVector.getVector()\r\n\r\n if sampleType == \"\":\r\n sampleType2 = \"ransomware\"\r\n if sampleType == \"2\":\r\n sampleType2 = \"backdoor\"\r\n if sampleType == \"3\":\r\n sampleType2 = \"trojan\"\r\n\r\n if (behaviour == \"f\") or (behaviour == \"fc\"): \r\n\r\n calculateStandardisation(vector)\r\n vector = getStandardisation()\r\n\r\n typeOfVector = typeVector + \" \" + behaviour\r\n\r\n nGram, syscallRep = typeOfVector.split()\r\n\r\n if syscallRep == \"b\":\r\n nGram += \"Full Representation Bit Vector\"\r\n if syscallRep == \"f\":\r\n nGram += \"Full Representation Frequency Vector\"\r\n if syscallRep == \"bc\":\r\n nGram += \"Category Bit Vector\"\r\n if syscallRep == \"fc\":\r\n nGram += \"Category Frequency Vector\"\r\n\r\n\r\n fileName = sampleType2 + \" \" + syscallRep + \" standardisation \" + nGram + \".txt\"\r\n\r\n file = open(fileName, \"w\")\r\n file.write(str(vector.tolist()))\r\n file.close()\r\n\r\n calculateEuclideanDistance(vector) \r\n\r\n typeOfVector = typeVector + \" \" + behaviour\r\n\r\n nGram, syscallRep = typeOfVector.split()\r\n\r\n if syscallRep == \"b\":\r\n nGram += \"Full Representation Bit Vector\"\r\n if syscallRep == \"f\":\r\n nGram += \"Full Representation Frequency Vector\"\r\n if syscallRep == \"bc\":\r\n nGram += \"Category Bit Vector\"\r\n if syscallRep == \"fc\":\r\n nGram += \"Category Frequency Vector\" \r\n\r\n fileName = sampleType2 + \" \" + syscallRep + \" matrix \" + nGram + \".txt\"\r\n\r\n file = open(fileName, \"w\")\r\n file.write(str(getEuclideanDistance().tolist()))\r\n file.close()\r\n\r\n print(\"Producing a dendrogram\")\r\n\r\n\r\n typeOfVector = typeVector 
+ \" \" + behaviour\r\n\r\n setBestCluster(Validation.evaluate(getEuclideanDistance(), vector, typeOfVector, sampleType))", "def parse_clustering(key, content):\n if inspect.isclass(key):\n cl = key(**content)\n key = cl.__class__.__name__.lower()\n\n elif 'auto' in (content.get('n_clusters', ''),\n content.get('preference', '')) \\\n and key.lower() != 'hierarchical':\n # Wrapper class that automatically detects the best number of clusters\n # via 10-Fold CV\n content.pop('n_clusters', '')\n content.pop('preference', '')\n\n kwargs = {'param_grid': [], 'n_jobs': -1,\n 'scoring': silhouette_score, 'cv': 10}\n\n if key.lower() == 'kmeans':\n content.setdefault('init', 'k-means++')\n content.setdefault('n_jobs', 1)\n kwargs['estimator'] = KMeans(**content)\n elif key.lower() == 'ap':\n kwargs['estimator'] = AffinityPropagation(**content)\n kwargs['affinity'] = kwargs['estimator'].affinity\n else:\n logging.error(\"n_clusters = 'auto' specified outside kmeans or \"\n \"ap. Trying to create GridSearchCV pipeline anyway \"\n \" ...\")\n cl = GridSearchCV(**kwargs)\n elif 'auto' in (content.get('n_clusters', ''),\n content.get('preference', '')) \\\n and key.lower() == 'hierarchical':\n # TODO implement this\n # from adenine.utils.extensions import AgglomerativeClustering\n cl = AgglomerativeClustering(**content)\n else:\n if key.lower() == 'kmeans':\n content.setdefault('n_jobs', -1)\n cl = KMeans(**content)\n elif key.lower() == 'ap':\n content.setdefault('preference', 1)\n cl = AffinityPropagation(**content)\n elif key.lower() == 'ms':\n cl = MeanShift(**content)\n elif key.lower() == 'spectral':\n cl = SpectralClustering(**content)\n elif key.lower() == 'hierarchical':\n cl = AgglomerativeClustering(**content)\n else:\n cl = DummyNone()\n return (key, cl, 'clustering')", "def calculate_distances(self, iterations=[1]):\n for i in iterations:\n saved_policies = self.memory[i]\n # Load relevant saved experiences\n trajectories = []\n policies = []\n kdes = []\n\n for p in saved_policies:\n trajectories.extend(p['trajectory'])\n policies.append(p['policy'])\n kdes.append(p['kde'])\n\n num_policies = len(policies)\n\n kde_all = self.calculate_KDE(trajectories)\n samples = self.sample_kde(kde_all, 100)\n\n self.KDEs[i] = kde_all\n self.meta_samples[i] = samples\n\n sample_divergences = []\n\n for sample_ix, sample in enumerate(samples):\n probs_n = []\n sample_divergence = np.zeros([num_policies, num_policies])\n for ix in range(num_policies):\n occupancy_measure = self.calculate_occupancy(\n kdes[ix], policies[ix], sample)\n probs_n.append(occupancy_measure)\n # Save occupancy measures for comparison later\n key = sample_ix\n\n try:\n self.meta_occupancies[i][ix][key] = occupancy_measure\n except:\n try:\n self.meta_occupancies[i][ix] = {\n key: occupancy_measure}\n except:\n self.meta_occupancies[i] = {\n ix: {key: occupancy_measure}}\n\n for a in range(num_policies):\n for b in range(num_policies):\n sample_divergence[a][b] = self.calculate_JSD(\n probs_n[a].squeeze(), probs_n[b].squeeze())\n\n sample_prob = np.exp(kde_all.score(sample.reshape(1, -1)))\n sample_divergences.append(sample_divergence * sample_prob)\n # Compute average divergence over entirety\n divergence = np.sum(sample_divergences, axis=0) # or sum?\n self.meta_divergences[i] = divergence", "def answer_q10():\n data_sources = [viz.DATA_111_URL, viz.DATA_290_URL, viz.DATA_896_URL]\n x_vals = range(6, 21)\n y_vals_hier = {}\n y_vals_kmean = {}\n for idx in range(len(data_sources)):\n # 0. 
Generate data_field & cluster_list\n clust_list, data_table = closest_pair.create_cluster_list(\n data_sources[idx])\n y_vals_hier[idx] = []\n # 1. calculate values for hierarchical - decreasing order\n for clust_size in reversed(x_vals):\n clust_list = closest_pair.hierarchical_clustering(clust_list,\n clust_size)\n clust_error = closest_pair.compute_distortions(clust_list,\n data_table)\n y_vals_hier[idx].insert(0, clust_error)\n # 2. calculate values for kmeans - decreasing order\n y_vals_kmean[idx] = []\n for clust_size in x_vals:\n clust_list, data_table = closest_pair.create_cluster_list(\n data_sources[idx])\n clust_list = closest_pair.kmeans_clustering(clust_list,\n clust_size, 5)\n clust_error = closest_pair.compute_distortions(clust_list,\n data_table)\n y_vals_kmean[idx].append(clust_error)\n return x_vals, y_vals_hier, y_vals_kmean", "def calculate_metric(self, embeddings, fingerprints, properties, estimator, param_dict):\n\n metric_array = []\n for col in properties.columns:\n props = properties[col].astype(cupy.float32).to_array()\n embedding_error = self.gpu_gridsearch_cv(estimator, param_dict, embeddings, props)\n fingerprint_error = self.gpu_gridsearch_cv(estimator, param_dict, fingerprints, props)\n ratio = fingerprint_error / embedding_error # If ratio > 1.0 --> embedding error is smaller --> embedding model is better\n metric_array.append(ratio)\n return cupy.array(metric_array)", "def analysis_function_details(self,clustering):\n return clustering.details", "def selectedComparisons(samples_dict):\n sectors=\"peripherals\",\"intermediaries\",\"hubs\"\n ks_measures={}\n for analyses_type in samples_dict[sectors[0]]:\n ks_measures[analyses_type]={}\n for analysis_grouping in samples_dict[sectors[0]][analyses_type]:\n ks_measures[analyses_type][analysis_grouping]={}\n if samples_dict[sectors[0]][analyses_type][analysis_grouping]==dict:\n ks_measures[analyses_type][analysis_grouping]={}\n for analysis in samples_dict[sectors[0]][analyses_type][analysis_grouping]:\n for var in samples_dict[sectors[0]][analyses_type][analysis_grouping][analysis]:\n samples_peripherals=samples_dict[sectors[0]][analyses_type][analysis_grouping][analysis]\n samples_intermediaries=samples_dict[sectors[1]][analyses_type][analysis_grouping][analysis]\n samples_hubs=samples_dict[sectors[2]][analyses_type][analysis_grouping][analysis]\n ks_measures[analysis][\"peripherals_intermediaries\"]=P.kolmogorovSmirnovTest(samples_peripherals,samples_intermediaries)\n ks_measures[analysis][\"peripherals_hubs\"]=P.kolmogorovSmirnovTest(samples_peripherals,samples_hubs)\n ks_measures[analysis][\"hubs_intermediaries\"]=P.kolmogorovSmirnovTest(samples_hubs,samples_intermediaries)\n else:\n for var in samples_dict[sectors[0]][analyses_type][analysis_grouping]:\n samples_peripherals=samples_dict[sectors[0]][analyses_type][analysis_grouping]\n samples_intermediaries=samples_dict[sectors[1]][analyses_type][analysis_grouping]\n samples_hubs=samples_dict[sectors[2]][analyses_type][analysis_grouping]\n\n\n\n samples[sector][analyses][analysis_grouping]=updateDict(samples[sector][analyses][analysis_grouping],getSamples(authors_analysis[analyses][author][analysis_grouping]))", "def analyzeClusterPerformance(self, cluster):\n def calcOverlap(playlist, group):\n urls = set(group[\"uri\"].values)\n playlist = set(playlist)\n return len(urls & playlist)\n\n groupedDF = self.audioDF.groupby(cluster)\n playlists = list(self.playlists)\n\n clusters = [group for _, group in groupedDF]\n numSongs = sum([len(p) for p in 
playlists])\n\n numOverlap, maxOverlap = 0, 1\n while len(playlists) > 0 and len(clusters) > 0 and maxOverlap > 0:\n maxOverlap, pIDX, gIDX = 0, None, None\n for i, playlist in enumerate(playlists):\n for j, c in enumerate(clusters):\n overlap = calcOverlap(playlist, c)\n if overlap > maxOverlap:\n maxOverlap, pIDX, gIDX = overlap, i, j\n \n if maxOverlap > 0:\n playlists.pop(pIDX)\n clusters.pop(gIDX)\n numOverlap += maxOverlap\n\n performance = float(numOverlap) / float(numSongs)\n performancePercentage = round(performance, 4) * 100\n\n return {\n \"Clustering Algorithm\": cluster,\n \"Performance\": performance\n }", "def _compute_experiment_statistics(self):\n pass", "def test(referrer, tester, test_word, algorithm, algo_params={}, visualize_results=False, apply_PCA=False, clear_referrer_with_algo=False, clean_algo_parameters={\"name\": 'EllipticEnvelope', \"contamination\": 0.1, \"visualize_results\": False}):\n\n # Split the test_word to digraphs\n test_digraphs = [('Key' + c[0].upper() if c[0] != ' ' else 'Space') + ('Key' + c[1].upper() if c[1] != ' ' else 'Space')\n for c in [z[0] + z[1] for z in zip(test_word, test_word[1:])]]\n\n # Test each digraph data of tester against referrer\n count_insufficient_ref_samples = 0.\n # `score` Represents number of inliers if algo != 'GMM' else represents score for each GMM component\n score = 0. if algorithm != 'GMM' else []\n count_global = 0.\n for test_digraph in test_digraphs:\n\n # Find the digraph data of referrer\n ref_di_points = ret_digraph_points(\n referrer, test_digraph).astype(float)\n # print ref_di_points\n if len(ref_di_points) >= 10:\n\n # If tester and referrer sample\n if referrer['_subject'] == tester['_subject'] and referrer['_track_code'] == tester['_track_code']:\n _tmp = ref_di_points\n np.random.shuffle(_tmp)\n perc = int(np.floor(0.8 * len(_tmp)))\n ref_di_points = _tmp[0:perc, :]\n test_di_points = _tmp[perc:, :]\n else:\n # Sample some data for the particular digraph in tester\n _test_di_points = ret_digraph_points(tester, test_digraph)\n test_di_points = _test_di_points[np.random.choice(\n _test_di_points.shape[0], size=randint(10, 12), replace=False), :]\n\n # Transform ref and tester data with standard scaler\n sscaler = StandardScaler(with_mean=True, with_std=True).fit(\n np.append(ref_di_points, test_di_points, axis=0))\n train_points = sscaler.transform(ref_di_points)\n test_points = sscaler.transform(test_di_points)\n\n # Apply PCA if needed\n if apply_PCA is True:\n pca_model = PCA().fit(\n np.append(train_points, test_points, axis=0))\n train_points = pca_model.transform(train_points)[:, 0:2]\n test_points = pca_model.transform(test_points)[:, 0:2]\n\n # Clean referrer points from noise if needed\n if clear_referrer_with_algo is True:\n train_points = general_purpose.clean_with_algo(\n train_points, algorithm=clean_algo_parameters['name'], contamination=clean_algo_parameters['contamination'])\n\n # Anomaly-Test the digraph and get the count_global of inliers\n if algorithm != 'GMM':\n score += digraph_test(train_points,\n test_points, algorithm, algo_params, visualize_results=visualize_results,\n plt_title_info={\"referrer\": referrer['_subject'],\n \"tester\": tester['_subject'], \"digraph\": test_digraph})\n else:\n # Special procedure for GMM\n _sc = digraph_test_GMM(train_points,\n test_points, algo_params, visualize_results=visualize_results,\n plt_title_info={\"referrer\": referrer['_subject'],\n \"tester\": tester['_subject'], \"digraph\": test_digraph})\n score.append(_sc)\n\n else:\n\n 
count_insufficient_ref_samples += len(test_points)\n\n count_global += len(test_points)\n\n # Find eventually the total score\n if algorithm != 'GMM':\n total_score = score / \\\n (count_global - count_insufficient_ref_samples)\n else:\n total_score = 0.\n for s in score:\n total_score += sum(s)\n total_score = total_score / \\\n (count_global - count_insufficient_ref_samples)\n\n # print '\\n'\n # print 'Digraph passed: ' + str(total_score) + '%, ' + str(int(score)) + ' out of ' + str(int(count_global - count_insufficient_ref_samples))\n # print 'Insufissient digraph samples: ' + str(count_insufficient_ref_samples) + ' out of ' + str(int(count_global))\n # print total_score\n if count_insufficient_ref_samples > 0:\n print 'Insuffisient samples: %d' % (count_insufficient_ref_samples)\n print 'For word: ' + test_word\n return total_score", "def multiClusterAlgorithm(values):\n clusterMap = dict()\n # Lowest unixtime in the values list\n startUnixtime = values[0][0]\n # Separate values into separate clusters in the map, clusterMap\n for value in values:\n if value[2] not in clusterMap.keys():\n clusterMap[value[2]] = []\n clusterMap[value[2]].append(value)\n \n # An array of the predicted values per cluster\n clusterPredicted = []\n # An array of unixtime averages per cluster\n unixtimeAverage = []\n \n # Generate predicted value for each cluster using a weighted average\n # Adds predicted value for each cluster to clusterPredicted and adds\n # the unixtime average for each cluster to unixtimeAverage\n for key in clusterMap.keys():\n clusterLength = len(clusterMap[key]) \n totalUnixtime = sum([elem[0] for elem in clusterMap[key]])\n unixtimeAverage.append(float(totalUnixtime) / float(clusterLength))\n \n clusterLight = [elem[1] for elem in clusterMap[key]]\n\n totalDistanceFromStart = sum([elem[0] - startUnixtime for elem in clusterMap[key]])\n clusterWeightValues = [float(elem[0] - startUnixtime) / float(totalDistanceFromStart) for elem in clusterMap[key]]\n \n predicted = np.average(clusterLight, weights = clusterWeightValues)\n clusterPredicted.append(predicted)\n \n print clusterPredicted\n total = sum([elem - startUnixtime for elem in unixtimeAverage])\n # Create weighted values based on unixtime average per cluster\n weightValues = [float(elem - startUnixtime) / float(total) for elem in unixtimeAverage]\n print weightValues\n # Return a weighted average across clusters\n return np.average(clusterPredicted, weights = weightValues)", "def get_metrics(cfg, model, X_anchor, y_anchor, X_gal, y_gal, annoy_index, vec_dim):\n rank10_acc = 0\n rank5_acc = 0\n rank1_acc = 0\n avg_acc = 0\n vote_res = 0\n\n l2 = []\n for anchor in range(0, len(X_anchor)):\n res = get_result(get_image_features(cfg, model, X_anchor[anchor]), annoy_index)\n vote = defaultdict(int)\n # Accuracy\n correct = 0\n for i in res[:10]:\n vote[y_gal[i]] += 1\n\n max_key = max(vote, key=vote.get)\n if max_key == y_anchor[anchor]:\n vote_res += 1\n \n\n for recomm in res[:10]:\n if y_gal[recomm] == y_anchor[anchor]:\n correct += 1 \n\n avg_acc += correct/len(res)\n\n # Mean Average Precision\n l1 = []\n for recomm in res[:10]:\n if y_gal[recomm] == y_anchor[anchor]:\n correct += 1\n l1.append(1)\n else:\n l1.append(0)\n l2.append(l1) \n\n # Rank10 Accuracy\n for each_val in res[:10]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank10_acc += 1\n break\n \n # Rank5 Accuracy\n for each_val in res[:5]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank5_acc += 1\n break\n\n # Rank1 Accuracy\n for each_val in res[:1]:\n if 
y_gal[each_val] == y_anchor[anchor]:\n rank1_acc += 1\n break\n\n print(\"Avg acc is :: {avg_acc}\".format(avg_acc = avg_acc/len(X_anchor)))\n print(\"Rank 10 acc is :: {rank10_acc}\".format(rank10_acc = rank10_acc/len(X_anchor)))\n print(\"Rank 5 acc is :: {rank5_acc}\".format(rank5_acc = rank5_acc/len(X_anchor)))\n print(\"Rank 1 acc is :: {rank1_acc}\".format(rank1_acc = rank1_acc/len(X_anchor)))\n print(\"Mean Avg Precision is :: {mAP}\".format(mAP=mean_average_precision(l2)))\n print(\"Vote res :: \", vote_res/len(X_anchor))\n\n return rank1_acc/len(X_anchor), mean_average_precision(l2)", "def enumerate_clusterings(self):\n\n # Initialize an empty list of clusterings. Each element of the list\n # is a dictionary mapping NOEs to the signatures they are clustered to\n # in a solution. Each clustering is initialize with all uniquely\n # clusterable NOEs as keys mapping to their unique clusters\n\n clusterings = []\n\n while True:\n\n # Run the solver and get a solution back\n\n solution = self.solve()\n\n # If UNSAT, then flush aux clauses from the formula and return\n # all the clusterings we found so far\n\n if not solution:\n self.flush()\n return clusterings\n\n # Iterate over the clustering variables set to true by in the\n # discovered solution. Forbid this clustering from reoccuring and\n # add it to the list of found clusterings\n\n clause = []\n clustering = {}\n for node in self.clustering_variables.keys():\n if len(node.clusters) == 1:\n clustering[node] = list(node.clusters)[0]\n\n for vtype, node, cluster in solution:\n if vtype == Formula.CST_VAR:\n clustering[node] = cluster\n clause.append(-self.clustering_variables[node][cluster])\n\n self.add_clause(clause)\n clusterings.append(clustering)", "def compute_means(self):\n ###TODO\n vector_means = []\n for doc in self.fin_clust.values():\n vec = defaultdict(float)\n for d_id in doc:\n doc_keys = self.docs[d_id].keys()\n for key in self.docs[d_id]:\n vec[key] = vec[key] + self.docs[d_id][key]\n tot = len(doc)\n x = defaultdict(float)\n for k,v in vec.items():\n x[k] = float(v)/tot\n vec = Counter(x)\n vector_means.append(vec)\n return vector_means", "def evaluation_cc(self, property='clustering-coeff'):\n\n if property == 'clustering-coeff':\n rw_cc = [np.mean(clustering_coef_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(clustering_coef_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'transitivity':\n rw_cc = [np.mean(transitivity_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(transitivity_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'coreness':\n rw_cc = [np.mean(core.core_periphery_dir(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(core.core_periphery_dir(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'assortativity':\n rw_cc = [np.mean(core.assortativity_wei(self.rw_data[t], 0)) for t in range(0, self.T)]\n smth_cc = [np.mean(core.assortativity_wei(self.smth_data[t], 0)) for t in range(0, self.T)]\n elif property == 'modularity':\n rw_cc, _ = get_number_of_components(self.rw_data)\n smth_cc, _ = get_number_of_components(self.smth_data)\n elif property == 'path_length':\n rw_cc = [charpath(rw)[0] for rw in self.rw_data]\n smth_cc = [charpath(sm)[0] for sm in self.smth_data]\n\n # rw_cc_ent = get_entropy_list(rw_cc)\n # smth_cc_ent = get_entropy_list(smth_cc)\n\n return rw_cc, smth_cc", "def ehost_iaa_compute(folder1, folder2, no_context=False):\n annotator1 = read_ehost_annotated_result(folder1, 
no_context=no_context)\n annotator2 = read_ehost_annotated_result(folder2, no_context=no_context)\n merged_keys = list(set(annotator1.keys()) | set(annotator2.keys()))\n y1 = []\n y2 = []\n for key in merged_keys:\n if key in annotator1 and key in annotator2:\n y1.append(annotator1[key])\n y2.append(annotator2[key])\n else:\n print('%s not matched in all' % key)\n iaa = sklearn.metrics.cohen_kappa_score(y1, y2)\n print('IAA is %s on %s' % (iaa, len(annotator1)))\n return iaa", "def clustering(self):\n ret_concepts = []\n clusters = []\n for word in self.words:\n clusters.append(WordCluster(None, word))\n while len(clusters) > 1:\n maxi = -1\n maxj = -1\n max = -1\n m = -1\n for i in range(len(clusters)):\n for j in range(len(clusters)):\n if i == j:\n continue\n # print(\"%d cluster compare with %d cluster\" % (i, j))\n # 1: join 21: i absorb j 22: j absorb i 3: collapse\n # l1: join L(Tm) value l21: A absorb B L(Tm)value\n l1, newtags = self.__calculate_ltm(clusters[i], clusters[j], 1)\n if l1 > max:\n m = 1\n maxi = i\n maxj = j\n max = l1\n print(\"max L(Tm) for clustering in current loop: %lf\" % max)\n if max < ClusterAlgorithm.P_threshold:\n return\n Tm = clusters[maxi].join(clusters[maxj])\n Tm_concepts = self.__select_concepts(self.__getword(Tm))\n for tmp_concept in Tm_concepts.items():\n ret_concepts.append(tmp_concept)\n rm1 = clusters[maxi]\n rm2 = clusters[maxj]\n clusters.remove(rm1)\n clusters.remove(rm2)\n if Tm is not None:\n print(\"merged cluster's words:\")\n print(self.__getword(Tm))\n return ret_concepts", "def main():\n parser = argparse.ArgumentParser(description=\"Wrapper of the scikit-learn AgglomerativeClustering method. \", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('--config', required=False, help='Configuration file')\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')\n required_args.add_argument('--output_results_path', required=True, help='Path to the clustered dataset. Accepted formats: csv.')\n parser.add_argument('--output_plot_path', required=False, help='Path to the clustering plot. 
Accepted formats: png.')\n\n args = parser.parse_args()\n args.config = args.config or \"{}\"\n properties = settings.ConfReader(config=args.config).get_prop_dic()\n\n # Specific call of each building block\n agglomerative_clustering(input_dataset_path=args.input_dataset_path,\n output_results_path=args.output_results_path,\n output_plot_path=args.output_plot_path,\n properties=properties)", "def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }", "def _cluster(self):\n # , distance_function=spearman_squared_distance, max_iter=1000, tol=0.0001):\n if self.cluster_method is None:\n clusters = KMedoids(\n self.k,\n self.batchsize,\n dist_func=self.distance_function,\n max_iter=self.max_iter,\n tol=self.tol,\n init_medoids=self.init_medoids,\n swap_medoids=self.swap_medoids,\n )\n clusters.fit(self.clustering_attributions, verbose=self.verbose)\n\n self.subpopulations = clusters.members\n self.subpopulation_sizes = GAM.get_subpopulation_sizes(clusters.members)\n self.explanations = self._get_explanations(clusters.centers)\n # Making explanations return numerical values instead of dask arrays\n if isinstance(self.explanations[0][0][1], da.Array):\n explanations = []\n for explanation in self.explanations:\n explanations.append([(x[0], x[1].compute()) for x in explanation])\n self.explanations = explanations\n else:\n self.cluster_method(self)", "def prepare_statistics(self):\n\n # statistics of clustering files\n len0 = len(self.cluster_lists[0])\n len1 = len(self.cluster_lists[1])\n longer_index = 0 if len0 >= len1 else 1\n shorter_index = 1 if len1 <= len0 else 0\n\n percentage_stars = \"%.2f\" % (100.0 * float(self.shared_spec_num)/float(self.cluster_spectra_num[shorter_index]))\n percentage_starlets = \"%.2f\" % (100.0 * float(self.shared_spec_num)/float(self.cluster_spectra_num[longer_index]))\n\n head = \"{0:<25}{1:<20}{2:<20}\\n\".format(\"name\", \"number\", \"description\")\n rows = \"\"\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"stars No.\", self.stars_length, \"in file with less(or equal) clusters: file\" + str(shorter_index))\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"starlets No.\", self.starlets_length, \"in file with more(or equal) clusters: file\" + str(longer_index))\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"identical cluster No.\", self.similarity_dist[10], \"between them\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"spectrum No\", self.cluster_spectra_num[shorter_index], \"in stars\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"spectrum No\", self.cluster_spectra_num[longer_index], \"in starlets \")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum No\", self.shared_spec_num, \"between them\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum percent\", percentage_stars, \"in stars\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum percent\", percentage_starlets, \"in starlets\")\n self.tables.append(('statistics of files', head, rows))\n\n # distribution of cluster size in stars\n head = '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(\"cluster size\",\"No.\", \"percentage\", \"accumulate pecentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_star_size), \"average\")\n accumulate_num = 0\n for key in sorted(self.cluster_size_dist[shorter_index].keys()):\n value = self.cluster_size_dist[shorter_index][key]\n accumulate_num += value\n 
percent = \"%.2f\" % (100 * value/self.stars_length)\n accum_percent = \"%.2f\" % (100 * accumulate_num/self.stars_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent, accum_percent)\n self.tables.append(('distribution of cluster size in stars', head, rows))\n \n head = '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(\"cluster size\",\"No.\", \"percentage\", \"accumulate pecentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_starlet_size), \"average\")\n accumulate_num = 0\n for key in sorted(self.cluster_size_dist[longer_index].keys()):\n value = self.cluster_size_dist[longer_index][key]\n accumulate_num += value\n percent = \"%.2f\" % (100 * value/self.starlets_length)\n accum_percent = \"%.2f\" % (100 * accumulate_num/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent, accum_percent)\n self.tables.append(('distribution of cluster size in starlets', head, rows))\n\n # distribution of similarity\n head = \"{0:<20}{1:<20}{2:<20}{3:<20}\\n\".format(\"similarity score\", \"pairs of clusters\", \"percentage(stars)\", \"percentage(starlets)\")\n rows = \"\"\n for key in reversed(sorted(self.similarity_dist.keys())):\n value = self.similarity_dist[key]\n percent_star = \"%.2f\" % (100.0*value/self.stars_length)\n percent_starlet = \"%.2f\" % (100.0*value/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent_star, percent_starlet)\n self.tables.append(('distribution of similarity (identical = 10)', head, rows))\n\n # distribution of star divide factors\n head = '{0:<20}{1:<20}{2:<20}\\n'.format(\"divide factor\",\"No.\",\"percentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_divide_factor_star), \"average\")\n for key in sorted(self.star_divide_factor_dist.keys()):\n value = self.star_divide_factor_dist[key]\n percent_star = \"%.2f\" % (100.0*value/self.stars_length)\n rows += '{0:<20}{1:<20}{2:<20}\\n'.format(key, value, percent_star)\n self.tables.append(('distribution of star divide factors', head, rows))\n\n # distribution of starlet divide factors\n head = '{0:<20}{1:<20}{2:<20}\\n'.format(\"divide factor\",\"No.\",\"percentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_divide_factor_starlet), \"average\")\n for key in sorted(self.starlet_divide_factor_dist.keys()):\n value = self.starlet_divide_factor_dist[key]\n percent_starlet = \"%.2f\" % (100.0*value/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}\\n'.format(key, value, percent_starlet)\n self.tables.append(('distribution of starlet divide factors', head, rows))", "def cluster_membership_occupancy(data):\n \n \n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n\n if n_clusters == 0:\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features()]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters ==1:\n #obtain_total_cluster_areas_set_everything_else_to_default\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n \n try:\n 
cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0]\n \n Total_cluster_area=np.sum(cluster_chull_areas)\n areas=[Cluster_Area_Features([Total_cluster_area,0,0,0,0,0,0,0,0])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters >1:\n #Summarizing the cluster membership distribution characteristics\n cluster_size_nums=np.delete(np.array(data.groupby(['clusters']).size()),0)\n (cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD)= distribution_statistics(cluster_size_nums)\n\n #For each cluster calculate the area by calculating the area of the convex hull of cluster members\n # Note: concavehull implementation here might be a good addition as it will provide more imformative values. \n\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0,0,0]\n \n\n (avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area)= distribution_statistics(cluster_chull_areas)\n Total_cluster_area=np.sum(cluster_chull_areas)\n\n #Calculate cluster density: number of nuclei/ convex area of cluster\n cluster_density=np.divide(cluster_size_nums,cluster_chull_areas)\n (avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density)= distribution_statistics(cluster_density)\n\n #return dataframe of features\n membership=[Cluster_Membership_Features([cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD])]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features([Total_cluster_area,\n avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features([avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density])]\n density = pd.DataFrame([o.__dict__ for o in density])\n\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n return all_features", "def convergence_measure_all(filename,index,mean_subtract,smoothing_scale=None):\n\n\tlogging.info(\"Processing {0}\".format(filename))\n\n\t#Load the map\n\tconv_map = ConvergenceMap.load(filename)\n\n\tif mean_subtract:\n\t\tconv_map.data -= conv_map.mean()\n\n\t#Smooth the map maybe\n\tif smoothing_scale is not None:\n\t\tlogging.info(\"Smoothing {0} on {1}\".format(filename,smoothing_scale))\n\t\tconv_map.smooth(smoothing_scale,kind=\"gaussianFFT\",inplace=True)\n\n\t#Allocate memory for observables\n\tdescriptors = index\n\tobservables = 
np.zeros(descriptors.size)\n\n\t#Measure descriptors as directed by input\n\tfor n in range(descriptors.num_descriptors):\n\t\t\n\t\tif type(descriptors[n]) == PowerSpectrum:\n\t\t\tl,observables[descriptors[n].first:descriptors[n].last] = conv_map.powerSpectrum(descriptors[n].l_edges)\n\t\t\t\n\t\telif type(descriptors[n]) == Moments:\n\t\t\tobservables[descriptors[n].first:descriptors[n].last] = conv_map.moments(connected=descriptors[n].connected)\n\t\t\t\n\t\telif type(descriptors[n]) == Peaks:\n\t\t\tv,observables[descriptors[n].first:descriptors[n].last] = conv_map.peakCount(descriptors[n].thresholds,norm=descriptors[n].norm)\n\n\t\telif type(descriptors[n]) == PDF:\n\t\t\tv,observables[descriptors[n].first:descriptors[n].last] = conv_map.pdf(descriptors[n].thresholds,norm=descriptors[n].norm)\n\t\t\n\t\telif type(descriptors[n]) == MinkowskiAll:\n\t\t\tv,V0,V1,V2 = conv_map.minkowskiFunctionals(descriptors[n].thresholds,norm=descriptors[n].norm)\n\t\t\tobservables[descriptors[n].first:descriptors[n].last] = np.hstack((V0,V1,V2))\n\t\t\n\t\telif type(descriptors[n]) == MinkowskiSingle:\n\t\t\traise ValueError(\"Due to computational performance you have to measure all Minkowski functionals at once!\")\n\t\t\n\t\telse:\n\t\t\t\n\t\t\traise ValueError(\"Measurement of this descriptor not implemented!!!\")\n\n\t#Return\n\treturn observables", "def cal_topology_feature(self):\n self.NPL()\n self.topo_efficiency_cal()\n self.efficiency_cal()\n self.cluster_cal()\n self.topo_diameter()\n self.spatial_diameter()", "def main(seed, numpoints, dimensions, num_centres, fragments, mode, iterations,\n epsilon, arity, use_storage):\n start_time = time.time()\n\n # Generate the data\n fragment_list = []\n # Prevent infinite loops in case of not-so-smart users\n points_per_fragment = max(1, numpoints // fragments)\n\n for l in range(0, numpoints, points_per_fragment):\n # Note that the seed is different for each fragment.\n # This is done to avoid having repeated data.\n r = min(numpoints, l + points_per_fragment)\n\n fragment_list.append(\n generate_fragment(r - l, dimensions, mode, seed + l, use_storage)\n )\n\n compss_barrier()\n print(\"Generation/Load done\")\n initialization_time = time.time()\n print(\"Starting kmeans\")\n\n # Run kmeans\n centres = kmeans_frag(fragments=fragment_list,\n dimensions=dimensions,\n num_centres=num_centres,\n iterations=iterations,\n seed=seed,\n epsilon=epsilon,\n arity=arity)\n compss_barrier()\n print(\"Ending kmeans\")\n kmeans_time = time.time()\n\n # Run again kmeans (system cache will be filled)\n print(\"Second kmeans\")\n centres = kmeans_frag(fragments=fragment_list,\n dimensions=dimensions,\n num_centres=num_centres,\n iterations=iterations,\n seed=seed,\n epsilon=epsilon,\n arity=arity)\n compss_barrier()\n print(\"Ending second kmeans\")\n kmeans_2nd = time.time()\n\n print(\"-----------------------------------------\")\n print(\"-------------- RESULTS ------------------\")\n print(\"-----------------------------------------\")\n print(\"Initialization time: %f\" % (initialization_time - start_time))\n print(\"Kmeans time: %f\" % (kmeans_time - initialization_time))\n print(\"Kmeans 2nd round time: %f\" % (kmeans_2nd - kmeans_time))\n print(\"Total time: %f\" % (kmeans_2nd - start_time))\n print(\"-----------------------------------------\")\n centres = compss_wait_on(centres)\n print(\"CENTRES:\")\n print(centres)\n print(\"-----------------------------------------\")", "def evaluate():\n global dictionary, wv\n count = 0\n # To save the scores by 
distance and similarity\n scores = np.zeros(6)\n similar = np.zeros(6)\n itr = len(dictionary)\n logging.info('running evaluation for {0} samples'.format(itr))\n for key in dictionary:\n progress = (count / itr) * 100\n d = dictionary[key].split('resource/')\n d = [idx.split()[0].translate(table).lower() for idx in d[1:]]\n try:\n r = np.array(list(map(lambda x: wv.get_vector(x), d)),\n dtype=np.float32)\n except KeyError:\n itr -= 1\n continue\n if np.any(np.isnan(r)):\n itr -= 1\n continue\n else:\n if r.ndim == 2:\n try:\n # Mean of vector containing all word vectors\n # obtained from abstract.\n r = r.mean(axis=0).reshape(1, -1)\n \n # Obtain the vectors for the entity\n mean_vec = mean_encoder(dictionary[key])\n mean_vec = mean_vec.reshape(1, -1) / norm(mean_vec)\n mean_dist_vec = distance_encoder(dictionary[key])\n mean_dist_vec = mean_dist_vec.reshape(1, -1)\n mean_dist_vec = mean_dist_vec / norm(mean_dist_vec)\n title_vec = title_mean(key)\n title_vec = title_vec.reshape(1, -1) / norm(title_vec)\n abstract_vec = abstract_encoder(key)\n abstract_vec = abstract_vec.reshape(1, -1)\n abstract_vec = abstract_vec / norm(abstract_vec)\n random_vec = np.random.randn(100).reshape(1, -1)\n zero_vec = np.zeros(100).reshape(1, -1)\n \n # Score the entity vectors\n scores[0] += norm(r - mean_vec)\n scores[1] += norm(r - mean_dist_vec)\n scores[2] += norm(r - title_vec)\n scores[3] += norm(r - abstract_vec)\n scores[4] += norm(r - random_vec)\n scores[5] += norm(r - zero_vec)\n similar[0] += cosine_similarity(r, mean_vec)\n similar[1] += cosine_similarity(r, mean_dist_vec)\n similar[2] += cosine_similarity(r, title_vec)\n similar[3] += cosine_similarity(r, abstract_vec)\n similar[4] += cosine_similarity(r, random_vec)\n similar[5] += cosine_similarity(r, zero_vec)\n count += 1\n print(count, end='\\r')\n except (ValueError, KeyError) as _:\n itr -= 1\n continue\n else:\n itr -= 1\n continue\n # Normalize the scores to get a better\n # comparison against the baselines.\n scores = scores / norm(scores)\n similar = similar / norm(similar)\n print_summary(scores, similar)", "def evaluate_clusters(self, cluster_formulas, value='weighted_sum'):\n num_elems = len(self.labels)\n total_val = {}\n num_cl = len(cluster_formulas)\n clustered_points_num = 0\n print(\"\\n\\n\")\n print(\"Sufficiently big clusters: {}\".format(num_cl))\n for c, formula, val in cluster_formulas:\n c_size = len([l for l in self.labels if l == c])\n clustered_points_num += c_size\n\n if value == 'weighted_sum':\n total_val[c] = val * c_size / num_elems\n elif value == 'sum':\n total_val[c] = val * 1\n\n clust_val = sum(total_val.values())\n self.clustering_value = total_val\n print(\"Value of clustering: {}\".format(clust_val))\n return clust_val", "def cluster_life_expectancy() -> Dict:\n return dict(model=None, score=None, clusters=None)", "def _prepare_algorithms(\n self, initial_model: TModel, initial_graph: NNCFGraph, combinations: Dict[CombinationKey, Combination]\n ) -> None:\n for combination_key, combination in combinations.items():\n kwargs = apply_combination(self._init_params, combination)\n self._algorithms[combination_key] = self._algorithm_cls(**kwargs)\n\n # Collect required statistics for created algorithms\n stats_aggregator = StatisticsAggregatorFactory.create(initial_model, self._calibration_dataset)\n for algorithm in self._algorithms.values():\n statistic_points = algorithm.get_statistic_points(initial_model, initial_graph)\n stats_aggregator.register_statistic_points(statistic_points)\n 
stats_aggregator.collect_statistics(initial_model, initial_graph)\n self._statistic_points = stats_aggregator.statistic_points", "def _compute_cluster_averages(self, key=\"_scvi_labels\"):\n # find cell label column\n label_col = self.adata.uns[\"_scvi\"][\"categorical_mappings\"][key][\"original_key\"]\n\n # find data slot\n x_dict = self.adata.uns[\"_scvi\"][\"data_registry\"][\"X\"]\n if x_dict[\"attr_name\"] == \"X\":\n use_raw = False\n else:\n use_raw = True\n if x_dict[\"attr_name\"] == \"layers\":\n layer = x_dict[\"attr_key\"]\n else:\n layer = None\n\n # compute mean expression of each gene in each cluster/batch\n aver = compute_cluster_averages(self.adata, labels=label_col, use_raw=use_raw, layer=layer)\n\n return aver", "def load_cluster_accuracies():\n accuracies = morphs.utils.load._pickle(morphs.paths.ACCURACIES_PKL)\n cluster_accuracies = {\n block_path: accuracies[block_path]\n .groupby(\"cluster\")\n .agg(np.mean)\n .sort_values(\"accuracy\")\n for block_path in accuracies\n }\n return accuracies, cluster_accuracies", "def schedule_algorithm(self, algorithm_type):\n\n algorithm_data = self.clustering_parameters[\"algorithms\"][algorithm_type]\n\n # The algorithm we are going to use\n algorithm = self.build_algorithm(algorithm_type)\n\n # If not parameters were given we have to get the better ones\n clusterings = []\n\n auto_parameter_generation = True if not \"parameters\" in algorithm_data else False\n\n if auto_parameter_generation:\n print \"Generating params for\", algorithm_type\n algorithm_run_params, clusterings = self.parameters_generator.get_parameters_for_type(algorithm_type)\n else:\n # A list with all the parameters for diverse runs\n algorithm_run_params = algorithm_data[\"parameters\"]\n\n clusterings_info = self.generate_clustering_info(algorithm_type, algorithm_run_params, clusterings)\n\n # Sometimes getting the best parameters imply getting the clusterings themselves\n if clusterings == []:\n for clustering_id in clusterings_info:\n one_clustering_info = clusterings_info[clustering_id]\n self.scheduler.add_task( task_name = clustering_id,\n description = \"Generation of clustering with %s algorithm and id %s\"%(\n one_clustering_info[\"type\"],\n clustering_id\n ),\n target_function = run_algorithm,\n function_kwargs = {\n \"algorithm\":algorithm,\n \"clustering_id\":clustering_id,\n \"algorithm_kwargs\":one_clustering_info[\"parameters\"]\n },\n dependencies = {})\n return clusterings_info", "def compute_performance_analysis(self, G, I, thresholds=0.01):\r\n FAR = self.compute_FAR(I, thresholds)\r\n FRR = self.compute_FRR(G, thresholds)\r\n CRR = self.compute_CRR(FAR)\r\n CAR = self.compute_CAR(FRR)\r\n EER = self.compute_EER(FAR, FRR)\r\n AUC = self.compute_AUC(FAR, CAR)\r\n return FAR, FRR, CRR, CAR, EER, AUC", "def compute_metrics(self, results: list) -> dict:", "def test_optimalk(parallel_backend, n_jobs, n_clusters):\n import numpy as np\n from sklearn.datasets.samples_generator import make_blobs\n from gap_statistic import OptimalK\n\n # Create data\n X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)\n\n for algo in ['kmeans', 'kmeans2', 'skl-kmeans', 'sph-kmeans']:\n # Create optimalK instance\n optimalK = OptimalK(parallel_backend=parallel_backend, n_jobs=n_jobs, algo=algo)\n\n suggested_clusters = optimalK(X, n_refs=3, cluster_array=np.arange(1, 10))\n\n assert np.allclose(suggested_clusters, n_clusters, 2), \\\n ('Correct clusters is {}, OptimalK suggested {}'.format(n_clusters, suggested_clusters))", "def benchmark(self):\n 
nsites = []\n for m in self.methods:\n for name, structure in self.test_structures.items():\n cns = []\n if self.unique_sites:\n es = SpacegroupAnalyzer(structure).get_symmetrized_structure().equivalent_sites\n sites = [structure.index(x[0]) for x in es]\n else:\n sites = range(len(structure))\n\n for key, val in self.hi.items():\n if name == key:\n for j in sites:\n if isinstance(m, NearNeighbors):\n tmpcn = m.get_cn_dict(structure, j, self.use_weights)\n else:\n tmpcn = m.compute(structure, j)\n if tmpcn == \"null\":\n continue\n if self.nround:\n self._roundcns(tmpcn, self.nround)\n cns.append((structure[j].species_string, tmpcn))\n if self.cation_anion:\n for mat, cat in self.cations.items():\n if (name == mat) and cat:\n cns = self._popel(cns, cat)\n elif self.anion_cation:\n for mat, an in self.anions.items():\n if name == mat:\n cns = self._popel(cns, an)\n m._cns[name] = cns\n nsites.append(len(cns))\n self.nsites = max(nsites)", "def plot_metric_results():\n from run_metric_comparison_experiments import (\n PIVECTOR_TEMPLATE,\n PIVECTOR_DISTANCE_MATRIX_TEMPLATE,\n DISCRIMINATOR_DISTANCE_MATRIX_TEMPLATE,\n GAUSSIAN_DISTANCE_MATRIX_TEMPLATE,\n ENCODER_DISTANCE_MATRIX_TEMPLATE,\n DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE,\n NUM_TRAJECTORIES,\n NUM_COMPONENTS,\n NUM_REPETITIONS,\n REWARD_SCALES,\n ENVS\n )\n\n # Path-templates to each distance matrix to compare\n # BC = Behavioural Characteristication\n BC_DISTANCE_MATRIX_TEMPLATES = [\n PIVECTOR_DISTANCE_MATRIX_TEMPLATE,\n GAUSSIAN_DISTANCE_MATRIX_TEMPLATE,\n DISCRIMINATOR_DISTANCE_MATRIX_TEMPLATE,\n ENCODER_DISTANCE_MATRIX_TEMPLATE,\n DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE\n ]\n\n BC_LEGEND_NAMES = [\n \"Supervector\",\n \"Gaussian\",\n \"Discriminator\",\n \"Encoder\",\n \"Discretization\"\n ]\n\n BC_PLOT_COLORS = [\n \"C0\",\n \"C1\",\n \"C2\",\n \"C3\",\n \"C4\"\n ]\n\n fig, axs = pyplot.subplots(\n figsize=[4.8 * 3 * 0.75, 4.8 * 0.75],\n nrows=1,\n ncols=3,\n )\n\n def get_policy_names(env):\n policy_names = glob(PIVECTOR_TEMPLATE.format(env=env, num_traj=\"*\", num_components=\"*\", policy_name=\"*\", repetition_num=\"*\"))\n policy_names = [\"_\".join(os.path.basename(x).split(\"_\")[-4:-2]) for x in policy_names]\n policy_names = sorted(list(set(policy_names)))\n return policy_names\n\n # For each different distance measurement\n for distance_matrix_template, plot_legend_name, plot_color in zip(BC_DISTANCE_MATRIX_TEMPLATES, BC_LEGEND_NAMES, BC_PLOT_COLORS):\n # These will be NUM_TRAJECTORY length lists\n average_scores = np.ones((len(NUM_TRAJECTORIES),))\n std_scores = np.ones((len(NUM_TRAJECTORIES),))\n for num_traj_idx, num_traj in enumerate(NUM_TRAJECTORIES):\n # Average over environments, policies and repetitions\n scores = []\n for env_i, env in enumerate(ENVS):\n if \"Bipedal\" in env and distance_matrix_template == DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE:\n print(\"[Note] Skipping env {} for discretization distances (OOM)\".format(env))\n continue\n min_reward, max_reward = REWARD_SCALES[env]\n policy_names = get_policy_names(env)\n\n for policy_name in policy_names:\n for repetition in range(1, NUM_REPETITIONS + 1):\n # Ugh bit of messing around because I did not think this through...\n if distance_matrix_template == PIVECTOR_DISTANCE_MATRIX_TEMPLATE:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, num_components=NUM_COMPONENTS, policy_name=policy_name, repetition_num=repetition)\n else:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, policy_name=policy_name, 
repetition_num=repetition)\n\n data = np.load(file_path)\n distance_matrix = data[\"distance_matrix\"]\n rewards = data[\"average_episodic_rewards\"]\n\n raveled_reward_distances = np.abs(rewards - rewards[:, None])\n # Take upper diagonal, skip diagonal\n raveled_reward_distances = raveled_reward_distances[np.triu_indices(raveled_reward_distances.shape[0], 1)]\n raveled_distances = distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)]\n\n # Score is correlation between the two\n correlation = np.corrcoef(raveled_distances, raveled_reward_distances)[0, 1]\n scores.append(correlation)\n\n scores = np.array(scores)\n average_score = np.mean(scores)\n std_score = np.std(scores)\n average_scores[num_traj_idx] = average_score\n std_scores[num_traj_idx] = std_score\n ax = axs[0]\n ax.plot(NUM_TRAJECTORIES, average_scores, c=plot_color, label=plot_legend_name)\n ax.scatter(NUM_TRAJECTORIES, average_scores, c=plot_color)\n #ax.fill_between(\n # NUM_TRAJECTORIES,\n # average_scores - std_scores,\n # average_scores + std_scores,\n # alpha=0.2,\n # color=plot_color,\n # edgecolor=\"none\",\n # linewidth=0.0\n #)\n ax.set_xticks(NUM_TRAJECTORIES)\n ax.tick_params(axis='both', which='both', labelsize=\"x-large\")\n ax.set_ylabel(\"Correlation with return-distances\", fontsize=\"x-large\")\n ax.set_xlabel(\"Number of trajectories\", fontsize=\"x-large\")\n ax.grid(alpha=0.2)\n\n # Amount of error to \"ground truth\" result,\n # where \"ground truth\" is one of the results with 100 trajectories of data.\n # Because of wonkyness of this, store list [#num-traj] of lists,\n # each storing results for that num-traj run\n per_trajectory_relative_errors = [[] for i in NUM_TRAJECTORIES]\n for env in ENVS:\n if \"Bipedal\" in env and distance_matrix_template == DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE:\n print(\"[Note] Skipping env {} for discretization distances (OOM)\".format(env))\n continue\n policy_names = get_policy_names(env)\n for policy_name in policy_names:\n # The \"ground truth\" distances, will be filled with first\n # result with 100 trajectories.\n anchor_distance = None\n for traj_i, num_traj in enumerate(NUM_TRAJECTORIES):\n for repetition in range(1, NUM_REPETITIONS + 1):\n if distance_matrix_template == PIVECTOR_DISTANCE_MATRIX_TEMPLATE:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, num_components=NUM_COMPONENTS, policy_name=policy_name, repetition_num=repetition)\n else:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, policy_name=policy_name, repetition_num=repetition)\n distance_matrix = np.load(file_path)[\"distance_matrix\"]\n # Normalize to [0, 1]\n distance_matrix = (distance_matrix - distance_matrix.min()) / (distance_matrix.max() - distance_matrix.min())\n # Get only upper triangle as distance matrix is symmetric. 
Exlude diagonal\n raveled_distances = distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)]\n # Check if we use this as the zero-point or compute relative error to\n if anchor_distance is None:\n assert num_traj == 100\n anchor_distance = raveled_distances\n else:\n per_trajectory_relative_errors[traj_i].append(\n np.mean(np.abs(raveled_distances - anchor_distance) / np.abs(anchor_distance))\n )\n # Lists are not of equal length, so can not just change into an array\n mean_average_errors = np.array([np.mean(np.array(results) * 100) for results in per_trajectory_relative_errors])\n std_average_errors = np.array([np.std(np.array(results) * 100) for results in per_trajectory_relative_errors])\n ax = axs[1]\n ax.plot(NUM_TRAJECTORIES, mean_average_errors, c=plot_color, label=plot_legend_name)\n ax.scatter(NUM_TRAJECTORIES, mean_average_errors, c=plot_color)\n #ax.fill_between(\n # NUM_TRAJECTORIES,\n # mean_average_errors - std_average_errors,\n # mean_average_errors + std_average_errors,\n # alpha=0.2,\n # color=plot_color,\n # edgecolor=\"none\",\n # linewidth=0.0\n #)\n ax.set_xticks(NUM_TRAJECTORIES)\n ax.tick_params(axis='both', which='both', labelsize=\"x-large\")\n ax.set_ylabel(\"Relative error to ground truth (%)\", fontsize=\"x-large\")\n ax.set_xlabel(\"Number of trajectories\", fontsize=\"x-large\")\n ax.grid(alpha=0.2)\n\n # Variation between results\n cv_means = np.ones((len(NUM_TRAJECTORIES,)))\n cv_stds = np.ones((len(NUM_TRAJECTORIES,)))\n for traj_i, num_traj in enumerate(NUM_TRAJECTORIES):\n traj_averaged_cvs = []\n for env in ENVS:\n if \"Bipedal\" in env and distance_matrix_template == DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE:\n print(\"[Note] Skipping env {} for discretization distances (OOM)\".format(env))\n continue\n policy_names = get_policy_names(env)\n for policy_name in policy_names:\n # Compute std over repetitions\n distances = []\n for repetition in range(1, NUM_REPETITIONS + 1):\n if distance_matrix_template == PIVECTOR_DISTANCE_MATRIX_TEMPLATE:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, num_components=NUM_COMPONENTS, policy_name=policy_name, repetition_num=repetition)\n else:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, policy_name=policy_name, repetition_num=repetition)\n\n distance_matrix = np.load(file_path)[\"distance_matrix\"]\n # Normalize to [0, 1]\n distance_matrix = (distance_matrix - distance_matrix.min()) / (distance_matrix.max() - distance_matrix.min())\n # Get only upper triangle as distance matrix is symmetric. 
Exlude diagonal\n raveled_distances = distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)]\n distances.append(raveled_distances)\n distances = np.stack(distances)\n # Coefficient of variance (std / mean)\n average_cv = np.mean(np.std(distances, axis=0) / np.mean(distances, axis=0))\n traj_averaged_cvs.append(average_cv)\n traj_averaged_cvs = np.array(traj_averaged_cvs)\n cv_means[traj_i] = np.mean(traj_averaged_cvs)\n cv_stds[traj_i] = np.std(traj_averaged_cvs)\n\n ax = axs[2]\n ax.plot(NUM_TRAJECTORIES, cv_means, c=plot_color, label=plot_legend_name)\n ax.scatter(NUM_TRAJECTORIES, cv_means, c=plot_color)\n #ax.fill_between(\n # NUM_TRAJECTORIES,\n # cv_means - cv_stds,\n # cv_means + cv_stds,\n # alpha=0.2,\n # color=plot_color,\n # edgecolor=\"none\",\n # linewidth=0.0\n #)\n ax.set_xticks(NUM_TRAJECTORIES)\n ax.tick_params(axis='both', which='both', labelsize=\"x-large\")\n ax.set_ylabel(\"Coefficient of variance $\\\\sigma/\\\\mu$\", fontsize=\"x-large\")\n ax.set_xlabel(\"Number of trajectories\", fontsize=\"x-large\")\n ax.grid(alpha=0.2)\n\n axs[1].legend(prop={\"size\": \"large\"})\n pyplot.tight_layout()\n pyplot.savefig(\"figures/metric_comparison.pdf\", bbox_inches=\"tight\", pad_inches=0.0)", "def run(self):\n # load_data\n layers = self.load_all_data() # list of tuples (file_name, feature_matrix)\n\n # check variable types\n if len(self.method) == 1:\n self.method = [self.method[0]] * len(layers)\n elif len(layers) != len(self.method):\n raise ValueError(\"Number of matrices extracted from input files and number of similarity methods \" +\n \"does not correspond\")\n\n # check missing value parameter\n if len(self.missing) == 1:\n self.logger.info(\"#Setting all 'missing' parameters to {}\".format(self.missing[0]))\n self.missing = [self.missing[0]] * len(layers)\n elif len(layers) != len(self.missing):\n raise ValueError(\"Number of matrices extracted from input files and number of given missing parameters \" +\n \"does not correspond\")\n\n # extract sample names\n all_samples = set()\n for layer_data in layers:\n all_samples = all_samples.union({name for name in layer_data[1].columns})\n self.logger.info(\"#Total number of unique samples: {}\".format(len(all_samples)))\n\n out_arrays = {}\n adj_matrices = []\n\n # create adjacency matrices\n for i in range(len(layers)):\n self.logger.info(\"#Layer: {}\".format(i))\n layer_data = layers[i][1]\n\n # add missing samples layer\n samples = {name for name in layer_data.columns}\n for name in all_samples - samples:\n layer_data[name] = np.nan\n\n # sort data frame by sample names\n layer_data.sort_index(axis=1, inplace=True)\n\n # extract feature matrices\n f = layer_data.values.T\n self.logger.info(\"Feature matrix: ({} samples x {} features)\".format(f.shape[0], f.shape[1]))\n\n # check if feature matrix values are correct\n ncat = check_categories(f)\n if ncat != [0, 1]:\n standardized = is_standardized(f, axis=0, atol=self.atol)\n if not standardized[0]:\n raise ValueError(\"Incorrect values in feature matrix. Mean of features in \" +\n \"({},{}) \".format(round(standardized[1][0], 3), round(standardized[1][1], 3)) +\n \"range. Standard deviation of features in \" +\n \"({}, {}) \".format(round(standardized[2][0], 3), round(standardized[2][1], 3)) +\n \"range. Please, supply either binary dataset \" +\n \"(0 or 1 feature values) or continuous values standardized feature-wise. 
\" +\n \"Alternatively for almost standardized continuous data, \" +\n \"increase '-atol' parameter value (currently {}).\".format(self.atol))\n else:\n self.logger.debug(\"Data is correctly standardized\")\n else:\n self.logger.debug(\"Found two unique categories in data: [0, 1]\")\n if self.method[i] != 'cosine':\n self.logger.info(\"Using '{}' similarity for [0, 1] data. \".format(self.method[i]) +\n \"Suggested better measure: cosine similarity.\")\n\n # create adjacency matrix\n a = feature_to_adjacency(f, missing=self.missing[i], method=self.method[i], n=self.k, alpha=self.alpha)\n self.logger.info('Adjacency matrix {} created [similarity method: {}]'.format(a.shape, self.method[i]))\n\n # plot adjacency matrix\n plot_path = self.plot_base + \"_\" + str(i) + \".png\" if self.plot else self.plot_base\n plot_heatmap_seaborn(a, title=\"Layer {} (source:{})\".format(i, layers[i][0]), file_path=plot_path)\n if self.plot:\n self.logger.info(\"Adjacency matrix plot saved to {}\".format(plot_path))\n\n # add matrices to output arrays\n out_arrays[str(i)] = a\n adj_matrices.append(a)\n out_arrays[\"f\" + str(i)] = f\n\n # check if there are samples not accessible in any layer\n missing_samples = []\n for a in adj_matrices:\n missing_samples += [i for i in range(a.shape[1]) if np.all(np.isnan(a[:, i]))]\n\n samples_to_drop = [sample for sample, val in Counter(missing_samples).items() if val == len(adj_matrices)]\n if samples_to_drop:\n # drop inaccessible samples\n self.logger.info(\"Found samples inaccessible in every layer of graph. \" +\n \"Try changing '-missing' parameter or inspect your data \")\n sample_names = np.array(sorted(list(all_samples)))[np.array(samples_to_drop)]\n self.logger.info(\"Dropped samples: {}\".format(list(sample_names)))\n updated_out_arrays = {}\n selector = np.array([x for x in range(len(all_samples)) if x not in samples_to_drop])\n for i in range(len(out_arrays.keys())):\n if str(i) not in out_arrays.keys():\n break\n updated_out_arrays[str(i)] = out_arrays[str(i)][selector[:, None], selector]\n updated_out_arrays[\"f\" + str(i)] = out_arrays[\"f\" + str(i)][selector, :]\n\n # create output file\n updated_out_arrays[\"samples\"] = np.array(sorted(list(all_samples)))[selector]\n save_arrays_to_npz(data=updated_out_arrays, file_path=self.outfile)\n\n else:\n # create output file\n out_arrays[\"samples\"] = np.array(sorted(list(all_samples)))\n save_arrays_to_npz(data=out_arrays, file_path=self.outfile)\n\n self.logger.info(\"#Output file {} created\".format(self.outfile))", "def get_feature_statistics(results):\n to_be_deleted = []\n\n for result in results:\n if len(result.subset) != 6:\n to_be_deleted.append(result)\n\n length = len(results)\n feature_labels = datapoint_features\n statistics = {}\n\n for label in feature_labels:\n result_with = metrics.filter_results(results, features=[label])\n result_without = metrics.filter_results(results, without_features=[label])\n\n with_length = len(result_with)\n without_length = len(result_without)\n prevalence = with_length / length\n\n if prevalence != 0:\n avg_f1_dos = math.fsum([result.metrics['dos'].f1 for result in result_with]) / with_length\n avg_f1_fuzzy = math.fsum([result.metrics['fuzzy'].f1 for result in result_with]) / with_length\n avg_f1_imp = math.fsum([result.metrics['impersonation'].f1 for result in result_with]) / with_length\n else:\n avg_f1_dos = 0\n avg_f1_fuzzy = 0\n avg_f1_imp = 0\n\n avg_f1_without_dos = math.fsum([result.metrics['dos'].f1 for result in result_without]) / without_length\n 
avg_f1_without_fuzzy = math.fsum([result.metrics['fuzzy'].f1 for result in result_without]) / without_length\n avg_f1_without_imp = math.fsum([result.metrics['impersonation'].f1 for result in result_without]) / without_length\n avg_f1_diff_dos = avg_f1_without_dos - avg_f1_dos\n avg_f1_diff_fuzzy = avg_f1_without_fuzzy - avg_f1_fuzzy\n avg_f1_diff_imp = avg_f1_without_imp - avg_f1_imp\n\n statistics[label] = [prevalence, avg_f1_diff_dos, avg_f1_diff_fuzzy, avg_f1_diff_imp]\n\n return statistics", "def analyse_algorithms(R, u_t, inverse_transform, true_weight_vect, algo_list, equation, preconditioner):\n\n # Generate the model list for each sparse regression algorithm\n algo_model_list_list = []\n for algo_index, algo in enumerate(algo_list):\n model_list = generate_models(R, u_t, inverse_transform, algo)\n algo_model_list_list.append(model_list)\n\n # set up plot\n symbol_list = ['v', 's', 'o', '+', '*', 'D']\n color_list = [(162 / 256., 173 / 256., 0., 0.9), (156 / 256., 157 / 256., 159 / 256., 0.8),\n (0., 101 / 256., 189 / 256., 1.), (227 / 256., 114 / 256., 34 / 256., 0.9)]\n fig, ax = plt.subplots(figsize=(1.8 * 2.6, 1.8 * 1.95))\n\n # iterate all models of all algorithms and plot the summaries\n for algo_index, algo in enumerate(algo_list):\n model_list = algo_model_list_list[algo_index]\n for i_model, model in enumerate(model_list):\n set_model_correctness_and_error(model, true_weight_vect) # calculate absolute and relative error\n # plot model properties\n ax.semilogy(model.terms, model.mean_rel_err, marker=symbol_list[algo_index], markerfacecolor='none',\n linestyle='None', color=color_list[algo_index], label=algo if i_model == 0 else \"\")\n\n # finish plot\n ax.plot((-1., 10.), (1.5, 1.5), color='k')\n # xlim needs to be adjusted with respect to the maximum number of correct term in the model\n ax.set(ylim=[1.e-9, 75.], xlim=[0.5, 6.5], xlabel='Number of Terms', ylabel='MRE')\n ax.legend(loc=4)\n ax.set_yticks(ax.get_yticks()[:-3])\n ax.set_xticks(np.arange(1, 7, 1))\n plt.savefig('Regression_accuracy_comparison_' + str(equation) + '_' + preconditioner + '_test.png')\n plt.savefig('Regression_accuracy_comparison_' + str(equation) + '_' + preconditioner + '_test.pgf')", "def test_query_algorithms_101Algorithms_(self):\n right_list = []\n create_test_algorithm_list(right_list, 101)\n documents = []\n create_test_documents_list(right_list, documents, 101)\n index = search.Index(name=search_algorithm._INDEX_STRING)\n index.put(documents)\n result = search_algorithm.query_algorithms(index)\n self.assertEqual(101, len(result), msg='Wrong number of algorithms')\n self.assertItemsEqual(right_list, result, msg='Discrepancy in returned algorithms')", "def main():\n\n file_name_base = \"./lab-record/result/fairness/\"\n scenarios = ['lan', 'wan1', 'wan2']\n scenario = scenarios[2]\n\n algorithms = [\"bbr\", \"scalable\", \"bic\", \"highspeed\", \"htcp\", \"hybla\",\n \"illinois\", \"vegas\", \"yeah\"]\n names = [\"BBR\", \"Scalable\", \"BIC\", \"High Speed\",\n \"H-TCP\", \"Hybla\", \"Illinois\", \"Vegas\", \"YeAH\"]\n\n test_types = [\"vs_reno\", \"vs_cubic\", \"vs_itself\"]\n\n fsize = 36\n \n index_reno = []\n index_cubic = []\n index_itself = []\n\n data = []\n \n print 'Loadint statistics for ' + file_name_base + '/' + scenario\n\n for algorithm in algorithms:\n for test in test_types:\n path_base = file_name_base + \"/\" + scenario + \"/\" + test + \"/\" + \\\n algorithm + \"/\"\n if test == \"vs_itself\":\n exp_name = names[algorithms.index(algorithm)] + \"_1\"\n con_name = 
names[algorithms.index(algorithm)] + \"_2\"\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \"_1.log\"\n con_filename = \"/\" + algorithm + \"_2.log\"\n process(path_base, exp_filename, con_filename, index_itself)\n if test == \"vs_reno\":\n exp_name = names[algorithms.index(algorithm)]\n con_name = \"Reno\"\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \".log\"\n con_filename = \"/reno.log\"\n process(path_base, exp_filename, con_filename, index_reno)\n if test == \"vs_cubic\":\n con_name = \"CUBIC\"\n exp_name = names[algorithms.index(algorithm)]\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \".log\"\n con_filename = \"/cubic.log\"\n process(path_base, exp_filename, con_filename, index_cubic)\n\n size = 9\n x = numpy.arange(size)\n\n total_width, n = 1.2, 2.5\n width = 1.0 / n\n x = x - (total_width - width) / 2\n\n for i in range(0, len(x)):\n x[i] += 0.5 * i\n\n # Exp\n fig = plt.figure()\n\n # Con\n con_reno = plt.bar(x + 0 * width - 1.2,\n index_reno,\n width=width,\n label='Against Reno',\n alpha=0.5,\n color=\"darkorange\")\n\n con_cubic = plt.bar(x + 1 * width - 1.2,\n index_cubic,\n width=width,\n label='Against CUBIC',\n alpha=0.5,\n color=\"lawngreen\")\n\n con_itself = plt.bar(x + 2 * width - 1.2,\n index_itself,\n width=width,\n label='Against Another Same CCA',\n alpha=0.5,\n color=\"dodgerblue\")\n\n # Index\n plt.xticks(x + 1.5 * width - 1.2, [\"BBR\", \"Scalable\", \"BIC\", \"High Speed\",\n \"H-TCP\", \"Hybla\", \"Illinois\", \"Vegas\",\n \"YeAH\"],\n fontsize=fsize,\n rotation=\"45\")\n plt.ylabel(\"Jain`s Fairness Index\", fontsize=fsize)\n plt.yticks(fontsize=fsize)\n plt.ylim(0.5, 1.1)\n\n ax = plt.subplot(111)\n ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=3, mode=\"expand\", borderaxespad=0., fontsize=fsize)\n\n plt.subplots_adjust(left=0.07, right=0.98, top=0.9, bottom=0.2)\n\n plt.show()", "def set_algorithm(self, algorithm):\n self._data_dict[self.ALGO_INFO] = {'module' : algorithm.algo_group.module,\n 'class' : algorithm.algo_group.classname,\n 'init_param' : algorithm.algo_group.init_parameter,\n 'identifier' : algorithm.identifier}" ]
[ "0.6315484", "0.61245906", "0.5925279", "0.5822709", "0.57913584", "0.57853943", "0.57158273", "0.56992424", "0.5629585", "0.56262213", "0.55483156", "0.5493547", "0.5488877", "0.54682064", "0.5456372", "0.5410733", "0.5409758", "0.54085684", "0.53946304", "0.53820395", "0.5365618", "0.5353488", "0.53240466", "0.5320415", "0.5304393", "0.5303199", "0.52986354", "0.5283749", "0.5268433", "0.52369326", "0.5180255", "0.5175428", "0.5164062", "0.515936", "0.51485467", "0.5147816", "0.5142116", "0.5135826", "0.51307833", "0.51235807", "0.5123402", "0.5120338", "0.51176727", "0.5116642", "0.5108093", "0.51047105", "0.50972265", "0.5096663", "0.5096663", "0.50897586", "0.5089474", "0.5086195", "0.5073285", "0.5069407", "0.5045897", "0.50371", "0.5032525", "0.5024298", "0.50012976", "0.49893403", "0.49816048", "0.496002", "0.49576274", "0.4942733", "0.4941863", "0.49375358", "0.49359924", "0.49280685", "0.49258095", "0.49247968", "0.4923402", "0.4920409", "0.4914313", "0.49006045", "0.4899087", "0.48885742", "0.48871896", "0.48808146", "0.48771632", "0.48620665", "0.48606378", "0.485639", "0.4851699", "0.48480994", "0.48418057", "0.4839492", "0.48355648", "0.48308", "0.48281068", "0.4818679", "0.48169157", "0.48157486", "0.48147896", "0.4811808", "0.4810023", "0.4807312", "0.48026064", "0.47996712", "0.4797637", "0.47930786" ]
0.69420356
0
Calculates a clustering's contingency matrix for each clustering algorithm stored in the list clustering_alg and adds it to the dict.
def eval_cluster_contingency(clustering_alg: List, labels_true, sdist): for (alg_name, alg_dict) in clustering_alg: if "alg" in alg_dict: clustering = alg_dict["alg"].fit(sdist) labels_pred = clustering.labels_ alg_dict["labels"] = labels_pred else: labels_pred = alg_dict["labels"] pred_label_dict, new_labels = normalize_labels(labels_pred) alg_dict["cm"] = contingency_matrix(labels_true, new_labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_clustering_info(self, algorithm_type, clustering_parameters, clusterings = []):\n clustering_info = {}\n for i, running_parameters in enumerate(clustering_parameters):\n\n clustering_id = \"clustering_%04d\"%(self.current_clustering_id)\n self.current_clustering_id += 1\n clustering_info[clustering_id] = {\n \"type\":algorithm_type,\n \"clustering\": None,\n \"parameters\": running_parameters\n }\n\n if clusterings != []:\n clustering_info[clustering_id][\"clustering\"] = clusterings[i]\n\n return clustering_info", "def enumerate_clusterings(self):\n\n # Initialize an empty list of clusterings. Each element of the list\n # is a dictionary mapping NOEs to the signatures they are clustered to\n # in a solution. Each clustering is initialize with all uniquely\n # clusterable NOEs as keys mapping to their unique clusters\n\n clusterings = []\n\n while True:\n\n # Run the solver and get a solution back\n\n solution = self.solve()\n\n # If UNSAT, then flush aux clauses from the formula and return\n # all the clusterings we found so far\n\n if not solution:\n self.flush()\n return clusterings\n\n # Iterate over the clustering variables set to true by in the\n # discovered solution. Forbid this clustering from reoccuring and\n # add it to the list of found clusterings\n\n clause = []\n clustering = {}\n for node in self.clustering_variables.keys():\n if len(node.clusters) == 1:\n clustering[node] = list(node.clusters)[0]\n\n for vtype, node, cluster in solution:\n if vtype == Formula.CST_VAR:\n clustering[node] = cluster\n clause.append(-self.clustering_variables[node][cluster])\n\n self.add_clause(clause)\n clusterings.append(clustering)", "def matrix_dist(self):\n matrix_dic = {}\n for clus in self.clusters:\n for other_clus in self.clusters:\n if clus.samples[0].s_id > other_clus.samples[0].s_id: # avoid duplicates\n matrix_dic[(clus.samples[0].s_id, other_clus.samples[0].s_id)] = clus.samples[0]\\\n .compute_euclidean_distance(other_clus.samples[0])\n return matrix_dic", "def _eval_clustering(self, gen_reviews, clusters, embedding_model, clustering):\n result = []\n preds = self.predict_gen(gen_reviews, embedding_model, clustering)\n\n acc = accuracy_score(np.array(clusters), np.array(preds))\n conf = confusion_matrix(np.array(clusters), np.array(preds))\n\n return acc, conf", "def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n 
print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in 
total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")", "def cluster(self):\n\t\tself.index[\"cluster\"] = {}\n\n\t\tfor item in self.index[\"items\"]:\n\t\t\tself.index[\"cluster\"][item] = [{\"weight\" : float(len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))))/float(len(self.index[\"items\"][item])) , \"name\" : id, \"authority\" : set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id])) } for id in self.index[\"items\"] if id != item and len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))) >= 1]\n\n\t\treturn self.index", "def evaluate_clustering_methods(methods):\r\n results = {}\r\n for m in methods:\r\n res = results[m['name']] = {}\r\n prec = 3\r\n res['Adjusted Rand Score'] = round(sklearn.metrics.adjusted_rand_score(m['target'], m['clustering']),prec)\r\n res['Normalized Mutual Information'] = round(sklearn.metrics.normalized_mutual_info_score(m['target'], m['clustering']),prec)\r\n res['Adjusted Mutual Information'] = round(sklearn.metrics.adjusted_mutual_info_score(m['target'], m['clustering']),prec)\r\n return np.transpose(results)", "def clustering(distribution, areal_units, classes=None):\n\n # Regroup into classes if specified. 
Otherwise return categories indicated\n # in the data\n if not classes:\n classes = return_categories(distribution) \n \n ## Get the number of neighbourhoods\n neigh = mb.neighbourhoods(distribution, areal_units, classes)\n num_neigh = {cl: len(neigh[cl]) for cl in classes}\n num_units = {cl: len([a for ne in neigh[cl] for a in ne])\n for cl in classes}\n\n ## Compute clustering values\n clustering = {}\n for cl in classes:\n if num_units[cl] == 0:\n clustering[cl] = float('nan')\n elif num_units[cl] == 1:\n clustering[cl] = 1\n else:\n clustering[cl] = _single_clustering(num_units[cl],\n num_neigh[cl])\n\n clustering[cl] = ((num_neigh[cl] - num_units[cl]) /\n (1 - num_units[cl]))\n return clustering", "def get_clustering_algorithm_class(cls):\n return {\n \"spectral\": SpectralClusteringAlgorithm,\n \"dbscan\": DBSCANAlgorithm,\n \"gromos\": GromosAlgorithm,\n \"kmedoids\": KMedoidsAlgorithm,\n \"random\": RandomClusteringAlgorithm,\n \"hierarchical\": HierarchicalClusteringAlgorithm\n }", "def cluster_all_features(feature_mat):\n n_dims = feature_mat.shape[1]\n whitened = whiten(feature_mat.transpose())\n all_codebooks = dict()\n for k in range(n_dims, 0, -1):\n centroids, distortion = kmeans(whitened, k)\n all_codebooks[k] = (distortion, centroids)\n\n return all_codebooks", "def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex += 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters", "def update(self, clusters):\n centroids = {}\n for cluster, coordinates in clusters.iteritems():\n sumLat = 0\n sumLong = 0\n for coordinate in coordinates:\n sumLat += float(coordinate[0])\n sumLong += float(coordinate[1])\n centroids[cluster] = (sumLat/float(len(coordinates)), sumLong/float(len(coordinates)))\n return centroids", "def evaulate_clusters(self, pred_dict, model_dir):\n\t\tclustering_dict = {\"Topic\":[], \"Text\":[], \"Keywords\": []}\n\t\tfor cluster_num, sents_list in pred_dict.items():\n\t\t\tprint(\"\\n cluster number : \", cluster_num)\n\t\t\tprint(\"\\n number of sents : \", len(sents_list))\n\t\t\ttfidf_vec = TfidfVectorizer(use_idf=True, sublinear_tf=True, max_df=0.8, max_features=20, ngram_range=(1,5), min_df=1)\n\t\t\tX_tfidf = tfidf_vec.fit_transform(sents_list).toarray()\n\t\t\ttotal_tfidf = tfidf_vec.get_feature_names()\n\t\t\tfor sent in sents_list:\n\t\t\t\tclustering_dict[\"Topic\"].append(cluster_num)\n\t\t\t\tclustering_dict[\"Text\"].append(sent)\n\t\t\t\tclustering_dict[\"Keywords\"].append(\",\".join(total_tfidf))\n\t\t\"\"\" save the clusters to csv file \"\"\"\n\t\tdf_dominant_topic = defaultdict(list) \n\t\tdf_dominant_topic[\"Topic\"] = clustering_dict[\"Topic\"]\n\t\tdf_dominant_topic[\"Text\"] = clustering_dict[\"Text\"]\n\t\tdf_dominant_topic[\"Keywords\"] = clustering_dict[\"Keywords\"]\n\t\tdf_dominant_topic = pd.DataFrame(df_dominant_topic)\n\t\tdf_dominant_topic.to_csv(os.path.join(model_dir, \"cluster_sentence_topic_mapping.csv\"))\n\t\treturn df_dominant_topic", "def calc_cc(graph):\n\tclustering_coeffs = {}\n\tfor node in 
graph.nodes():\n\t\tclustering_coeffs[node] = { \"cc\" : nx.clustering(graph, node)}\n\tnx.set_node_attributes(graph, clustering_coeffs)", "def gen_cluster_accuracies():\n accuracies = {}\n with Parallel(n_jobs=morphs.parallel.N_JOBS) as parallel:\n for block_path in morphs.paths.blocks():\n print(block_path)\n spikes = morphs.load.ephys_data(block_path, collapse_endpoints=True)\n\n if len(spikes[\"recording\"].unique()) >= 1:\n template_spikes = spikes[spikes[\"stim_id\"].isin(list(\"abcdefgh\"))]\n assert len(template_spikes) > 0\n cluster_groups = template_spikes.groupby(\"cluster\")\n\n morph_dims = spikes.morph_dim.unique()\n morph_dims = morph_dims[~pd.isnull(morph_dims)]\n morph_dims.sort()\n\n max_num_reps = np.max(\n [\n len(stim_group.groupby(by=[\"recording\", \"stim_presentation\"]))\n for stim_id, stim_group in template_spikes.groupby(\"stim_id\")\n ]\n )\n\n accuracies_list = parallel(\n delayed(cluster_accuracy)(\n cluster, cluster_group, morph_dims, max_num_reps\n )\n for (cluster, cluster_group) in cluster_groups\n )\n\n accuracies[block_path] = pd.concat(accuracies_list)\n\n morphs.paths.PROCESSED_DIR.mkdir(parents=True, exist_ok=True)\n with open(morphs.paths.ACCURACIES_PKL.as_posix(), \"wb\") as f:\n pickle.dump(accuracies, f)", "def clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n 
clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)", "def cluster_classification(weblog,classification_column_transaction,\\\n classification_column_diversity, session_data_threshold, cluster_type, classification_wanted_transaction, verbose = False):\n if verbose== True:\n start_time = timelib.time()\n print(\"\\n * Computing cluster matrices ...\") \n browsing_matrix = {}\n diversifying_matrix = {}\n # Selecting sessions from each cluster\n for cluster_id in session_data_threshold[cluster_type].unique():\n sessions_cluster = session_data_threshold[session_data_threshold[cluster_type]==cluster_id].session_id\n divpat_log = weblog[weblog.session_id.isin(sessions_cluster)]\n # Filtering some requests\n divpat_log=divpat_log[divpat_log['requested_'+classification_column_transaction].isin(classification_wanted_transaction)]\n divpat_log=divpat_log[divpat_log['referrer_'+classification_column_transaction].isin(classification_wanted_transaction)]\n \n # Defining matrices\n diversity_columns=('referrer_'+classification_column_diversity,'requested_'+classification_column_diversity)\n browsing_matrix[cluster_id],_ = compute_browsing_matrix(divpat_log,'referrer_'+classification_column_transaction,'requested_'+classification_column_transaction,labels=classification_wanted_transaction)\n diversifying_matrix[cluster_id],_ = compute_diversifying_matrix(divpat_log,'referrer_'+classification_column_transaction,'requested_'+classification_column_transaction,\\\n diversity_columns,labels = classification_wanted_transaction)\n if verbose == True:\n print(\" Cluster matrices computed in %.1f seconds.\"%(timelib.time() - start_time))\n \n return browsing_matrix, diversifying_matrix;", "def _granger_causality(self):\r\n gc = dict(frequencies={}, gc_xy={}, gc_yx={}, gc_sim={},\r\n spectral_density={})\r\n for i, j in self.ij:\r\n w, f_x2y, f_y2x, f_xy, Sw = \\\r\n alg.granger_causality_xy(self.model_coef[i, j],\r\n self.error_cov[i, j],\r\n n_freqs=self._n_freqs)\r\n\r\n # All other measures are dependent on i, j:\r\n gc['gc_xy'][i, j] = f_x2y\r\n gc['gc_yx'][i, j] = f_y2x\r\n gc['gc_sim'][i, j] = f_xy\r\n gc['spectral_density'][i, j] = Sw\r\n\r\n 
return gc", "def compute_clusters(self, documents):\n ###TODO\n for d in range(0, len(documents)):\n maxi = 999999999\n for cid in range(0, len(self.means)):\n dist = self.distance(documents[d], self.means[cid], self.norms[cid])\n if dist < maxi:\n maxi = dist\n clust = cid \n self.cluster[d] = clust", "def kmode_calculation(self, data):\n col_dict = {}\n\n for col in data.columns:\n data[col] = data[col].astype('category')\n col_dict.update({col: dict(enumerate(data[col].cat.categories))})\n\n # Get all the cols in the DataFrame\n cols = [col for col in data.columns]\n\n # Transform all values into categorical and numerical values\n for col in cols:\n data[col] = data[col].astype('category')\n data[col] = data[col].cat.codes\n\n # Run k-modes using the algorithm\n kmodes_method = KModes(n_clusters=self.n_cluster, init=self.init_method, n_init=self.n_iter, verbose=1)\n kmode_result = kmodes_method.fit_predict(data[cols])\n\n # Attach the output label for each data point\n data['classification'] = pd.Series(kmode_result, index=data.index)\n\n return col_dict, kmodes_method.cluster_centroids_, data", "def _compute_centroids(self, encodings, labels):\n counts = {}\n centroids = {}\n\n # Copy encodings to avoid ref modification when computing centroid.\n encodings = encodings.copy()\n\n for i, encoding in enumerate(encodings):\n key = int(labels[i])\n if key in centroids:\n centroids[key] += encoding\n counts[key] += 1\n else:\n centroids[key] = encoding\n counts[key] = 1\n for key in centroids:\n centroids[key] /= counts[key]\n self.centroids = centroids", "def get_clusters_adjacencies(adjacency, clusters: list):\n clusters.sort(key=lambda t: len(t), reverse=True)\n id_to_cluster = get_id_to_cluster(clusters, adjacency.shape[0])\n num_clusters = len(clusters)\n mat = np.zeros((num_clusters, num_clusters))\n rows, cols = adjacency.nonzero()\n for i, j in zip(rows, cols):\n weight = adjacency[i, j]\n src_cluster = id_to_cluster[i]\n dest_cluster = id_to_cluster[j]\n mat[src_cluster, dest_cluster] += weight\n return mat", "def compute_confusion_matrix(num_clusters, clustered_points_algo, sorted_indices_algo):\r\n seg_len = 400\r\n true_confusion_matrix = np.zeros([num_clusters, num_clusters])\r\n for point in range(len(clustered_points_algo)):\r\n cluster = clustered_points_algo[point]\r\n num = (int(sorted_indices_algo[point]/seg_len) % num_clusters)\r\n true_confusion_matrix[int(num), int(cluster)] += 1\r\n return true_confusion_matrix", "def clustering_metrics(clusts, node_assn, node_pred):\n pred_vox = cluster_to_voxel_label(clusts, node_pred)\n true_vox = cluster_to_voxel_label(clusts, node_assn)\n ari = ARI(pred_vox, true_vox)\n ami = AMI(pred_vox, true_vox)\n sbd = SBD(pred_vox, true_vox)\n pur, eff = purity_efficiency(pred_vox, true_vox)\n return ari, ami, sbd, pur, eff", "def get_clusters(ensemble, grouping, clustering):\n\n\t# Prevent SQL injected since column names cannot be parameterized.\n\tif \";\" in ensemble or \";\" in grouping or \";\" in clustering:\n\t\treturn None\n\n\tensemble = ensemble.replace('EnsEns','Ens')\n\tdf = None;\n\n\tif grouping in ['annotation','cluster']:\n\t\tgroupingu = ensemble+\".\"+grouping+\"_\"+clustering\n\telif grouping in ['NeuN']:\n\t\tgroupingu = \"CONCAT('NeuN',cells.\"+grouping+\")\"\n\telse:\n\t\tgroupingu = \"cells.\"+grouping\n\n\t# Get methylation info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snmC' as modality, \\\n\t\t%(groupingu)s as groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id 
\\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'groupingu': groupingu,\n\t\t\t\t\t'clustering': clustering}\n\ttry:\n\t\tdf = pd.read_sql(query, db.get_engine(current_app, 'methylation_data'))\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\t\t# return None\n\n\t# Get snATAC info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snATAC' AS modality, %(ensemble)s.cluster_ATAC groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_atac = pd.read_sql(query, db.get_engine(current_app, 'snATAC_data'))\n\t\tdf=df.append(df_atac)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\n\t# Get snRNA info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'RNA' AS modality, %(ensemble)s.cluster_RNA groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_rna = pd.read_sql(query, db.get_engine(current_app, 'RNA_data'))\n\t\tdf=df.append(df_rna)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\treturn df", "def index_nodes(self):\n out = {}\n\n #avg = np.mean(list(self.rtype_vectors.values()),axis=0)\n\n\n #for name, node in self.nodes.items():\n # tmp1 = [self.rtype_vectors[rtype]\n # for rtype, dest in node.outgoing_relations] or [NULL_VEC()]\n # tmp2 = [permute_rtype_vector(self.rtype_vectors[rtype])\n # for rtype, prev in node.incoming_relations] or [NULL_VEC()]\n\n # net = tmp1 + tmp2\n\n # #out[name] = np.asarray(net).mean(axis=0)\n # #out[name] = np.asarray(net).sum(axis=0)\n # v = np.asarray(net).sum(axis=0)\n # if v.any():\n # out[name] = v/max(v)#softmax(v/max(v))\n # else:\n # out[name] = v\n\n\n #avg = np.mean(list(out.values()),axis=0)\n\n #maxm = np.max(list(out.values()),axis=0)\n\n ####normalize everything\n #for r,v in out.items():\n # if v.any():\n # #out[r] = v / sqrt(v.dot(v))\n # out[r] = softmax((v-avg)/maxm)\n\n\n\n # PCA method 0001701\n rmap = self.rtype_vectors\n data = np.zeros((len(self.nodes), JACCARD_DIMENSIONS), dtype=np.float)\n ix = 0\n for node in self.nodes.values():\n\n #compute weighted average of each relation type\n tmp = [rmap[rtype] for \n rtype, dest in node.outgoing_relations] + \\\n [permute_rtype_vector(rmap[rtype]) for \n rtype, prev in node.incoming_relations]\n\n v = np.asarray(tmp).mean(axis=0) if tmp else NULL_VEC()\n\n #normalize\n if v.any():\n data[ix] = v / sqrt(v.dot(v))\n else:\n data[ix] = v\n ix += 1\n\n #eliminate projection onto first 7 principal components\n d2 = data - PCA(data, 7)\n\n #order of nodes is preserved\n for i,v in enumerate(self.nodes):\n out[v] = softmax(d2[i])\n\n return out", "def evaluate(self):\n results = dict()\n for metric in self.metrics:\n print('Evaluating clustering with metric %s' % metric)\n if metric in LABEL_METRICS.keys():\n results[metric] = LABEL_METRICS[metric](self.X, self.model.labels_)\n results['adjusted_rand_score'] = SCORE_METRICS['adjusted_rand_score'](self.Y[:, 0], 
self.model.labels_)\n self.results = results\n return results", "def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)", "def _generate_adjacency_matrices(self):\n self.adj_matrices = dict()\n mes = []\n args = []\n for metaedge in self.metaedges:\n mes.append(metaedge)\n args.append(self._prepare_parallel_adj_matrix_args(self.edge_df.query('abbrev == @metaedge')))\n res = parallel_process(array=args, function=mt.get_adj_matrix, use_kwargs=True, n_jobs=self.n_jobs,\n front_num=0)\n for metaedge, matrix in zip(mes, res):\n self.adj_matrices[metaedge] = matrix", "def clustering(self):\n ret_concepts = []\n clusters = []\n for word in self.words:\n clusters.append(WordCluster(None, word))\n while len(clusters) > 1:\n maxi = -1\n maxj = -1\n max = -1\n m = -1\n for i in range(len(clusters)):\n for j in range(len(clusters)):\n if i == j:\n continue\n # print(\"%d cluster compare with %d cluster\" % (i, j))\n # 1: join 21: i absorb j 22: j absorb i 3: collapse\n # l1: join L(Tm) value l21: A absorb B L(Tm)value\n l1, newtags = self.__calculate_ltm(clusters[i], clusters[j], 1)\n if l1 > max:\n m = 1\n maxi = i\n maxj = j\n max = l1\n print(\"max L(Tm) for clustering in current loop: %lf\" % max)\n if max < ClusterAlgorithm.P_threshold:\n return\n Tm = clusters[maxi].join(clusters[maxj])\n Tm_concepts = self.__select_concepts(self.__getword(Tm))\n for tmp_concept in Tm_concepts.items():\n ret_concepts.append(tmp_concept)\n rm1 = clusters[maxi]\n rm2 = clusters[maxj]\n clusters.remove(rm1)\n clusters.remove(rm2)\n if Tm is not None:\n print(\"merged cluster's words:\")\n print(self.__getword(Tm))\n return ret_concepts", "def clusterAlgorithm(values):\n clusterMap = dict()\n for value in values:\n if value[2] not in clusterMap.keys():\n clusterMap[value[2]] = []\n clusterMap[value[2]].append(value)\n frequency = [float(len(clusterMap[value[2]])) for value in values]\n total = sum(frequency)\n weightValues = [freq / total for freq in frequency]\n print sum(weightValues)\n lightValues = [value[1] for value in values]\n return np.average(lightValues, weights = weightValues)", "def make_contingency_tables(\n y: np.ndarray, flagged_A: np.ndarray, flagged_B: np.ndarray\n) -> Dict[int, np.ndarray]:\n\n y = np.array(y).astype(np.int64).flatten()\n flagged_A = np.array(flagged_A).astype(np.bool_).flatten()\n flagged_B = np.array(flagged_B).astype(np.bool_).flatten()\n\n if len(flagged_A) != len(y) or len(flagged_B) != len(y):\n raise ValueError(\n f\"Expected arrays y, flagged_A, and flagged_B of the same length: \\\n got {len(y)}, {len(flagged_A)}, and {len(flagged_B)}.\"\n )\n\n contingency_tables = {}\n for class_id in np.unique(y):\n\n items_flagged_A = flagged_A[y == class_id]\n items_flagged_B = flagged_B[y == class_id]\n\n a = (~items_flagged_A & ~items_flagged_B).sum()\n b = (~items_flagged_A & items_flagged_B).sum()\n c = (items_flagged_A & ~items_flagged_B).sum()\n d = (items_flagged_A & items_flagged_B).sum()\n\n table = np.array([[a, b], [c, d]])\n contingency_tables[class_id] = table\n\n return contingency_tables", "def cluster_life_expectancy() -> Dict:\n return dict(model=None, score=None, clusters=None)", "def _cluster(self):\n # , 
distance_function=spearman_squared_distance, max_iter=1000, tol=0.0001):\n if self.cluster_method is None:\n clusters = KMedoids(\n self.k,\n self.batchsize,\n dist_func=self.distance_function,\n max_iter=self.max_iter,\n tol=self.tol,\n init_medoids=self.init_medoids,\n swap_medoids=self.swap_medoids,\n )\n clusters.fit(self.clustering_attributions, verbose=self.verbose)\n\n self.subpopulations = clusters.members\n self.subpopulation_sizes = GAM.get_subpopulation_sizes(clusters.members)\n self.explanations = self._get_explanations(clusters.centers)\n # Making explanations return numerical values instead of dask arrays\n if isinstance(self.explanations[0][0][1], da.Array):\n explanations = []\n for explanation in self.explanations:\n explanations.append([(x[0], x[1].compute()) for x in explanation])\n self.explanations = explanations\n else:\n self.cluster_method(self)", "def eval_v_measure_homogeneity_completeness(clustering_alg: List, sdist_euclidean, sdist_jaccard,\n labels_true, debug: bool = False):\n for i, alg_dict in enumerate(clustering_alg):\n if \"alg\" in alg_dict:\n if alg_dict[\"distance\"] == \"euclidean\":\n clustering = alg_dict[\"alg\"].fit(sdist_euclidean)\n elif alg_dict[\"distance\"] == \"jaccard\":\n clustering = alg_dict[\"alg\"].fit(sdist_jaccard)\n else:\n raise ValueError(\"Unknown distance measure {}. \".format(alg_dict[\"distance\"]) +\n \"Please choose one of the following distance measures ['euclidean','jaccard']\")\n labels_predicted = clustering.labels_\n alg_dict[\"labels\"] = labels_predicted\n else:\n labels_predicted = alg_dict[\"labels\"]\n\n alg_dict[\"homogeneity\"], alg_dict[\"completeness\"], alg_dict[\"v-measure\"] = \\\n homogeneity_completeness_v_measure(labels_true, labels_predicted)\n\n if debug:\n print(\"Alg: \" + alg_dict[\"name\"] + \"; \\t v-measure = \" + str(alg_dict[\"v-measure\"]))", "def predict_clustering_pairs(args, **kwargs):\n # We first compute the exponential and uniform likelihoods for all genomes\n # in the dataset, individually\n organisms_uniform_probabilities, n_pairs = \\\n clustering_probs.compute_organisms_probabilities(\n args, **kwargs)\n\n # Finds all the probabilities files for single organisms\n probabilities_files_list = file_utilities.get_file_list(\n args[\"individual_probabilities_dir\"],\n args[\"probabilities_filename_suffix\"])\n\n if args[\"weight\"] is True or args[\"subclades\"] is True:\n logger.info(\"Importing phylogenetic tree %s\", args[\"tree_file\"])\n tree = ete3.Tree(args[\"tree_file\"])\n logger.info(\"Phylogenetic tree %s imported\", args[\"tree_file\"])\n if args[\"weight\"] is True:\n if not os.path.exists(args[\"weights_file\"]) or args[\n \"force\"] is True:\n GSC.save_weights(None, args[\"weights_file\"], None, \"w\")\n else:\n tree = None\n\n # Conserved clustering analysis in each subclade:\n if args[\"subclades\"] is True:\n logger.info(\n \"Started computing subclade-specific conserved\"\n \" clustering probabilities\")\n subclade_analysis.partition_tree(probabilities_files_list,\n organisms_uniform_probabilities,\n n_pairs,\n tree,\n args,\n **kwargs)\n logger.info(\n \"Parsing subclade-specific conserved clustering probabilities, \"\n \"this will take a while\")\n find_last_subclade.parse_and_clean_tree(n_pairs, args)\n logger.info(\n \"Parsing subclade-specific conserved clustering probabilities done\")\n logger.info(\n \"Subclade-specific conserved clustering probabilities computed\")\n\n\n # Clustering analysis just in the root:\n else:\n if not 
os.path.exists(args[\"final_probabilities_filename\"]) \\\n or probabilities_files.is_table_empty(\n args[\"final_probabilities_filename\"]) \\\n or args[\"force\"] is True:\n logger.info(\n \"Started computing root-only conserved clustering probabilities\")\n final_probs.compute_conserved_probabilities(\n probabilities_files_list,\n organisms_uniform_probabilities,\n n_pairs,\n tree,\n 0,\n args[\"final_probabilities_filename\"],\n args)\n\n logger.info(\"Root conserved clustering probabilities saved in file %s\",\n args[\"final_probabilities_filename\"])\n\n if args[\"weight\"] is True:\n logger.info(\"Phylogenetic weights saved in file %s\",\n args[\"weights_file\"])\n\n\n return 0", "def _calculate_cluster_measures(\n arr4d,\n threshold,\n bin_struct,\n two_sided_test=False,\n):\n n_regressors = arr4d.shape[3]\n\n max_sizes = np.zeros(n_regressors, int)\n max_masses = np.zeros(n_regressors, float)\n\n for i_regressor in range(n_regressors):\n arr3d = arr4d[..., i_regressor].copy()\n\n if two_sided_test:\n arr3d[np.abs(arr3d) <= threshold] = 0\n else:\n arr3d[arr3d <= threshold] = 0\n\n labeled_arr3d, _ = label(arr3d > 0, bin_struct)\n\n if two_sided_test:\n # Label positive and negative clusters separately\n n_positive_clusters = np.max(labeled_arr3d)\n temp_labeled_arr3d, _ = label(\n arr3d < 0,\n bin_struct,\n )\n temp_labeled_arr3d[temp_labeled_arr3d > 0] += n_positive_clusters\n labeled_arr3d = labeled_arr3d + temp_labeled_arr3d\n del temp_labeled_arr3d\n\n clust_vals, clust_sizes = np.unique(labeled_arr3d, return_counts=True)\n assert clust_vals[0] == 0\n\n clust_vals = clust_vals[1:] # First cluster is zeros in matrix\n clust_sizes = clust_sizes[1:]\n\n # Cluster mass-based inference\n max_mass = 0\n for unique_val in clust_vals:\n ss_vals = np.abs(arr3d[labeled_arr3d == unique_val]) - threshold\n max_mass = np.maximum(max_mass, np.sum(ss_vals))\n\n # Cluster size-based inference\n max_size = 0\n if clust_sizes.size:\n max_size = np.max(clust_sizes)\n\n max_sizes[i_regressor], max_masses[i_regressor] = max_size, max_mass\n\n return max_sizes, max_masses", "def cluster(self):\n logger.debug(\"Beginning feature based clustering on %d clusters.\" % len(self.c2b))\n # Merge the two nearest clusters until we can't.\n #\n while self.mergeNearestClusters():\n pass\n logger.debug(\"After clustering, there are now %d clusters remaining.\" % len(self.c2b))\n return self.c2b.values()", "def __generate_dict_of_keys_to_classification__(self):\n dict_of_assigned_citations = {}\n # duplicating citation dataset to filter as matches go on meaning\n # it should result in quicker allocation\n # can be removed to reduce memory load at expense of speed\n list_of_unassigned = []\n for key in self.dict_of_keywords:\n list_of_current_key = []\n for citation_instance in self.array_of_citations:\n if key == citation_instance.get_classification():\n list_of_current_key.append(citation_instance)\n if \"Unassigned\" == citation_instance.get_classification():\n list_of_unassigned.append(citation_instance)\n dict_of_assigned_citations[key] = list_of_current_key\n dict_of_assigned_citations[\"Unassigned\"] = list_of_unassigned\n return dict_of_assigned_citations", "def calculate_conductivities(self, vertices, gm_label):\r\n self.gm_label = gm_label\r\n data = pd.read_csv(vertices, sep=' ', header=None)\r\n\r\n self.__calculate_gm_bounds(data)\r\n self.dmri_handler.handle() #counts evals and evecs\r\n shape = self.dmri_handler.get_shape()\r\n\r\n self.xscale_coef = (self.gm_box.get_max_x() - 
self.gm_box.get_min_x()) / shape[0]\r\n self.yscale_coef = (self.gm_box.get_max_y() - self.gm_box.get_min_y()) / shape[1]\r\n self.zscale_coef = (self.gm_box.get_max_z() - self.gm_box.get_min_z()) / shape[2]\r\n\r\n self.__calculate_conductivities(data)\r\n data.to_csv(self.output_file, sep=' ')", "def evaluation_cc(self, property='clustering-coeff'):\n\n if property == 'clustering-coeff':\n rw_cc = [np.mean(clustering_coef_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(clustering_coef_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'transitivity':\n rw_cc = [np.mean(transitivity_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(transitivity_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'coreness':\n rw_cc = [np.mean(core.core_periphery_dir(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(core.core_periphery_dir(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'assortativity':\n rw_cc = [np.mean(core.assortativity_wei(self.rw_data[t], 0)) for t in range(0, self.T)]\n smth_cc = [np.mean(core.assortativity_wei(self.smth_data[t], 0)) for t in range(0, self.T)]\n elif property == 'modularity':\n rw_cc, _ = get_number_of_components(self.rw_data)\n smth_cc, _ = get_number_of_components(self.smth_data)\n elif property == 'path_length':\n rw_cc = [charpath(rw)[0] for rw in self.rw_data]\n smth_cc = [charpath(sm)[0] for sm in self.smth_data]\n\n # rw_cc_ent = get_entropy_list(rw_cc)\n # smth_cc_ent = get_entropy_list(smth_cc)\n\n return rw_cc, smth_cc", "def multiClusterAlgorithm(values):\n clusterMap = dict()\n # Lowest unixtime in the values list\n startUnixtime = values[0][0]\n # Separate values into separate clusters in the map, clusterMap\n for value in values:\n if value[2] not in clusterMap.keys():\n clusterMap[value[2]] = []\n clusterMap[value[2]].append(value)\n \n # An array of the predicted values per cluster\n clusterPredicted = []\n # An array of unixtime averages per cluster\n unixtimeAverage = []\n \n # Generate predicted value for each cluster using a weighted average\n # Adds predicted value for each cluster to clusterPredicted and adds\n # the unixtime average for each cluster to unixtimeAverage\n for key in clusterMap.keys():\n clusterLength = len(clusterMap[key]) \n totalUnixtime = sum([elem[0] for elem in clusterMap[key]])\n unixtimeAverage.append(float(totalUnixtime) / float(clusterLength))\n \n clusterLight = [elem[1] for elem in clusterMap[key]]\n\n totalDistanceFromStart = sum([elem[0] - startUnixtime for elem in clusterMap[key]])\n clusterWeightValues = [float(elem[0] - startUnixtime) / float(totalDistanceFromStart) for elem in clusterMap[key]]\n \n predicted = np.average(clusterLight, weights = clusterWeightValues)\n clusterPredicted.append(predicted)\n \n print clusterPredicted\n total = sum([elem - startUnixtime for elem in unixtimeAverage])\n # Create weighted values based on unixtime average per cluster\n weightValues = [float(elem - startUnixtime) / float(total) for elem in unixtimeAverage]\n print weightValues\n # Return a weighted average across clusters\n return np.average(clusterPredicted, weights = weightValues)", "def dictionary(descriptors, n_clusters):\n # TODO\n print(descriptors.shape)\n dummy=MiniBatchKMeans(n_clusters=n_clusters, batch_size=3000, random_state=9).fit(descriptors)\n clusters=dummy.cluster_centers_\n return clusters", "def _compute_util_data(self):\n\n print(\"Computing PCA of document vectors.\")\n self.pca = 
PCA(n_components = 3)\n\n print(\"Computing document clusters in PCA basis.\")\n inferred_vecs = np.array([self.model.infer_vector(doc.words) for doc in self.tagged_docs])\n self.pca_reduced_vecs = self.pca.fit_transform(inferred_vecs)\n n_clusters = 25 # TODO find way to determine approx cluster size\n self.kmeans = KMeans(init = 'k-means++', n_clusters = n_clusters, random_state = 0)\n self.kmeans_preds = self.kmeans.fit_predict(self.pca_reduced_vecs)", "def clustering(clusters, dend_matrix, labels, linkagefun):\n Z = linkagefun(dend_matrix)\n color_threshold = Z[-1*clusters][2]+0.0000000001 #Cut slightly above the tree node\n \n # Defining to which cluster belongs to each simulation\n T = fcluster(Z, t=clusters, criterion='maxclust')\n clustdict = { \"cluster\" + str(clust) : [] for clust in T }\n for sim,clust in zip(labels,T):\n clustdict[\"cluster\" + str(clust)].append(sim)\n\n return(color_threshold, clustdict)", "def prepare_data_matrix():\n # create matrix X and list of languages\n\n lds = {}\n for fn in listdir(\"clustering\"):\n if fn.lower().endswith(\".txt\"):\n with open(join(\"clustering\", fn), encoding=\"utf8\") as f:\n text = f.read()\n nter = terke(text, n=3)\n lds[fn] = nter\n #print(lds.keys())\n \n #lds is a dictionary of dictionaries: {\"slovenian.txt\": {\"abc\":3,\"efg\":4...}, \"macedonian.txt\":{\"efg\":6...},...}\n l=listOfTuples(lds) #list of strings\n #print(l[:100])\n languages = list(lds.keys()) # ['Slo', 'Mac', ]\n # which language represents row number i: languages[i]\n # which row does language s represent: languagues.index(s)\n X=np.zeros([len(languages),100])\n for i in range(len(languages)):\n #print(languages[i])\n count = 0\n for j in range(100):\n if l[j] in lds[languages[i]]:\n X[i,j]=lds[languages[i]][l[j]]\n count += 1\n # print(count)\n\n #print([sum(x) for x in X])\n \n return X, languages\n # X, languages = prepare_data_matrix()", "def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return", "def evaluate(self, clustering):\n # 
Pca for each one of the clusters\n pca_mean_val = 0.;\n MAX_ELEMENTS = 1000\n for c in clustering.clusters:\n # Pick the coordinates (ensuring that we are copying them)\n element_indexes = c.all_elements\n ###################\n # Performance hack\n ###################\n # As it can be very slow for big clusters (i.e. > 3k elements) we'll compress this clusters \n # before calculating PCA. It should increase variance but will allow calculations.\n # It should use the kmedoids compressor\n if len(c.all_elements) > MAX_ELEMENTS:\n element_indexes = c.get_random_sample(MAX_ELEMENTS)\n print \"[PCA] Random sampling too big cluster to improve performance (%d elements -> %d elements).\"%(len(c.all_elements),MAX_ELEMENTS)\n ###################\n \n fitting_coordinates_of_this_cluster = self.fitting_coordinates[element_indexes]\n \n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster)\n \n if self.calculation_coordinates is not None:\n calculation_coordinates_of_this_cluster = self.calculation_coordinates[element_indexes]\n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster,\n calculationCoordsets = calculation_coordinates_of_this_cluster)\n \n # Make an iterative superposition (to get the minimum RMSD of all with respect to a mean conformation)\n calculator.iterativeSuperposition()\n\n # Calculate the covariance matrix\n if self.calculation_coordinates is None:\n covariance_matrix = PCAMetric.create_covariance_matrix(fitting_coordinates_of_this_cluster)\n else:\n covariance_matrix = PCAMetric.create_covariance_matrix(calculation_coordinates_of_this_cluster)\n \n # And then the eigenvalue we are interested in\n pca_mean_val += PCAMetric.calculate_biggest_eigenvalue(covariance_matrix)\n print \"PCA finished\"\n return pca_mean_val /clustering.total_number_of_elements", "def cluster_classification_tex(f,browsing_matrix,diversifying_matrix, weblog,session_data_threshold,cluster_type,classification_column_diversity,classification_wanted_transaction):\n divpat_classification_wanted_transaction = classification_wanted_transaction\n divpat_N_classification_wanted_transaction=len(divpat_classification_wanted_transaction)\n f.write(\"\\n% 6. 
Cluster Classification\")\n columns_latex = '|'+'c|'*len(session_data_threshold[cluster_type].unique())\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivColumnsLatex',columns_latex)) \n columns_blank = ' ' + '& '*(len(session_data_threshold[cluster_type].unique()) -1)\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivColumnsBlank',columns_blank)) \n cluster_list = []\n ieuc_clusters = []\n star_chain_like_clusters = []\n length_clusters = []\n browsing_pattern_1 = []\n browsing_pattern_2 = []\n browsing_pattern_3 = []\n diversifying_pattern_1 = []\n diversifying_pattern_2 = []\n diversifying_pattern_3 = []\n cluster_ids = session_data_threshold[cluster_type].unique()\n cluster_ids.sort()\n for cluster_id in cluster_ids:\n cluster_list.append(str(cluster_id))\n \n cluster_session_list=session_data_threshold[session_data_threshold[cluster_type]==cluster_id].session_id.values\n temp_cluster_weblog=weblog[weblog.session_id.isin(cluster_session_list)]\n pa,pa_names = proportional_abundance(temp_cluster_weblog,'requested_'+classification_column_diversity)\n cluster_entropy=ShannonEntropy(pa,normalize=True)\n \n ieuc_clusters.append(str(round(np.power(2.0,cluster_entropy),2)))\n star_chain_like_clusters.append(star_chain_str(session_data_threshold[session_data_threshold[cluster_type]==cluster_id].star_chain_like.mean()))\n length_clusters.append(length(session_data_threshold[session_data_threshold[cluster_type]==cluster_id].requests.mean()))\n # Browsing patterns\n r,c=np.unravel_index(browsing_matrix[cluster_id][:-1,:-1].argsort(axis=None)[::-1][:3],dims=(divpat_N_classification_wanted_transaction,divpat_N_classification_wanted_transaction))\n browsing_pattern_1.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[0],c[0]],divpat_classification_wanted_transaction[r[0]],divpat_classification_wanted_transaction[c[0]]))\n browsing_pattern_2.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[1],c[1]],divpat_classification_wanted_transaction[r[1]],divpat_classification_wanted_transaction[c[1]]))\n browsing_pattern_3.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[2],c[2]],divpat_classification_wanted_transaction[r[2]],divpat_classification_wanted_transaction[c[2]]))\n \n # Diversifying patterns\n r,c=np.unravel_index(np.nan_to_num(diversifying_matrix[cluster_id])[:-1,:-1].argsort(axis=None)[::-1][:3],dims=(divpat_N_classification_wanted_transaction,divpat_N_classification_wanted_transaction))\n diversifying_pattern_1.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[0],c[0]],divpat_classification_wanted_transaction[r[0]],divpat_classification_wanted_transaction[c[0]]))\n diversifying_pattern_2.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[1],c[1]],divpat_classification_wanted_transaction[r[1]],divpat_classification_wanted_transaction[c[1]]))\n diversifying_pattern_3.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[2],c[2]],divpat_classification_wanted_transaction[r[2]],divpat_classification_wanted_transaction[c[2]]))\n\n del temp_cluster_weblog\n \n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivClusterList',' & '.join(cluster_list)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivIEUCClusters',' & '.join(ieuc_clusters)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('StarChainClusters',' & '.join(star_chain_like_clusters)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('LengthClusters',' & '.join(length_clusters)))\n 
f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('BrowsingPatternClustersOne',' & '.join(browsing_pattern_1)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('BrowsingPatternClustersTwo',' & '.join(browsing_pattern_2)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('BrowsingPatternClustersThree',' & '.join(browsing_pattern_3)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DiversifyingPatternClustersOne',' & '.join(diversifying_pattern_1)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DiversifyingPatternClustersTwo',' & '.join(diversifying_pattern_2)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DiversifyingPatternClustersThree',' & '.join(diversifying_pattern_3)))\n\n return f;", "def generate(dictalg):\n\n # dsList, sortedAlgs, dictAlg = processInputArgs(args, verbose=verbose)\n res = {}\n for f, i in pproc.dictAlgByFun(dictalg).iteritems():\n for d, j in pproc.dictAlgByDim(i).iteritems():\n tmp = BestAlgSet(j)\n res[(d, f)] = tmp\n return res", "def calculate_clusters(study_id):\n\n with current_app.app_context():\n cur = conn.cursor()\n\n cur.execute(\"\"\"SELECT * FROM STATS WHERE STUDY_ID=%s\"\"\", (str(study_id),))\n study = fetchoneClean(cur)\n clusters_calculating = study[4]\n clusters_changed = study[5]\n if clusters_changed:\n if clusters_calculating:\n return {'message': 'calculating'}\n cur.execute(\"\"\"UPDATE STATS SET CLUSTERS_CALCULATING = TRUE WHERE STUDY_ID = %s\"\"\", (str(study_id),))\n conn.commit()\n\n distance = study[7]['matrix']\n card_names = study[7]['cardNames']\n cur.execute(\"\"\"SELECT COUNT(ID) FROM PARTICIPANT WHERE STUDY_ID = %s\"\"\", (str(study_id),))\n total_participants = fetchoneClean(cur)[0]\n\n distance_matrix = calculate_square_form(distance, total_participants)\n distArray = ssd.squareform(distance_matrix)\n\n try:\n clusters = hierarchy.linkage(distArray, method='average')\n except ValueError:\n return {'message': 'not enough data'}\n\n tree = hierarchy.to_tree(clusters, rd=False)\n # TODO Distance 0 on root\n dendro = dict(children=[], hierarchy=0, distance=100)\n add_node(tree, dendro, card_names)\n\n cur.execute(\"\"\"UPDATE STATS SET CLUSTERS = %s WHERE STUDY_ID = %s\"\"\", (json.dumps(dendro), str(study_id),))\n cur.execute(\"\"\"UPDATE STATS SET CLUSTERS_CALCULATING = FALSE WHERE STUDY_ID = %s\"\"\", (str(study_id),))\n cur.execute(\"\"\"UPDATE STATS SET CLUSTERS_CHANGED = FALSE WHERE STUDY_ID = %s\"\"\", (str(study_id),))\n conn.commit()\n else:\n dendro = study[6]\n\n return dendro", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... 
Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings", "def cluster_membership_occupancy(data):\n \n \n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n\n if n_clusters == 0:\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features()]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters ==1:\n #obtain_total_cluster_areas_set_everything_else_to_default\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n \n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0]\n \n Total_cluster_area=np.sum(cluster_chull_areas)\n areas=[Cluster_Area_Features([Total_cluster_area,0,0,0,0,0,0,0,0])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters >1:\n #Summarizing the cluster membership distribution characteristics\n cluster_size_nums=np.delete(np.array(data.groupby(['clusters']).size()),0)\n (cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD)= distribution_statistics(cluster_size_nums)\n\n #For each cluster calculate the area by calculating the area of the convex hull of cluster members\n # Note: concavehull implementation here might be a good addition as it will provide more imformative values. 
\n\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0,0,0]\n \n\n (avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area)= distribution_statistics(cluster_chull_areas)\n Total_cluster_area=np.sum(cluster_chull_areas)\n\n #Calculate cluster density: number of nuclei/ convex area of cluster\n cluster_density=np.divide(cluster_size_nums,cluster_chull_areas)\n (avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density)= distribution_statistics(cluster_density)\n\n #return dataframe of features\n membership=[Cluster_Membership_Features([cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD])]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features([Total_cluster_area,\n avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features([avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density])]\n density = pd.DataFrame([o.__dict__ for o in density])\n\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n return all_features", "def cluster_bal_iter(self):\n # moving\n for j,cluster in enumerate(self.clusters):\n cluster.move()\n self.clusters_allocate_cells()\n for j,cluster in enumerate(self.clusters):\n cluster.calc()\n #print j, '\\t', cluster.center, '\\t', cluster.np, '\\t', cluster.size\n \n # resizing\n for j,cluster in enumerate(self.clusters):\n cluster.resize()\n self.clusters_allocate_cells()\n for j,cluster in enumerate(self.clusters):\n cluster.calc()\n #print j, '\\t', cluster.center, '\\t', cluster.np, '\\t', cluster.size\n \n self.calc()", "def get_all_clusters(self) -> Dict[str, List[str]]:\n result = {}\n for c_id in set(self._clusters.values()):\n result[c_id] = self.get_cluster_by_id(c_id)\n return result", "def _create_adjacency_matrix(layer_edges):\n A = defaultdict(int)\n for l, edges in list(layer_edges.items()):\n for edge in edges:\n A[(edge[0], edge[1], l)] += 1\n A[(edge[1], edge[0], l)] += 1 \n return A", "def calculate_all_metrcis(self):\n self.calculate_gc_metrcis()\n self.calculate_sam_metrics()\n self.calculate_classification_metrics()\n self.calculate_losses()", "def assign_clusters(self):\n running_perts = {}\n for name in self.tensor_info:\n item = self.tensor_info[name]\n pert_list = item[1]\n pert_names = []\n prob_list = []\n if pert_list is not None:\n for pert in pert_list:\n pert_names.append(pert.__class__.__name__)\n prob_list.append(pert.p)\n pert_names = '_'.join(pert_names)\n if pert_names not in running_perts:\n running_perts[pert_names] = [(name, prob_list)]\n else:\n running_perts[pert_names].append((name, prob_list))\n\n running_perts.pop('')\n\n assert len(running_perts) <= len(self.clusters), \"More different perturbations than clusters available, cannot assign 
tensors to clusters\"\n\n # ONLY BITWISEPERT FOR THE TIME BEING\n bitwises = running_perts['BitwisePert']\n bitwise_probs = [item[1][0] for item in bitwises]\n centers, _ = kmeans(bitwise_probs, len(self.clusters))\n groups, _ = vq(bitwise_probs, centers)\n\n for tensor, cluster in zip(bitwises, groups):\n name = tensor[0]\n tensor_ref = self.tensor_info[name][0]\n repr = self.tensor_info[name][2]\n self.clusters[cluster].add_tensor(tensor_ref, repr)\n\n for cluster, rate in zip(self.clusters, centers):\n pert_dict = {\n \"name\": \"BitwisePert\",\n \"p\": rate}\n pert = P.construct_pert(pert_dict)\n cluster.set_perturb([pert])", "def cluster_analysis(\n clusterers: list,\n hyperparameter_grids: list,\n eval_metrics_grid: list,\n eval_metrics_params: dict,\n word_embeddings: np.ndarray,\n words_vocabulary: list,\n word_to_int: dict,\n word_embeddings_normalized: np.ndarray = None,\n compute_pairwise_word_distances: bool = False,\n compute_pairwise_word_distances_normalized: bool = False,\n return_word_vectors: bool = False,\n save_result_to_disk: bool = False,\n output_dir: Optional[str] = None,\n model_name: Optional[str] = None,\n dataset_name: Optional[str] = None,\n output_filepath_suffix: Optional[str] = None,\n) -> Union[dict, tuple]:\n # Create word vectors from given words/vocabulary\n word_vectors = words_to_vectors(\n words_vocabulary=words_vocabulary,\n word_to_int=word_to_int,\n word_embeddings=word_embeddings,\n )\n\n # Create normalized word vectors from given words/vocabulary if specified.\n word_vectors_normalized = None\n if word_embeddings_normalized is not None:\n word_vectors_normalized = words_to_vectors(\n words_vocabulary=words_vocabulary,\n word_to_int=word_to_int,\n word_embeddings=word_embeddings_normalized,\n )\n\n if compute_pairwise_word_distances:\n word_vectors_pairwise_distances = pairwise_cosine_distances(word_vectors)\n if (\n compute_pairwise_word_distances_normalized\n and word_vectors_normalized is not None\n ):\n normalized_word_vectors_pairwise_distances = euclidean_distances(\n word_vectors_normalized\n )\n\n # Perform cluster analysis\n clusterers_result = {}\n unique_cluster_metrics = set()\n for clusterer_tuple, hyperparameter_grid, eval_metrics in zip(\n clusterers, hyperparameter_grids, eval_metrics_grid\n ):\n if len(clusterer_tuple) == 3:\n (clusterer_name, clusterer_cls, clusterer_use_normalized) = clusterer_tuple\n else:\n clusterer_use_normalized = False\n (clusterer_name, clusterer_cls) = clusterer_tuple\n print(f\"-- Clustering using {clusterer_name} --\")\n clusterers_result[clusterer_name] = {\n \"cluster_labels\": [],\n \"cluster_params\": [],\n \"cluster_metrics\": {},\n }\n\n # Do clustering for each set of hyperparameters\n param_grid = ParameterGrid(hyperparameter_grid)\n for params_idx, params in enumerate(tqdm(param_grid)):\n clusterers_result[clusterer_name][\"cluster_params\"].append(params)\n\n # Add exception for ward linkage clustering.\n if (\n clusterer_cls is AgglomerativeClustering\n and params.get(\"linkage\") == \"ward\"\n and word_vectors_normalized is not None\n ):\n params = {**params, \"affinity\": \"euclidean\"}\n clusterer_instance = clusterer_cls(**params)\n fit_predict_X = word_vectors_normalized\n else:\n clusterer_instance = clusterer_cls(**params)\n if (\n params.get(\"affinity\") == \"precomputed\"\n or params.get(\"metric\") == \"precomputed\"\n ):\n if (\n clusterer_use_normalized\n and compute_pairwise_word_distances_normalized\n ):\n fit_predict_X = normalized_word_vectors_pairwise_distances\n elif 
compute_pairwise_word_distances:\n fit_predict_X = word_vectors_pairwise_distances\n else:\n if clusterer_use_normalized and word_vectors_normalized is not None:\n fit_predict_X = word_vectors_normalized\n else:\n fit_predict_X = word_vectors\n\n # Use fit_predict if it is available.\n if getattr(clusterer_instance, \"fit_predict\", None) is not None:\n predicted_labels = clusterer_instance.fit_predict(fit_predict_X)\n else:\n clusterer_instance.fit(fit_predict_X)\n predicted_labels = clusterer_instance.predict(fit_predict_X)\n\n # Separate noise labels into clusters\n if clusterer_cls is HDBSCAN:\n predicted_labels = separate_noise_labels_into_clusters(predicted_labels)\n\n clusterers_result[clusterer_name][\"cluster_labels\"].append(predicted_labels)\n\n # Evaluate predicted cluster labels using internal evaluation metrics\n for eval_metric_tuple in eval_metrics:\n if len(eval_metric_tuple) == 3:\n (\n eval_metric_key,\n eval_metric,\n eval_metric_use_normalized,\n ) = eval_metric_tuple\n else:\n eval_metric_use_normalized = False\n (eval_metric_key, eval_metric) = eval_metric_tuple\n eval_metric_params = eval_metrics_params.get(eval_metric_key, {})\n if (\n compute_pairwise_word_distances\n and eval_metric_params.get(\"metric\") == \"precomputed\"\n ):\n if (\n eval_metric_use_normalized\n and compute_pairwise_word_distances_normalized\n ):\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=normalized_word_vectors_pairwise_distances,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n else:\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=word_vectors_pairwise_distances,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n else:\n if (\n eval_metric_use_normalized\n and word_vectors_normalized is not None\n ):\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=word_vectors_normalized,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n else:\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=word_vectors,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n unique_cluster_metrics.add(metric_name)\n\n # Initialize metric result\n if (\n metric_name\n not in clusterers_result[clusterer_name][\"cluster_metrics\"]\n ):\n clusterers_result[clusterer_name][\"cluster_metrics\"][\n metric_name\n ] = {\n \"metric_scores\": [],\n \"metric_obj_max\": metric_obj_max,\n \"best_metric_score_indices\": [],\n }\n\n clusterers_result[clusterer_name][\"cluster_metrics\"][metric_name][\n \"metric_scores\"\n ].append(metric_score)\n\n # Set best metric score indices\n if params_idx == len(param_grid) - 1:\n best_metric_score_indices = np.argsort(\n clusterers_result[clusterer_name][\"cluster_metrics\"][\n metric_name\n ][\"metric_scores\"]\n )\n if metric_obj_max:\n best_metric_score_indices = best_metric_score_indices[::-1]\n clusterers_result[clusterer_name][\"cluster_metrics\"][metric_name][\n \"best_metric_score_indices\"\n ] = best_metric_score_indices\n\n # Find preferred clusterers for each cluster metric (from best to worst)\n metric_preferred_clusterers = {}\n for cluster_metric_name in unique_cluster_metrics:\n metric_obj_max = None\n metric_best_scores = []\n clusterer_names = []\n for clusterer_name, clusterer_result in clusterers_result.items():\n if cluster_metric_name in clusterer_result[\"cluster_metrics\"]:\n 
clusterer_names.append(clusterer_name)\n metric_result = clusterer_result[\"cluster_metrics\"][cluster_metric_name]\n if metric_obj_max is None:\n metric_obj_max = metric_result[\"metric_obj_max\"]\n best_metric_score = metric_result[\"metric_scores\"][\n metric_result[\"best_metric_score_indices\"][0]\n ]\n metric_best_scores.append(best_metric_score)\n clusterer_names = np.array(clusterer_names)\n metric_best_scores = np.array(metric_best_scores)\n\n metric_best_scores_sorted_indices = np.argsort(metric_best_scores)\n if metric_obj_max:\n metric_best_scores_sorted_indices = metric_best_scores_sorted_indices[::-1]\n metric_preferred_clusterers[cluster_metric_name] = {\n \"clusterer_names\": clusterer_names[metric_best_scores_sorted_indices],\n \"best_metric_scores\": metric_best_scores[metric_best_scores_sorted_indices],\n }\n\n # Return result as dictionary\n cluster_analysis_result = {\n \"clusterers\": clusterers_result,\n \"metric_preferred_clusterers\": metric_preferred_clusterers,\n }\n\n if return_word_vectors:\n if compute_pairwise_word_distances:\n cluster_analysis_result = (\n cluster_analysis_result,\n word_vectors,\n word_vectors_pairwise_distances,\n )\n else:\n cluster_analysis_result = (cluster_analysis_result, word_vectors)\n\n # Save result to disk\n if save_result_to_disk:\n save_cluster_result_to_disk(\n cluster_result=cluster_analysis_result,\n output_dir=output_dir,\n model_name=model_name,\n dataset_name=dataset_name,\n output_filepath_suffix=output_filepath_suffix,\n )\n\n return cluster_analysis_result", "def compute_cluster_similarities(emb_clusters1, emb_clusters2, compare, order, clmethod, plot):\n def compute_sim(e, e1, cls, cls1):\n sims = np.empty((20, 20))\n xticks, yticks = [], []\n for i, c in enumerate(cls):\n yticks.append(', '.join(c[1]) + (f' {round(c[3], 5)}' if order == 'avgfreq' else ''))\n for j, c1 in enumerate(cls1):\n if len(xticks) < 20:\n xticks.append(', '.join(c1[1]) + (f' {round(c1[3], 5)}' if order == 'avgfreq' else ''))\n sims[i, j] = jaccard_similarity_score(c[2], c1[2])\n jaccard_similarities[f'{e}-{e1}'] = sims\n\n if plot:\n if order == 'clustermap':\n similarity_clustermap(sims, xticks, yticks, f'{e}-{e1}_{clmethod}')\n elif order == 'default' or order == 'avgfreq':\n similarity_heatmap(sims, xticks, yticks, f'{e}-{e1}_{clmethod}', order)\n else:\n pass\n\n jaccard_similarities = {}\n if compare == 'cross':\n for ie, (e, cls) in enumerate(emb_clusters1.items()):\n for ie1, (e1, cls1) in enumerate(emb_clusters2.items()):\n if ie < ie1:\n compute_sim(e, e1, cls, cls1)\n elif compare == 'dot':\n for (e, cls), (e1, cls1) in zip(emb_clusters1.items(), emb_clusters2.items()):\n compute_sim(e, e1, cls, cls1)\n\n return jaccard_similarities", "def get_all_local_clustering_coef(g):\n local_cc = {}\n\n for n in nx.nodes(g):\n local_cc[n] = get_local_clustering_coef(g, n)\n\n return local_cc", "def evaluate_clusters(self, cluster_formulas, value='weighted_sum'):\n num_elems = len(self.labels)\n total_val = {}\n num_cl = len(cluster_formulas)\n clustered_points_num = 0\n print(\"\\n\\n\")\n print(\"Sufficiently big clusters: {}\".format(num_cl))\n for c, formula, val in cluster_formulas:\n c_size = len([l for l in self.labels if l == c])\n clustered_points_num += c_size\n\n if value == 'weighted_sum':\n total_val[c] = val * c_size / num_elems\n elif value == 'sum':\n total_val[c] = val * 1\n\n clust_val = sum(total_val.values())\n self.clustering_value = total_val\n print(\"Value of clustering: {}\".format(clust_val))\n return clust_val", "def 
load_cluster_accuracies():\n accuracies = morphs.utils.load._pickle(morphs.paths.ACCURACIES_PKL)\n cluster_accuracies = {\n block_path: accuracies[block_path]\n .groupby(\"cluster\")\n .agg(np.mean)\n .sort_values(\"accuracy\")\n for block_path in accuracies\n }\n return accuracies, cluster_accuracies", "def match_cluster_sets(cs1, cs2):\n\n matr = [[len(cl1.bibs & cl2.bibs) for cl2 in cs2.clusters] for cl1 in cs1.clusters]\n mapping = maximized_mapping(matr)\n return dict((cs1.clusters[mappy[0]], cs2.clusters[mappy[1]]) for mappy in mapping)", "def run(\n self,\n number_of_clusters=None,\n max_K=8,\n method_clustering=\"pam\",\n init_clustering=\"random\",\n max_iter_clustering=100,\n discart_value_JI=0.6,\n bootstraps_JI=100,\n bootstraps_p_value=100,\n n_jobs=1,\n verbose=1,\n ):\n\n if number_of_clusters is None:\n self.k = optimizer.optimizeK(\n self.distance_matrix,\n self.y.to_numpy(),\n self.model_type,\n max_K,\n method_clustering,\n init_clustering,\n max_iter_clustering,\n discart_value_JI,\n bootstraps_JI,\n self.random_state,\n n_jobs,\n verbose,\n )\n\n if self.k == 1:\n warnings.warn(\"No stable clusters were found!\")\n return\n\n print(f\"Optimal number of cluster is: {self.k}\")\n\n else:\n self.k = number_of_clusters\n print(f\"Use {self.k} as number of cluster\")\n\n self.cluster_labels = (\n kmedoids.KMedoids(\n n_clusters=self.k,\n method=method_clustering,\n init=init_clustering,\n metric=\"precomputed\",\n max_iter=max_iter_clustering,\n random_state=self.random_state,\n )\n .fit(self.distance_matrix)\n .labels_\n )\n\n (\n self._data_clustering_ranked,\n self.p_value_of_features,\n ) = stats.calculate_global_feature_importance(\n self.X, self.y, self.cluster_labels, self.model_type\n )\n self._p_value_of_features_per_cluster = (\n stats.calculate_local_feature_importance(\n self._data_clustering_ranked, bootstraps_p_value\n )\n )", "def clustering(self): \n clusterOfFiles=self.getClusters()\n \n #group files based on the hash of their contents\n self.keyingMethod=md5Hash\n [self.addFile(afile) for acluster in clusterOfFiles for afile in acluster]\n clusterOfFiles=self.getClusters()\n self.showClusters(clusterOfFiles)", "def matching_clusterization(self):\n result = []\n self.reclustering(self.groups.copy(deep=True), result)\n self.result = pd.DataFrame(result)\n return self.result.sort_values(by=['cluster_size'], ascending=False)", "def censuses(self, *scales: int) -> Mapping[int, Census]:\n return {\n # Map id to the whole census object\n census.id: census\n # The dict is created from a generating parsing the nodes\n for census in (\n Census.from_xml(node)\n # an XML node can be used as an iterator, where it yields children\n for node in self.shards_xml(\n \"census\",\n # we want to grab all the values, since a Census requires them\n mode=joined_parameter(\"score\", \"rank\", \"rrank\", \"prank\", \"prrank\"),\n # gets all the different stats for us\n scale=joined_parameter(*(str(scale) for scale in scales))\n if scales\n else \"all\",\n )[\"census\"]\n )\n }", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def return_factorized_dict(ls):\r\n factos = pd.unique(pd.factorize(ls)[0])\r\n categs = pd.unique(pd.factorize(ls)[1])\r\n if -1 in factos:\r\n categs = np.insert(categs,np.where(factos==-1)[0][0],np.nan)\r\n return dict(zip(categs,factos))", "def cluster(X=None, datalabels=None, nc=2):\n from sklearn.cluster import KMeans\n from sklearn.cluster import AffinityPropagation\n\n C = KMeans(n_clusters=nc,n_init=10,init='random')\n C.fit(X[:,:1])\n\n #C = 
AffinityPropagation(preference=-80,damping=0.5).fit(X)\n #cluster_centers_indices = C.cluster_centers_indices_\n\n clust = {}\n for (i, label) in enumerate(C.labels_):\n key = C.cluster_centers_[label][0]\n #print label,key, datalabels[i],X[i][1]\n if not clust.has_key(key):\n clust[key]=[]\n clust[key].append(datalabels[i])\n #print clust\n return C, clust", "def _generate_weighted_matrices(self):\n self.degree_weighted_matrices = dict()\n mes = []\n args = []\n for metaedge, matrix in self.adj_matrices.items():\n mes.append(metaedge)\n args.append({'matrix': matrix, 'w': self.w, 'degree_fwd': self.out_degree[metaedge],\n 'degree_rev': self.in_degree[metaedge]})\n res = parallel_process(array=args, function=mt.weight_by_degree, use_kwargs=True, n_jobs=self.n_jobs,\n front_num=0)\n for metaedge, matrix in zip(mes, res):\n self.degree_weighted_matrices[metaedge] = matrix", "def dimension_homology_sc(self):\r\n vec_dic = {}\r\n for k in range(self.dimension()+1):\r\n p = k \r\n A = self.matrix_simmetric_representate(p)\r\n dn = 0\r\n dc = 0\r\n if (p == 0):\r\n dn = A.shape[1]\r\n if (p > 0 and (p <= self.dimension())):\r\n null = null_space(A)\r\n if (null.size != 0):\r\n dn = len(null[0])\r\n if (all(elem == 0 for elem in null[0])):\r\n dn = 0 \r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n A1=self.matrix_simmetric_representate(p)\r\n col = orth(A1)\r\n if (col.size != 0):\r\n dc = len(col[0])\r\n else: \r\n dc = 0\r\n vec_dic[k] = dn - dc\r\n return vec_dic", "def get_results_for_init(self):\n return dict(init=self.centroids, n_clusters=self.centroids.shape[0])", "def compute_feature_properties(self):\n\n self.valuecounts = {}\n self.unique_values = {}\n self.missing_ratios = {}\n self.counts = {}\n self.codemaps = {}\n for f in self.features:\n # Compute various things\n all_values = [self.data[l].get(f,\"?\") for l in self.data]\n missing_data_ratio = all_values.count(\"?\") / (1.0*len(all_values))\n non_q_values = [v for v in all_values if v != \"?\"]\n counts = {}\n for v in non_q_values:\n counts[v] = non_q_values.count(v)\n unique_values = list(set(non_q_values))\n # Sort unique_values carefully.\n # Possibly all feature values are numeric strings, e.g. 
\"1\", \"2\", \"3\".\n # If we sort these as strings then we get weird things like \"10\" < \"2\".\n # This can actually matter for things like ordinal models.\n # So convert these to ints first...\n if all([v.isdigit() for v in unique_values]):\n unique_values = list(map(int, unique_values))\n unique_values.sort()\n unique_values = list(map(str, unique_values))\n # ...otherwise, just sort normally\n else:\n unique_values.sort()\n self.unique_values[f] = unique_values\n\n N = len(unique_values)\n self.valuecounts[f] = N\n self.missing_ratios[f] = missing_data_ratio\n self.counts[f] = counts\n self.codemaps[f] = self.build_codemap(unique_values)", "def katz_for_pairs(self, pairs, adj_prefix, max_length=6, beta=0.1):\n filenames = [adj_prefix + str(i) + '.npz' for i in range(2, max_length + 1)]\n n = 1\n self.katz = {}\n bs = [1.500, .891, .631, .543, .413, .420] # output from Mohler method\n for u, v in pairs:\n if u not in self.katz:\n self.katz[u] = defaultdict(float)\n for f in filenames:\n a = scipy.sparse.load_npz(f)\n # b = beta ** n\n b = bs[n-1]\n for u, v in pairs:\n self.katz[u][v] += b * a[u-1, v-1]\n n += 1\n print(\"Loaded %s\" % f)", "def _gen_cg_dict(maxdim, existing_keys=None):\n cg_dict = {}\n # print(\"gen_cg_dict called with maxdim =\", maxdim)\n\n fastcgmat = memoize(clebschSU2mat)\n\n for k1, n1, k2, n2 in itertools.product(range(maxdim), repeat=4):\n if ((k1, n1), (k2, n2)) in existing_keys:\n continue\n cg_dict.setdefault(((k1, n1), (k2, n2)), {})\n kmin, kmax = abs(k1 - k2), k1 + k2\n nmin, nmax = abs(n1 - n2), n1 + n2\n # dim1, dim2 = (k1 + 1) * (n1 + 1), (k2 + 1) * (n2 + 1)\n for k, n in itertools.product(\n range(kmin, kmax + 1, 2), range(nmin, nmax + 1, 2)\n ):\n cg_dict[((k1, n1), (k2, n2))][(k, n)] = torch.tensor(\n clebschmat((k1, n1), (k2, n2), (k, n), fastcgmat=fastcgmat)\n )\n\n return cg_dict", "def compute_statistics(self):\n for i in range(len(self.wine_matrix[0, :])):\n feature = self.wine_matrix[:, i]\n self.wine_stats['feature ' + str(i)] = {}\n if i == 11: # results column\n self.wine_stats['feature ' + str(i)]['positive_class_ratio'] = (feature == 1).sum() / len(feature)\n null, self.wine_stats['feature ' + str(i)]['pvalue'] = stats.normaltest(feature)\n\n # plot\n # pyplot.hist(feature, bins=50)\n # pyplot.show()\n\n for i in range(len(self.cancer_matrix[0, :])):\n feature = self.cancer_matrix[:, i]\n self.cancer_stats['feature ' + str(i)] = {}\n if i == 10: # results column\n self.cancer_stats['feature ' + str(i)]['positive_class_ratio'] = (feature == 1).sum() / len(feature)\n null, self.cancer_stats['feature ' + str(i)]['pvalue'] = stats.normaltest(feature)\n\n # plot\n # pyplot.hist(feature, bins=50)\n # pyplot.show()", "def cluster(self, documents, iters=10):\n ###TODO\n self.means = documents[0:self.k]\n self.cluster = defaultdict(lambda : int)\n self.docs = documents\n \n for val in range(iters):\n self.norms = []\n for x in self.means:\n self.norms.append(self.sqnorm(x))\n \n self.compute_clusters(documents)\n \n self.fin_clust = defaultdict(lambda:[])\n for z in self.cluster:\n self.fin_clust[self.cluster[z]].append(z)\n \n self.means = self.compute_means()\n \n to_print_array = []\n for k in self.fin_clust:\n to_print_array.append(len(self.fin_clust[k]))\n \n print (to_print_array)\n print (self.error(documents))", "def clustering(df, mode):\n # split into list of dfs containing only one reference node\n df_list = [df.loc[i : i + 8 - 1, :] for i in range(0, len(df), 8)]\n\n df_coefficient = pd.DataFrame()\n\n # loop over every single 
node\n for df_single in df_list:\n df_single = df_single.reset_index()\n total_value = 0\n\n # loop over the weights of all connected nodes\n for j in range(len(df_single) - 1):\n if mode == \"geometric\":\n # geometric\n total_value = total_value + math.sqrt(df_single.chi_sq[j] * df_single.chi_sq[j + 1])\n if mode == \"arithmetic\": \n # arithmetic\n total_value = total_value + ((df_single.chi_sq[j] * df_single.chi_sq[j + 1]) / 2)\n if mode == \"argmax\": \n # max\n total_value = total_value + max(df_single.chi_sq[j], df_single.chi_sq[j + 1])\n if mode == \"argmin\":\n # min\n total_value = total_value + min(df_single.chi_sq[j], df_single.chi_sq[j + 1])\n\n for i in range(len(df_single) - 1):\n if mode == \"geometric\":\n # geometric\n triplet_value = math.sqrt(df_single.chi_sq[i] * df_single.chi_sq[i + 1])\n if mode == \"arithmetic\":\n # arithmetic\n triplet_value = (df_single.chi_sq[i] * df_single.chi_sq[i + 1]) / 2\n if mode == \"argmax\":\n # max\n triplet_value = max(df_single.chi_sq[i], df_single.chi_sq[i + 1])\n if mode == \"argmin\": \n # min\n triplet_value = min(df_single.chi_sq[i], df_single.chi_sq[i + 1])\n\n cluster_coefficient = triplet_value / total_value\n buffer = [\n [\n df_single.reference[i],\n df_single.comparison[i],\n df_single.comparison[i + 1],\n triplet_value,\n cluster_coefficient,\n ]\n ]\n df_coefficient = df_coefficient.append(buffer)\n\n df_coefficient = df_coefficient.reset_index()\n\n print(\"\\n\\n threshold 0.5*c_omega\")\n check_list = []\n # print out triangles that have a cluster coefficient bigger, than X\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.5) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n print(\"\\n\\n threshold 0.75*c_omega\")\n check_list = []\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.75) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n print(\"\\n\\n threshold 0.8*c_omega\")\n check_list = []\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.9) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n\n print(\"\\n\\n threshold 0.9*c_omega\")\n check_list = []\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.9) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n return", "def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion", "def interpret_clusters(self, split=0.7, all_demos=None, num_clusters=None, \n max_depth=CLUSTER_DEPTH, data=None, labels=None, verbose=True):\n all_demos = self.all_data if all_demos is None else all_demos\n clusters = self.get_ordered_clusters(labels, num_clusters)\n data = self.demos if data is None else data\n labels = self.labels if labels is None else labels\n\n cluster_formulas = []\n counter = 0\n sep = \"\\n \"\n for c in clusters:\n counter += 1\n res = self.sample_from_clusters(num_samples=split,\n all_data=all_demos,\n pos_validation=True, \n neg_validation=True,\n 
which_cluster=counter)\n positive_samples, val_positive_samples = res[0], res[1]\n negative_samples, val_negative_samples = res[2], res[3]\n z = 0\n for d in positive_samples:\n if d[1] == 0: z += 1\n\n cluster_data = {'pos': positive_samples,\n 'neg': negative_samples}\n val_cluster_data = {'pos': val_positive_samples,\n 'neg': val_negative_samples}\n\n if verbose: print(sep +\"Checking formulas \" + \\\n \"with max depth {}\\n\".format(max_depth))\n\n cluster_formula, value_formula = wrapper_train(max_depth,\n cluster_data, \n val_cluster_data,\n verbose=verbose,\n pred_data=[self.pipeline_X,\n self.pipeline_y])\n if cluster_formula is not None:\n print(cluster_formula)\n\n cluster_formulas.append((c, cluster_formula, value_formula))\n self.reset_pipeline()\n\n return cluster_formulas", "def _recalculate_centroids(self):\n\n self._prev_centroids = dict(self.centroids)\n for cluster in self.clusters:\n self.centroids[cluster] = np.average(self.clusters[cluster], axis=0)", "def calcClusters(dataset, medoids, number_of_clusters, verbosity=0, class_header=\"Class\"):\n clusters = [pandas.DataFrame(columns=dataset.columns)] * number_of_clusters # create array of clusters\n multiprocess_count = multiprocessing.cpu_count() # Find processor count\n pool = multiprocessing.Pool(processes=multiprocess_count) # create multiprocessing pool\n\n set_list = []\n partition_size = math.ceil(len(dataset) / multiprocess_count)\n for i in range(multiprocess_count - 1): # repeat for every subset\n sample = dataset.iloc[i * partition_size: (i + 1) * partition_size] # take a sample of data\n set_list.append((sample, medoids, number_of_clusters, verbosity, class_header)) # fill work list\n set_list.append(\n (dataset.iloc[(multiprocess_count - 1) * partition_size:], medoids, number_of_clusters, verbosity, class_header))\n\n # find list of clustering for each subset\n clusters_subsets = pool.starmap(Cluster.calcClustersMultiprocess, set_list)\n pool.close()\n pool.join()\n # Transpose 2d list of dataframes so each lower level list is of the same cluster\n cluster_lists = [[i for i in element if i is not None] for element in list(zip_longest(*clusters_subsets))]\n\n for i in range(number_of_clusters): # concat together each list of cluster subsets.\n clusters[i] = pandas.concat(cluster_lists[i])\n return clusters", "def compute_means(self):\n ###TODO\n vector_means = []\n for doc in self.fin_clust.values():\n vec = defaultdict(float)\n for d_id in doc:\n doc_keys = self.docs[d_id].keys()\n for key in self.docs[d_id]:\n vec[key] = vec[key] + self.docs[d_id][key]\n tot = len(doc)\n x = defaultdict(float)\n for k,v in vec.items():\n x[k] = float(v)/tot\n vec = Counter(x)\n vector_means.append(vec)\n return vector_means", "def _generate_weighted_adj_matrices(self):\n self.weighted_adj_matrices = dict()\n mes = []\n args = []\n for metaedge in self.metaedges:\n mes.append(metaedge)\n args.append(self._prepare_parallel_weighted_adj_matrix_args(self.edge_df.query('abbrev == @metaedge')))\n res = parallel_process(array=args, function=mt.get_adj_matrix, use_kwargs=True, n_jobs=self.n_jobs,\n front_num=0)\n for metaedge, matrix in zip(mes, res):\n self.weighted_adj_matrices[metaedge] = matrix", "def adj_dict(self):\n adj_dict = {i: [] for i in self.indices}\n for coeff in self.interactions[1:]:\n for _inds, value in coeff.items():\n for i in _inds:\n _inds_list = list(_inds)\n _inds_list.remove(i)\n adj_dict[i].append([_inds_list, value])\n return adj_dict", "def scores(self):\n\t\tseqLengths = []\n\t\tfor x in 
self.contigsInfo.keys():\n\t\t\tseq = self.contigsInfo[x]\n\t\t\tseqLengths.append(len(seq))\n\n\t\tseqLengths = sorted(seqLengths)\t\n\t\tmax_length = max(seqLengths)\n\t\tmin_length = min(seqLengths)\n\t\tmean_length = np.mean(seqLengths)\t\n\n\n\t\tmidLength = sum(seqLengths)/2\n\n\t\tcomputedMidLength = 0\n\t\tl50 = 0\n\t\tn50 = 0\n\t\tfor i,x in enumerate(seqLengths):\n\t\t\tif (midLength < computedMidLength):\n\t\t\t\tn50 = i\n\t\t\t\tl50 = x \n\t\t\t\tbreak\n\t\t\tcomputedMidLength += x\n\n\t\tscoresDict = {'number_of_contigs':len(seqLengths), 'smallestContig':min_length, 'meanContig':mean_length, \n\t\t'n50':n50, 'l50':l50, 'largestContig':max_length, 'lengthOfAssembly':sum(seqLengths)}\n\t\treturn scoresDict", "def clustering(dataset, logger):\n all_instances = dataset\n meta_dataset = collections.defaultdict(list)\n for instance in all_instances:\n meta_dataset[instance['label']].append(instance['coordinate'])\n\n tasklist = map(\n lambda item, meta_dataset=meta_dataset, logger=logger: (\n item[0],\n clustering_by_label,\n (item[1], item[0], meta_dataset, logger)), meta_dataset.items())\n\n # pool = multiprocessing.pool.Pool(PROCESS_COUNT)\n # clusters = dict(pool.map(map_generate_tuple, tasklist))\n clusters = dict(map(map_generate_tuple, tasklist))\n # pool.close()\n # pool.join()\n\n return clusters", "def get_composition(self) -> Dict[KappaAgent, int]:\n agent_types = self.get_agent_types_present()\n composition = dict(zip(agent_types, [0] * len(agent_types)))\n for agent_type in agent_types:\n for kappa_complex, abundance in self.get_all_complexes_and_abundances():\n complex_composition = kappa_complex.get_complex_composition()\n local_abundance = complex_composition[agent_type] if agent_type in complex_composition else 0\n composition[agent_type] += abundance * local_abundance\n return composition", "def analysis_function_num_clusters(self,clustering):\n return len(clustering.clusters)", "def collectInitialeccnStatistics(self, folder, databaseFilename, multiplicityFactor = 1.0, deformedNuclei = False):\n typeCollections = ((1, 'sn'), (2,'en'))\n for ecc_id, ecc_type_name in typeCollections:\n db = SqliteDB(path.join(folder, databaseFilename % ecc_type_name))\n # first write the ecc_id_lookup table, makes sure there is only one such table\n if db.createTableIfNotExists(\"ecc_id_lookup\", ((\"ecc_id\",\"integer\"), (\"ecc_type_name\",\"text\"))):\n db.insertIntoTable(\"ecc_id_lookup\", (ecc_id, ecc_type_name))\n\n # next create the eccentricities and collisionParameters table\n db.createTableIfNotExists(\"eccentricities\", ((\"event_id\",\"integer\"), (\"ecc_id\", \"integer\"), (\"n\",\"integer\"), (\"ecc_real\",\"real\"), (\"ecc_imag\",\"real\")))\n db.createTableIfNotExists(\"collisionParameters\", ((\"event_id\",\"integer\"), (\"Npart\", \"integer\"), (\"Ncoll\",\"integer\"), (\"b\",\"real\"), (\"total_entropy\",\"real\")))\n if(deformedNuclei):\n db.createTableIfNotExists(\"deformationParameters\", ((\"event_id\",\"integer\"), (\"cosTheta1\", \"real\"), (\"phi1\",\"real\"), (\"cosTheta2\",\"real\"), (\"phi2\",\"real\")))\n\n # the big loop\n for iorder in range(1,10):\n data = loadtxt(path.join(folder, '%s_ecc_eccp_%d.dat' %(ecc_type_name, iorder)))\n if iorder == 1:\n Npart = data[:,4]\n Ncoll = data[:,5]\n dSdy = data[:,6]/multiplicityFactor #scale out the multiplicity factor used in superMC\n b = data[:,7]\n for event_id in range(len(Npart)):\n db.insertIntoTable(\"collisionParameters\", (event_id, int(Npart[event_id]), int(Ncoll[event_id]), float(b[event_id]), 
float(dSdy[event_id])))\n if(deformedNuclei):\n cosTheta1 = data[:,8]\n phi1 = data[:,9]\n cosTheta2 = data[:,10]\n phi2 = data[:,11]\n for event_id in range(len(Npart)):\n db.insertIntoTable(\"deformationParameters\", (event_id, float(cosTheta1[event_id]), float(phi1[event_id]), float(cosTheta2[event_id]), float(phi2[event_id])))\n eccReal = data[:,2]\n eccImag = data[:,3]\n for event_id in range(len(eccReal)):\n db.insertIntoTable(\"eccentricities\",(event_id, ecc_id, iorder, float(eccReal[event_id]), float(eccImag[event_id])))\n\n # close connection to commit changes\n db.closeConnection()", "def effective_cluster_weights(self):\n weights = np.array(\n [\n np.sum(\n self._subspace.function_ordering_multiplicities[\n self._subspace.function_orbit_ids == i\n ]\n * self.eci[self.eci_orbit_ids == i] ** 2\n )\n for i in range(len(self._subspace.orbits) + 1)\n ]\n )\n return weights", "def formAdjacencyMatrix(self):\n self.adjacencyMatrix = dict()\n for i in self.node:\n self.adjacencyMatrix[i] = dict()\n for j in self.node:\n self.adjacencyMatrix[i][j] = 0\n \n for ij in self.link:\n self.adjacencyMatrix[self.link[ij].tail][self.link[ij].head] = 1", "def __init__(self, dictAlg):\n\n # values of dict dictAlg are DataSetList which should have only one\n # element which will be assigned as values in the following lines.\n d = set()\n f = set()\n for i in dictAlg.values():\n d |= set(j.dim for j in i)\n f |= set(j.funcId for j in i)\n\n if len(f) > 1 or len(d) > 1:\n Usage('Expect the data of algorithms for only one function and '\n 'one dimension.')\n\n f = f.pop()\n d = d.pop()\n\n dictMaxEvals = {}\n dictFinalFunVals = {}\n tmpdictAlg = {}\n for alg, i in dictAlg.iteritems():\n if len(i) == 0:\n warnings.warn('Algorithm %s was not tested on f%d %d-D.'\n % (alg, f, d))\n continue\n elif len(i) > 1:\n warnings.warn('Algorithm %s has a problem on f%d %d-D.'\n % (alg, f, d))\n continue\n\n tmpdictAlg[alg] = i[0] # Assign ONLY the first element as value\n dictMaxEvals[alg] = i[0].maxevals\n dictFinalFunVals[alg] = i[0].finalfunvals\n\n dictAlg = tmpdictAlg\n\n sortedAlgs = dictAlg.keys()\n # algorithms will be sorted along sortedAlgs which is now a fixed list\n\n # Align ERT\n erts = list(np.transpose(np.vstack([dictAlg[i].target, dictAlg[i].ert]))\n for i in sortedAlgs)\n res = readalign.alignArrayData(readalign.HArrayMultiReader(erts))\n\n resalgs = []\n reserts = []\n # For each function value\n for i in res:\n # Find best algorithm\n curerts = i[1:]\n assert len((np.isnan(curerts) == False)) > 0\n currentbestert = np.inf\n currentbestalg = ''\n for j, tmpert in enumerate(curerts):\n if np.isnan(tmpert):\n continue # TODO: don't disregard these entries\n if tmpert == currentbestert:\n # TODO: what do we do in case of ties?\n # look at function values corresponding to the ERT?\n # Look at the function evaluations? 
the success ratio?\n pass\n elif tmpert < currentbestert:\n currentbestert = tmpert\n currentbestalg = sortedAlgs[j]\n reserts.append(currentbestert)\n resalgs.append(currentbestalg)\n\n dictiter = {}\n dictcurLine = {}\n resDataSet = []\n\n # write down the #fevals to reach the function value.\n for funval, alg in zip(res[:, 0], resalgs):\n it = dictiter.setdefault(alg, iter(dictAlg[alg].evals))\n curLine = dictcurLine.setdefault(alg, np.array([np.inf, 0]))\n while curLine[0] > funval:\n try:\n curLine = it.next()\n except StopIteration:\n break\n dictcurLine[alg] = curLine.copy()\n tmp = curLine.copy()\n tmp[0] = funval\n resDataSet.append(tmp)\n\n setalgs = set(resalgs)\n dictFunValsNoFail = {}\n for alg in setalgs:\n for curline in dictAlg[alg].funvals:\n if (curline[1:] == dictAlg[alg].finalfunvals).any():\n # only works because the funvals are monotonous\n break\n dictFunValsNoFail[alg] = curline.copy()\n\n self.evals = resDataSet\n # evals is not a np array but a list of arrays because they may not\n # all be of the same size.\n self.maxevals = dict((i, dictMaxEvals[i]) for i in setalgs)\n self.finalfunvals = dict((i, dictFinalFunVals[i]) for i in setalgs)\n self.funvalsnofail = dictFunValsNoFail\n self.dim = d\n self.funcId = f\n self.algs = resalgs\n self.algId = 'Virtual Best Algorithm'\n self.comment = 'Combination of ' + ', '.join(sortedAlgs)\n self.ert = np.array(reserts)\n self.target = res[:, 0]\n\n bestfinalfunvals = np.array([np.inf])\n for alg in sortedAlgs:\n if np.median(dictAlg[alg].finalfunvals) < np.median(bestfinalfunvals):\n bestfinalfunvals = dictAlg[alg].finalfunvals\n algbestfinalfunvals = alg\n self.bestfinalfunvals = bestfinalfunvals\n self.algbestfinalfunvals = algbestfinalfunvals", "def _estimate_assignments(self, graph: GraphRepresentation) -> None:\n embed_graph = augment_diagonal(graph)\n latent = AdjacencySpectralEmbed(\n n_components=self.n_components, **self.embed_kws\n ).fit_transform(embed_graph)\n if isinstance(latent, tuple):\n latent = np.concatenate(latent, axis=1)\n gc = GaussianCluster(\n min_components=self.min_comm,\n max_components=self.max_comm,\n **self.cluster_kws\n )\n vertex_assignments = gc.fit_predict(latent) # type: ignore\n self.vertex_assignments_ = vertex_assignments", "def add_clusterings(self, clustering):\n self.clustering.append(clustering)", "def bases(layout, mvClass=MultiVector, grades=None):\n\n dict = {}\n for i in range(layout.gaDims):\n grade = layout.gradeList[i]\n if grade != 0:\n if grades is not None and grade not in grades:\n continue\n v = np.zeros((layout.gaDims,), dtype=int)\n v[i] = 1\n dict[layout.names[i]] = mvClass(layout, v)\n return dict", "def calculate_components(self, parts):\n target = {}\n for part in parts:\n rank = part[0]\n\n try:\n face = part[1]\n except IndexError:\n face = '*'\n\n try:\n target[rank][face] += 1\n except KeyError:\n if rank not in target:\n target[rank] = {}\n target[rank][face] = 1\n\n return target", "def kl_divergence(self):\r\n\r\n target_columns = list(self.origdst.columns[11:-3])\r\n target_columns.append(self.origdst.columns[1]) # channel\r\n target_columns.append(self.origdst.columns[2]) # program_title\r\n target_columns.append(self.origdst.columns[3]) # genre\r\n\r\n kl_dict = {}\r\n\r\n for col in target_columns:\r\n\r\n try:\r\n\r\n col_counts_orig = self.origdst[col].value_counts(normalize=True).sort_index(ascending=True)\r\n col_counts_synth = self.synthdst[col].value_counts(normalize=True).sort_index(ascending=True)\r\n\r\n kl = 
sum(rel_entr(col_counts_orig.tolist(), col_counts_synth.tolist()))\r\n\r\n kl_dict[col] = kl\r\n\r\n except:\r\n\r\n print('For the column ', col, ' you must generate the same unique values as the real dataset.')\r\n print('The number of unique values than you should generate for column ', col, 'is ',\r\n len(self.origdst[col].unique()))\r\n\r\n return kl_dict" ]
[ "0.60130954", "0.5818192", "0.57543993", "0.5732739", "0.56639177", "0.56580245", "0.56447226", "0.560482", "0.552206", "0.551928", "0.54748267", "0.5463208", "0.5428008", "0.54264724", "0.5389764", "0.535151", "0.5327543", "0.53244734", "0.5321029", "0.5317272", "0.5313567", "0.52784014", "0.5268973", "0.52574134", "0.5225924", "0.52034247", "0.5200481", "0.5199405", "0.5187148", "0.51740474", "0.5168689", "0.5168088", "0.5161099", "0.51606315", "0.5141321", "0.51361215", "0.5130891", "0.5123838", "0.51188105", "0.5114492", "0.5114134", "0.50952685", "0.5076774", "0.50671357", "0.5054872", "0.5054367", "0.50508374", "0.5020113", "0.50168306", "0.5013794", "0.50130177", "0.5012417", "0.5008053", "0.500698", "0.50005645", "0.49908346", "0.4988226", "0.49863073", "0.49841753", "0.49804747", "0.49748373", "0.4973168", "0.49665457", "0.4958062", "0.4954808", "0.4954743", "0.49452704", "0.493537", "0.49351147", "0.4931998", "0.49319458", "0.49191707", "0.4912733", "0.49119478", "0.4907791", "0.49076417", "0.49066368", "0.48908702", "0.48797494", "0.48775354", "0.48688793", "0.48541453", "0.48537183", "0.4853347", "0.4850343", "0.48494658", "0.4848352", "0.48341274", "0.48337764", "0.48269358", "0.48258853", "0.482428", "0.48193154", "0.48180807", "0.48172462", "0.4809217", "0.48090717", "0.48082712", "0.47957844", "0.47931737" ]
0.7477364
0
Calculates the mean distance and the sum of squared errors for each cluster and its related core and centroid. Always uses Jaccard distance.
def eval_mean_distance(played_decks, clustering_data: List, fuzzy: bool, debug: bool = False):
    for alg_dict in clustering_data:
        decks = np.array(played_decks)
        clusters = []
        for label in set(alg_dict["labels"]):
            indices = np.where(alg_dict["labels"] == label)
            if fuzzy:
                clusters.append(FuzzyDeckCluster(decks[indices]))
            else:
                clusters.append(DeckCluster(decks[indices]))
        if fuzzy:
            clustering = FuzzyDeckClustering(clusters)
        else:
            clustering = DeckClustering(clusters)
        sum_of_squared_distances_centroid = 0
        sum_of_squared_distances_core = 0
        for cluster in clustering.deck_clusters:
            centroid = cluster.centroid()
            core = cluster.core()
            for deck in cluster.decks:
                sum_of_squared_distances_centroid += (deck.jaccard_distance(centroid))**2
                sum_of_squared_distances_core += (deck.jaccard_distance(core))**2
        alg_dict["sse_centroid"] = sum_of_squared_distances_centroid
        alg_dict["sse_core"] = sum_of_squared_distances_core
        if debug:
            print("Alg: " + alg_dict["name"] + "; \t sse = " + str(alg_dict["sse_centroid"]))
            print("Alg: " + alg_dict["name"] + "; \t sse = " + str(alg_dict["sse_core"]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calcAvgDistances(centroids, clusters, class_header=\"Class\"):\n avg_distances = [0] * len(centroids)\n multiprocess_count = multiprocessing.cpu_count() # Find processor count\n for centroid_row_index, centroid_tuple in enumerate(centroids.iterrows()): # For each cluster\n work_list = [] # initialize multiprocessing structures\n set_list = []\n for _, datum in clusters[centroid_row_index].iterrows(): # For each point in the medoid cluster\n work_list.append((centroid_tuple[1], datum, class_header)) # add calculation to work list\n\n partition_size = math.ceil(len(work_list) / multiprocess_count) # find size of each work subeset\n for i in range(multiprocess_count - 1): # repeat for every subset\n sample = work_list[i * partition_size: (i + 1) * partition_size] # break work list into fair subsets\n set_list.append(sample)\n set_list.append((work_list[(multiprocess_count - 1) * partition_size:]))\n pool = multiprocessing.Pool(processes=multiprocess_count) # create multiprocessing pool\n # calculate sum of list of all distances from work list tasks\n avg_distances[centroid_row_index] = sum(sum(pool.map(Cluster.calcDistanceList, set_list), []))\n pool.close()\n pool.join()\n\n if avg_distances[centroid_row_index] is not 0: # make sure we do not divide by 0\n # calculate average of distance list\n avg_distances[centroid_row_index] = avg_distances[centroid_row_index] / len(clusters[centroid_row_index])\n return avg_distances", "def _calc_distance(self, X):\n distances = np.zeros((X.shape[0], self.n_clusters))\n print(distances.shape)\n for i, centroid in enumerate(self.centroids):\n distances[:, i] = np.linalg.norm(X - centroid, axis=1)\n return distances", "def calculate_cost(self, medoids, clusters):\n cost = 0.0\n for i in range(0, len(medoids)):\n for j in range(0, len(clusters[i])):\n cost += distance.sqeuclidean(medoids[i], clusters[i][j])\n return cost\n pass", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def clustering_error(examples):\n\n cluster_examples = partition(examples)\n cluster_averages = [\n class_average(cluster_examples[class_id])\n for class_id in range(0, cluster_count)\n ]\n\n # accumulate the distances between each example and its cluster's average\n # into an error value\n error = 0\n for example in examples:\n error += dist(cluster_averages[example.type], example)\n\n return (error, cluster_averages)", "def get_distance(self, samples, clusters):\n n_samples = samples.shape[0]\n n_features = samples.shape[1]\n n_centroids = clusters.shape[0]\n dist = np.zeros(shape=(n_samples, n_centroids))\n\n # computing squared euclidian distance for each sample-cluster pair\n for i in range(n_samples):\n for j in range(n_centroids):\n for k in range(n_features):\n dist[i, j] += (samples[i, k] - clusters[j, k])**2\n # dist[i, j] = D[i, j]**(1/2)\n\n return np.sqrt(dist)", "def test_sum_of_distances(self):\n N = 10\n centers = [[0, 0], [1, 0], [0.5, np.sqrt(0.75)]]\n cluster_std = [0.3, 0.3, 0.3]\n n_samples = int(0.75 * N)\n data, labels_true = \\\n sklearn.datasets.make_blobs(n_samples=n_samples,\n centers=centers,\n cluster_std=cluster_std)\n centers = [[0.5, np.sqrt(0.75)]]\n cluster_std = [0.3]\n extra, labels_true = \\\n sklearn.datasets.make_blobs(n_samples=int(0.25 * N),\n centers=centers,\n cluster_std=cluster_std)\n X = np.concatenate((data, extra), axis=0)\n N = X.shape[0]\n\n # Pick some random 
floats for the counts/weights:\n counts = np.random.random_sample((N,)) * 10\n\n # SciPy:\n Y = pdist(X, metric=cdist)\n weights = [counts[i] * counts[j]\n for i in xrange(N - 1) for j in xrange(i + 1, N)]\n scipy_sum = np.sum(weights * Y)\n N = counts.sum()\n N_unique_pairs = N * (N - 1.0) / 2.0\n scipy_mean = scipy_sum / N_unique_pairs\n\n # C & Cython:\n c_mean = c_mean_dist(X, counts)\n\n # There is minor rounding error, but check for equality:\n self.assertTrue(np.isclose(c_mean, scipy_mean))\n # Even though above is comparing the means, it is actually checking the\n # sums are correct as the means are calculated in the same way, i.e.,\n # by dividing by the same number, N_unique_pairs.", "def cluster(self, verbose=0, sum_ess=False):\n ## if sum_ess and self.linkage.__name__ != \"ward_link\":\n ## raise ValueError(\n ## \"Summing for method other than Ward makes no sense...\")\n clusters = copy.copy(self._dist_matrix)\n #clusters = self._dist_matrix\n summed_ess = 0.0\n\n while len(clusters) > max(self._num_clusters, 1):\n if verbose >= 1:\n print('k=%s' % len(clusters))\n if verbose == 2:\n print(clusters)\n\n best, i, j = self.smallest_distance(clusters)\n # In Ward (1963) ess is summed at each iteration\n # in R's hclust and Python's hcluster and some text books it is not.\n # Here it is optional...\n if sum_ess:\n summed_ess += best\n else:\n summed_ess = best\n clusters = self.update_distmatrix(i, j, clusters)\n self._dendrogram.merge(i,j)\n self._dendrogram[i].distance = summed_ess\n indices = numpy.arange(clusters.shape[0])\n indices = indices[indices!=j]\n clusters = clusters.take(indices, axis=0).take(indices, axis=1)", "def _calculate_distances(self):\n all_dists = []\n for ref in range(len(self.atoms)):\n if self.atoms[ref].symbol in self.exclude:\n continue\n indices = list(range(ref+1, len(self.atoms)))\n indices = self._filter_excluded(indices)\n if len(indices) == 0:\n continue\n dists = self.atoms.get_distances(ref, indices, mic=True)\n all_dists += list(dists)\n \n # Normalize by the mean distance\n return np.array(all_dists)/np.mean(all_dists)", "def cluster_spatial_positioning(data):\n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n if n_clusters <2:\n #Setting cluster angluar features to default\n cdist=[Cluster_Relative_Distances()]\n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n elif n_clusters >=2:\n # Here we implement two approaches for measuring distances between clustes:\n # (1) border-boder distances and (2) centroid-centroid distances. \n # We compute dispersion measures for the distances obtained. 
\n \n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n\n min_dist_between_clusters=np.row_stack([[np.amin(ss.distance_matrix(np.column_stack([d[i]['X'].array,d[i]['Y'].array]), \n np.column_stack([d[j]['X'].array,d[j]['Y'].array]))) for j in d.keys()] for i in d.keys()])\n min_dist_between_clusters=np.delete(list(set(np.frombuffer(min_dist_between_clusters))) ,0)\n\n cen_dist_between_clusters=ss.distance_matrix(np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]),\n np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]))\n cen_dist_between_clusters=np.delete(list(set(np.frombuffer(cen_dist_between_clusters))) ,0)\n\n (avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster)= distribution_statistics(min_dist_between_clusters)\n\n (avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster)= distribution_statistics(cen_dist_between_clusters)\n\n cdist = [Cluster_Relative_Distances([avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster,\n avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster])]\n \n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n \n return cdist", "def __calculate_estimation(self):\r\n estimation = 0.0\r\n for index_cluster in range(0, len(self.__clusters)):\r\n cluster = self.__clusters[index_cluster]\r\n index_medoid = self.__current[index_cluster]\r\n for index_point in cluster:\r\n estimation += euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[index_medoid])\r\n\r\n return estimation", "def train_KMean(data: np.array, labels: np.array, n_clusters: int)->None:\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n\n # Scale the data so that Euclidian distance makes sense\n means = np.mean(data, axis = 0)\n stddevs = np.std(data, axis = 0, ddof = 1)\n\n #print(means)\n #print(stddevs)\n\n data_scaled = np.zeros((n_examples, n_features))\n\n for i in range(n_features):\n data_scaled[:, i] = (data[:,i] - means[i]) / stddevs[i]\n\n study_correlation(data_scaled)\n\n # Initialize the centroids\n idx = np.random.randint(n_examples, size = n_clusters)\n centroids = data_scaled[idx, :]\n\n counter = 0\n\n while True:\n\n distances = np.array([[np.sqrt(np.sum(np.square(example-centroid))) for centroid in centroids] for example in data_scaled])\n centroid_idx = np.argmin(distances, axis = 1)\n old_centroids = centroids\n centroids = update_centroids(data_scaled, centroid_idx, n_examples)\n #displacement = get_displacement(old_centroids, centroids)\n displacement = np.linalg.norm(np.array([old - new for old, new in zip(old_centroids, centroids)]))\n\n #assert np.linalg.norm(np.array([old - new for old, new in zip([1, 2, 3, 4], [5, 6, 7, 8])])) == 8\n\n if counter == 0:\n# print(\"Initial displacement = {}\".format(displacement))\n initial_displacement = displacement\n\n counter += 1\n\n if displacement < (initial_displacement / 10000): break\n\n 
#print(\"Total number of loops before ending : {}\".format(counter))\n converted_predictions = convert_predictions(centroid_idx)\n accuracy = np.mean([p == l for p, l in zip(converted_predictions, labels)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass", "def calcCentroid(self):\n size = len(self.vectors)\n # zip all features together\n zipped = zip(*self.vectors)\n # Calculate the mean for each feature/column\n centroid = [math.fsum(column)/size for column in zipped]\n \n return centroid", "def compute_distance(X, K_clusters):\n dis = np.linalg.norm((X-K_clusters),2,axis=1)**2\n return dis", "def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)", "def computeLoss(self):\n return sum(np.arccosh(-minkowskiArrayDot(self.examples, self.centroid)) ** 2)[0] / np.shape(self.examples)[0]", "def __compute_distance(self, x, centroid):\n \n diff = x - centroid\n return np.sqrt(np.dot(diff.T, diff))", "def __calculate_estimation(self):\n estimation = 0.0\n for index_cluster in range(0, len(self.__clusters)):\n cluster = self.__clusters[index_cluster]\n index_medoid = self.__current[index_cluster]\n for index_point in cluster:\n estimation += euclidean_distance_square(\n self.__pointer_data[index_point],\n self.__pointer_data[index_medoid],\n )\n\n return estimation", "def clustering(df, mode):\n # split into list of dfs containing only one reference node\n df_list = [df.loc[i : i + 8 - 1, :] for i in range(0, len(df), 8)]\n\n df_coefficient = pd.DataFrame()\n\n # loop over every single node\n for df_single in df_list:\n df_single = df_single.reset_index()\n total_value = 0\n\n # loop over the weights of all connected nodes\n for j in range(len(df_single) - 1):\n if mode == \"geometric\":\n # geometric\n total_value = total_value + math.sqrt(df_single.chi_sq[j] * df_single.chi_sq[j + 1])\n if mode == \"arithmetic\": \n # arithmetic\n total_value = total_value + ((df_single.chi_sq[j] * df_single.chi_sq[j + 1]) / 2)\n if mode == \"argmax\": \n # max\n total_value = total_value + max(df_single.chi_sq[j], df_single.chi_sq[j + 1])\n if mode == \"argmin\":\n # min\n total_value = total_value + min(df_single.chi_sq[j], df_single.chi_sq[j + 1])\n\n for i in range(len(df_single) - 1):\n if mode == \"geometric\":\n # geometric\n triplet_value = math.sqrt(df_single.chi_sq[i] * df_single.chi_sq[i + 1])\n if mode == \"arithmetic\":\n # arithmetic\n triplet_value = (df_single.chi_sq[i] * df_single.chi_sq[i + 1]) / 2\n if mode == \"argmax\":\n # max\n triplet_value = max(df_single.chi_sq[i], df_single.chi_sq[i + 1])\n if mode == \"argmin\": \n # min\n triplet_value = min(df_single.chi_sq[i], df_single.chi_sq[i + 1])\n\n cluster_coefficient = triplet_value / total_value\n buffer = [\n [\n df_single.reference[i],\n df_single.comparison[i],\n df_single.comparison[i + 1],\n triplet_value,\n cluster_coefficient,\n ]\n ]\n df_coefficient = df_coefficient.append(buffer)\n\n df_coefficient = df_coefficient.reset_index()\n\n print(\"\\n\\n threshold 0.5*c_omega\")\n check_list = []\n # print out triangles that have a cluster coefficient bigger, than X\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.5) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n 
check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n print(\"\\n\\n threshold 0.75*c_omega\")\n check_list = []\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.75) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n print(\"\\n\\n threshold 0.8*c_omega\")\n check_list = []\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.9) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n\n print(\"\\n\\n threshold 0.9*c_omega\")\n check_list = []\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.9) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n return", "def compute_distortion(cluster_list, data_table):\r\n distortion = 0\r\n \r\n for cluster in cluster_list:\r\n distortion += cluster.cluster_error(data_table)\r\n\r\n return distortion", "def _recalculate_centroids(self):\n\n self._prev_centroids = dict(self.centroids)\n for cluster in self.clusters:\n self.centroids[cluster] = np.average(self.clusters[cluster], axis=0)", "def calculate_all_distances_to_center(self):\n all_distances = pd.DataFrame()\n for label in np.unique(self.embedding_df['cluster']): \n distance_df = self.calculate_distances_for_cluster(label)\n all_distances = pd.concat([all_distances, distance_df])\n \n self.embedding_df = self.embedding_df.merge(all_distances, left_index=True, right_index=True)", "def compute_distortion(cluster_list, data_table):\n\tdistortion = 0\n\tfor cluster in cluster_list:\n\t\tdistortion += cluster.cluster_error(data_table)\n\treturn distortion", "def cluster_index_2(X):\n \n global_mean = X.mean(axis=0)\n\n sum_squared_distances = (((X - global_mean)**2).sum(axis = 1)).sum()\n #Sum of squared distances of each sample from the global mean\n \n centroids, labels, inertia = k_means(X, 2)\n\n ci = inertia / sum_squared_distances\n\n return ci , labels", "def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion", "def error(self, documents):\n ###TODO\n sum_1 = 0.0\n for c_id,clust in self.fin_clust.items():\n n = self.sqnorm(self.means[c_id]) \n sum_1 = sum_1 + sum([self.distance(self.docs[dc],self.means[c_id],n) for dc in clust]) \n return round(sum_1,2)", "def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = 
C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)", "def find_avg(centroids, short_cut=False, sim_scores=None):\n \n total_sim = 0.0\n total_comparisons = 0\n \n if short_cut:\n total_comparisons = len(sim_scores)\n \n for score in sim_scores:\n total_sim += score\n \n return (total_sim / total_comparisons)\n\n length = len(centroids)\n\n for i in xrange(0, length):\n for j in xrange(i + 1, length):\n total_sim += similarity(centroids[i], centroids[j])\n total_comparisons += 1\n\n return (total_sim / total_comparisons)", "def avg_dists(self):\n \n d = self.descriptors\n # make an empty array to fill b/c it is a touch faster\n averages = np.empty([1, self.d_length])\n for i, u in enumerate(d):\n s = 0\n for j, v in enumerate(d):\n if i != j:\n s += self.jaccard(u, v)\n averages[0, i] = (s / (self.d_length-1))\n return averages[0]", "def compute_means(self):\n ###TODO\n vector_means = []\n for doc in self.fin_clust.values():\n vec = defaultdict(float)\n for d_id in doc:\n doc_keys = self.docs[d_id].keys()\n for key in self.docs[d_id]:\n vec[key] = vec[key] + self.docs[d_id][key]\n tot = len(doc)\n x = defaultdict(float)\n for k,v in vec.items():\n x[k] = float(v)/tot\n vec = Counter(x)\n vector_means.append(vec)\n return vector_means", "def compute_distances(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n \n sum_test_square = np.sum(np.square(X), axis=1).reshape(-1, 1)\n sum_train_square = np.sum(np.square(self.X_train), axis=1).reshape(-1, 1)\n product_test_train = X @ self.X_train.T\n \n sum_test_square = np.repeat(sum_test_square, num_train, axis=1)\n sum_train_square = np.repeat(sum_train_square, num_test, axis=1).T\n \n dists_square = sum_test_square - 2 * product_test_train + sum_train_square\n \n dists = np.sqrt(dists_square)\n \n return dists", "def fit(self, train_data, metric=None):\n \n if metric==None:\n self.metric=self.euclidean_sqr\n\n if self.centroids_.shape[0]==0:\n centroids=self.random_init(train_data)\n else:\n centroids=self.centroids_\n\n # remove mean from data\n #train_data_mean=np.mean(train_data,axis=0)\n #train_data=train_data-train_data_mean\n # row norms??\n #train_data_sqr_norms = np.einsum('ij,ij->i', train_data, train_data)\n\n\n old_centroids=np.zeros(centroids.shape)\n\n 
# iterate until no change in cluster centers or defined number of iterations is reached\n n_iterations=self.n_iterations_\n while n_iterations>0 and np.array_equal(centroids,old_centroids)==False:\n n_iterations-=1\n old_centroids=centroids\n centroids=self.fit_iteration(train_data, centroids)\n \n self.centroids_=centroids\n return centroids", "def get_cluster_average(cls, indices, dist_mat):\n distances = cls.get_all_distances(indices, dist_mat)\n return np.mean(distances)", "def centroids(self):\n return self.mdm_.covmeans_", "def euclidean_dist(self):\r\n\r\n real_cat, synth_cat = self.to_cat(self.origdst, self.synthdst)\r\n\r\n real_cat_dem = self.get_demographics(real_cat)\r\n synth_cat_dem = self.get_demographics(synth_cat)\r\n\r\n corr_real_obj = associations(real_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n corr_synth_obj = associations(synth_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n\r\n corr_real = corr_real_obj['corr']\r\n corr_rand = corr_synth_obj['corr']\r\n\r\n eucl_matr = distance.cdist(corr_real, corr_rand, 'euclidean')\r\n\r\n eucl = LA.norm(eucl_matr)\r\n\r\n return eucl, eucl_matr", "def calculate_cost(data, centers, clusters):\n total = 0\n for i in range(len(centers)):\n total = total + np.sum(data[centers[i]][clusters[i]]) \n return total", "def calcCentroids(data_points, clusters):\n #initiate empty list for the new centroids\n newCentroids = []\n\n #For position in each cluster, calculate the average for each position\n #The lists are zipped so each position can have an average\n for c in clusters:\n newCentroids.append(map(calcAverage, zip(*c)))\n\n #This is the check that a centroid is not empty. If a centroid is empty,\n #delete it, the filled centroids are added to the new list\n correctCentroid = []\n for centroid in newCentroids:\n #If centroid is not empty\n if centroid:\n correctCentroid.append(centroid)\n\n return len(correctCentroid), correctCentroid", "def SquareClusteringCoefficient(graph):\n coef = np.mean(list(nx.square_clustering(graph).values()))\n return coef", "def test_mean_of_distances(self):\n X = np.array([[0.3, 0.4],\n [0.1, 4.0],\n [2.0, 1.0],\n [0.0, 0.5]])\n counts = np.array([3, 2, 1, 2])\n scipy_X = []\n for c, count in enumerate(counts):\n for i in range(count):\n scipy_X.append(X[c])\n\n # SciPy:\n Y = pdist(scipy_X, metric=cdist)\n scipy_N = np.sum(counts)\n N_unique_pairs = scipy_N * (scipy_N - 1.0) / 2.0\n scipy_mean = Y.mean()\n self.assertTrue(Y.shape[0] == N_unique_pairs)\n self.assertTrue(scipy_mean == (np.sum(Y) / N_unique_pairs))\n\n # C & Cython:\n c_mean = c_mean_dist(X, counts)\n self.assertTrue(np.isclose(c_mean, scipy_mean))", "def calculate_error(k_means_matrix):\n return sum([min(dist) for dist in k_means_matrix])", "def assign_to_current_mean(img: np.ndarray, clustermask: np.ndarray) -> float:\n\n rows, cols = img.shape[:2]\n distances = np.zeros((numclusters, 1))\n overall_dist = 0\n\n for i in range(rows):\n for j in range(cols):\n distances = distance(img[i, j, :]) # returned shape: (numclusters, 1)\n \n k = np.argmin(distances) # closest cluster\n clustermask.itemset((i, j), k) # update cluster mask\n overall_dist += distances[k, 0] # sum distance\n\n return overall_dist", "def _evaluate_centroids(self):\n\n for c in self.centroids:\n _prev_cent = self._prev_centroids[c]\n _curr_cent = self.centroids[c]\n\n if self._euclidean_distance(_prev_cent, _curr_cent) > self.tol:\n return\n self._optimized = True", "def calculate_metric(self, distance_matrix):\n ap_scores = []\n for 
node_id in range(len(distance_matrix)):\n sorted_nodes = np.argsort(distance_matrix[node_id]).tolist()\n neighs = self.neighbors[node_id]\n n_correct = 0.0\n precisions = []\n for i in range(1, len(sorted_nodes)):\n if sorted_nodes[i] in neighs:\n n_correct += 1\n precisions.append(n_correct / i)\n if n_correct == len(neighs):\n break\n\n ap_scores.append(np.mean(precisions))\n\n return np.mean(ap_scores)", "def compute_clusters(self, documents):\n ###TODO\n for d in range(0, len(documents)):\n maxi = 999999999\n for cid in range(0, len(self.means)):\n dist = self.distance(documents[d], self.means[cid], self.norms[cid])\n if dist < maxi:\n maxi = dist\n clust = cid \n self.cluster[d] = clust", "def run(self, points, K):\n # Get size\n D, N = points.shape\n\n # DxK array initialiezd with random points\n centroids = points[:, np.random.permutation(N)[:K]]\n\n # Assigments 1xN array\n labels = np.zeros(N)\n\n for it in np.arange(self.niter):\n # 1. Compute distance to all cluster\n #v1 dirty\n distances = np.zeros([K, N])\n for n in np.arange(N):\n for k in np.arange(K):\n distances[k, n] = np.sqrt( (points[:, n] - centroids[:, k])**2 ).sum()\n #distances = np.sqrt(((points - centroids[:, np.newaxis, 0])**2)).sum(axis=0) \n\n # 2. Update assigments\n # v1 dirty\n for n in np.arange(N):\n kmin = 0\n for k in np.arange(1, K):\n if distances[k, n] <= distances[kmin, n]:\n kmin = k\n labels[n] = kmin\n # v2 quicker\n #labels = np.argmin(distances, axis=1)\n\n # 3. Update mean\n for k in np.arange(K):\n centroids[:, k] = np.mean(points[:, labels == k], axis=1)\n #np.array([points[closest==k].mean(axis=0) for k in range(centroids.shape[0])])\n\n return centroids, labels", "def average_distance(c1, c2):\n return sum(sum(symmetric_distances[p1][p2] for p1 in c1) for p2 in c2) \\\n / (len(c1) * len(c2))", "def calculate_centroids(self, data, clusters):\n centroids = []\n for i in range(self.n_clusters):\n mask = clusters == i \n centroids.append(np.mean(data[mask, :], axis = 0)) \n return centroids", "def kmean(X,initial_centroids,max_iters):\n m = np.size(X,0)\n K = np.size(initial_centroids,0)\n centroids = initial_centroids\n idx = np.zeros((m,1))\n for i in range(1,max_iters):\n idx = nearest_cluster(X,centroids)\n centroids = update_centroids(X,idx,K)\n return centroids,idx", "def k_means_clustering(rows, distance=pearson_distance, k=4):\n # Determine the min and max values for each point\n ranges = [(min(row[i] for row in rows), max([row[i] for row in rows])) for i in range(len(rows[0]))]\n\n # Create k RANDOMLY placed centroids\n clusters = [[random() * (ranges[i][1] - ranges[i][0]) + ranges[i][0] for i in range(len(rows[0]))] for j in\n range(k)]\n distances_from_centroids = {}\n last_matches = None\n best_matches = None\n for t in range(100):\n print ('Iteration {}'.format(t))\n best_matches = [[] for i in range(k)]\n\n # Find the centroid that is the closest for each row\n for j in range(len(rows)):\n row = rows[j]\n best_match = 0\n for i in range(k):\n d = distance(clusters[i], row)\n if d < distance(clusters[best_match], row):\n best_match = i\n best_matches[best_match].append(j)\n\n # if the results are the same as last time, then this is complete\n if best_matches == last_matches:\n break\n last_matches = best_matches\n\n # Move the centroids to the average of their members\n for i in range(k):\n avgs = [0.0] * len(rows[0])\n if len(best_matches[i]) > 0:\n for row_id in best_matches[i]:\n for m in range(len(rows[row_id])):\n avgs[m] += rows[row_id][m]\n for j in range(len(avgs)):\n 
avgs[j] /= len(best_matches[i])\n clusters[i] = avgs\n\n # Chapter 3 Exercise 5: Return along with the cluster results the total distance between all items\n # and their respective centroids\n for i in range(k):\n for j in range(len(best_matches[i])):\n distances_from_centroids[best_matches[i][j]] = distance(clusters[i],rows[best_matches[i][j]])\n return best_matches, distances_from_centroids", "def update(self, clusters):\n centroids = {}\n for cluster, coordinates in clusters.iteritems():\n sumLat = 0\n sumLong = 0\n for coordinate in coordinates:\n sumLat += float(coordinate[0])\n sumLong += float(coordinate[1])\n centroids[cluster] = (sumLat/float(len(coordinates)), sumLong/float(len(coordinates)))\n return centroids", "def centroid_link(clusters, i, j, dendrogram):\n n_i, n_j = len(dendrogram[i]), len(dendrogram[j])\n a_i = n_i / (n_i + n_j)\n a_j = n_j / (n_i + n_j)\n b = -(n_i * n_j) / (n_i + n_j)**2\n update_fn = lambda d_ik,d_jk: a_i*d_ik + a_j*d_jk + b*clusters[i,j]\n return _general_link(clusters, i, j, update_fn)", "def _distorted_distance(self):\n distance = 0\n for i, pixel in enumerate(self.training_set):\n distance += self._euclid_distance(\n pixel, self.clusters[self.labels[i]], axis=0)\n return distance", "def calc_centroid(self):\n num = 0\n centroid = numpy.zeros(3, float)\n for atm in self:\n if atm.position is not None:\n centroid += atm.position\n num += 1\n return centroid / num", "def calculate(self):\n\n gt_n = np.count_nonzero(self.ground_truth)\n gt_indices = np.flip(np.where(self.ground_truth == 1), axis=0)\n gt_mean = gt_indices.mean(axis=1)\n gt_cov = np.cov(gt_indices)\n\n seg_n = np.count_nonzero(self.segmentation)\n seg_indices = np.flip(np.where(self.segmentation == 1), axis=0)\n seg_mean = seg_indices.mean(axis=1)\n seg_cov = np.cov(seg_indices)\n\n # calculate common covariance matrix\n common_cov = (gt_n * gt_cov + seg_n * seg_cov) / (gt_n + seg_n)\n common_cov_inv = np.linalg.inv(common_cov)\n\n mean = np.matrix(np.array(gt_mean) - np.array(seg_mean))\n\n return math.sqrt(mean * np.matrix(common_cov_inv) * mean.T)", "def calcDistortion(medoids, clusters, class_header=\"Class\"):\n distortion = 0\n for medoid_row_index, medoid_tuple in enumerate(medoids.iterrows()): # For every Medoid\n for _, datum in clusters[medoid_row_index].iterrows(): # For each point in the medoid cluster\n # Add the distance between medoid and data point squared to total distortion\n distortion += (Cluster.calcDistance(medoid_tuple[1], datum, class_header=class_header)) ** 2\n return distortion", "def _compute_mean(self, C, mag, rjb, rake):\n mean = (C['a1'] +\n self._compute_linear_magnitude_term(C, mag) +\n self._compute_quadratic_magnitude_term(C, mag) +\n self._compute_logarithmic_distance_term(C, mag, rjb) +\n self._compute_faulting_style_term(C, rake))\n\n return mean", "def findK_centroids_average(self, features, clusters):\n\n class InnerFeatures:\n def __init__(self, kps, des, pos):\n self.kps = kps\n self.des = des\n self.pos = pos\n\n kmeans = KMeans(n_clusters=clusters)\n\n pts = np.array(features.pos)\n kps = np.array(features.kps)\n des = np.array(features.des)\n\n kmeans.fit(pts)\n m_clusters = np.array(kmeans.labels_.tolist())\n centers = np.array(kmeans.cluster_centers_)\n\n # KeyPoint(x,y,size) -required\n\n final_kps = []\n final_des = []\n final_pts = []\n\n for cluster in range(clusters):\n indices = np.where(m_clusters == cluster)\n cluster_kps_size = np.mean(np.array([x.size for x in kps[indices]]))\n cluster_des = des[indices]\n\n average_des = 
np.mean(cluster_des, axis=0)\n cluster_kps = cv2.KeyPoint(x=centers[cluster][0], y=centers[cluster][1], _size=cluster_kps_size)\n\n final_kps.append(cluster_kps)\n final_des.append(average_des)\n final_pts.append([centers[cluster][0], centers[cluster][1]])\n\n final_pts = np.array(final_pts)\n final_des = np.array(final_des)\n final_kps = np.array(final_kps)\n\n result = InnerFeatures(kps=final_kps, des=final_des, pos=final_pts)\n return result", "def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return", "def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)", "def run_k_means(self):\r\n centroids = self.centroids\r\n\r\n for i in range(self.max_iters):\r\n self.closestcentroids()\r\n self.newcentroids()\r\n\r\n J = 0\r\n X = self.x\r\n m = len(X)\r\n idx = self.index\r\n K = self.K\r\n dim = X.shape[1]\r\n\r\n for num in range(K):\r\n # find the index of all entries where idx==n\r\n indexentries = np.nonzero(idx == num)[0]\r\n # the values in X that have the index in indesxentries\r\n values = X[indexentries]\r\n # using one of the K centroids to do the calculation. 
K<=2 doesn't\r\n # work here for some reason.\r\n centroid = centroids[num, 0]\r\n J += np.sum((values - centroid) ** 2)\r\n\r\n return [centroids.reshape((1, K, dim)), [X[idx == k].size for k in range(K)], J / m]", "def _eval_clustering(self, gen_reviews, clusters, embedding_model, clustering):\n result = []\n preds = self.predict_gen(gen_reviews, embedding_model, clustering)\n\n acc = accuracy_score(np.array(clusters), np.array(preds))\n conf = confusion_matrix(np.array(clusters), np.array(preds))\n\n return acc, conf", "def calculate_all_metrcis(self):\n self.calculate_gc_metrcis()\n self.calculate_sam_metrics()\n self.calculate_classification_metrics()\n self.calculate_losses()", "def _cluster(self):\n # , distance_function=spearman_squared_distance, max_iter=1000, tol=0.0001):\n if self.cluster_method is None:\n clusters = KMedoids(\n self.k,\n self.batchsize,\n dist_func=self.distance_function,\n max_iter=self.max_iter,\n tol=self.tol,\n init_medoids=self.init_medoids,\n swap_medoids=self.swap_medoids,\n )\n clusters.fit(self.clustering_attributions, verbose=self.verbose)\n\n self.subpopulations = clusters.members\n self.subpopulation_sizes = GAM.get_subpopulation_sizes(clusters.members)\n self.explanations = self._get_explanations(clusters.centers)\n # Making explanations return numerical values instead of dask arrays\n if isinstance(self.explanations[0][0][1], da.Array):\n explanations = []\n for explanation in self.explanations:\n explanations.append([(x[0], x[1].compute()) for x in explanation])\n self.explanations = explanations\n else:\n self.cluster_method(self)", "def kmeans(X, k, iterations=1000):\n\n # Initialize the cluster centroids (C <- centroid \"means\")\n C = initialize(X, k)\n\n if C is None:\n return None, None\n if not isinstance(iterations, int) or iterations <= 0:\n return None, None\n\n # n: number of dada points\n # d: dimension of each data point\n n, d = X.shape\n\n # # Initialize the cost/distortion function;\n # # defined as J = sum/n(sum/k(r(ij)*||x(i) - c(j)||**2))\n # J = np.inf\n\n # Iterate over iterations\n for iteration in range(iterations):\n # print(\"iteration:\", iteration)\n\n # Maintain a deep copy of C\n # C_prev = np.array([x for x in C])\n # Another alternative (removes for loop):\n C_prev = np.copy(C)\n\n # OPTION 1: FOR LOOPS\n\n # Initialize the array of pairwise data point-centroid\n # distances with zeros\n # dist = np.zeros((n, k))\n\n # for i in range(n):\n # for j in range(k):\n # dist[i, j] = np.linalg.norm(X[i, ...] 
- C[j, ...])\n # Note: squared distances can alternatively be inferred\n # directtly from the inner product of (X - C) with itself\n # dist[i, j] = np.inner(X[i,:]-C[j,:], X[i,:]-C[j,:])\n # print(\"dist:\", dist)\n # Squared distances from \"dist\":\n # print(\"dist ** 2:\", dist ** 2)\n\n # OPTION 2: VECTORIZATION\n\n # Convert X into an array suitable for vectorization\n Xv = np.repeat(X, k, axis=0)\n # print(\"Xv:\", Xv)\n # print(\"Xv.shape:\", Xv.shape)\n Xv = Xv.reshape(n, k, d)\n # print(\"Xv:\", Xv)\n # print(\"Xv.shape:\", Xv.shape)\n\n # Convert C into an array suitable for vectorization\n Cv = np.tile(C, (n, 1))\n # print(\"Cv:\", Cv)\n # print(\"Cv.shape:\", Cv.shape)\n Cv = Cv.reshape(n, k, d)\n # print(\"Cv:\", Cv)\n # print(\"Cv.shape:\", Cv.shape)\n\n # Compute the \"dist\" matrix of euclidean distances between\n # data points and centroids; shape (n, k)\n dist = np.linalg.norm(Xv - Cv, axis=2)\n\n # Assign each point of the dataset to a centroid:\n # Evaluate argmin(dist**2) for comparison with k\n # r(ij) = 1 if argmin(dist**2) == j\n # -> point i assigned to centroid k\n # otherwise r(ij) = 0 -> ignore point i wrt centroid k\n clss = np.argmin(dist ** 2, axis=1)\n # print(\"centroid indices:\", clss)\n # print(\"clss.shape:\", clss.shape)\n # Note: here, clss is a 1D array of the unique centroid index\n # to which each point in the dataset as been assigned (closest to);\n # the indices array is used in place of r(ij) in J evaluations\n\n # OPTION 1: EXIT CONDITION BASED ON J_prev == J\n\n # # Make a copy of the previous J value & reinitialize J\n # J_prev = J\n # # J = 0\n\n # # Update J (summing over the n data points),\n # # based on the (shortest) distances inferred from \"indices\"\n # # From \"for\" loop:\n # # for i in range(n):\n # # J += (dist[i, clss[i]] ** 2)\n # # From vectorization:\n # J = np.sum(dist[..., clss] ** 2)\n # # Normalize J to the number of data points to\n # # reduce the computational cost (optional)\n # J /= n\n # # print(\"J:\", J)\n\n # if J == J_prev:\n # # print(\"last iteration:\", iteration)\n # return C, clss\n\n # Move the cluster centroids to the center (mean) of\n # the refined cluster by updating C (centroid coordinates)\n for j in range(k):\n # Infer the array of data point indices that correspond\n # to each assigned cluster centroid\n indices = np.where(clss == j)[0]\n # print(\"indices:\", indices)\n if len(indices) == 0:\n C[j] = initialize(X, 1)\n else:\n C[j] = np.mean(X[indices], axis=0)\n\n # OPTION 2: EXIT CONDITION BASED ON C == C_prev\n\n if (C == C_prev).all():\n # print(\"last iteration:\", iteration)\n return C, clss\n\n # Update clss before returning C, clss\n Cv = np.tile(C, (n, 1))\n Cv = Cv.reshape(n, k, d)\n dist = np.linalg.norm(Xv - Cv, axis=2)\n clss = np.argmin(dist ** 2, axis=1)\n\n return C, clss", "def evaluate_clusters(self, cluster_formulas, value='weighted_sum'):\n num_elems = len(self.labels)\n total_val = {}\n num_cl = len(cluster_formulas)\n clustered_points_num = 0\n print(\"\\n\\n\")\n print(\"Sufficiently big clusters: {}\".format(num_cl))\n for c, formula, val in cluster_formulas:\n c_size = len([l for l in self.labels if l == c])\n clustered_points_num += c_size\n\n if value == 'weighted_sum':\n total_val[c] = val * c_size / num_elems\n elif value == 'sum':\n total_val[c] = val * 1\n\n clust_val = sum(total_val.values())\n self.clustering_value = total_val\n print(\"Value of clustering: {}\".format(clust_val))\n return clust_val", "def _calculate_analysis_values(predicted_clusters, true_cluster, 
times):\n logger = get_logger('analysis', logging.INFO)\n logger.info('Calculate scores')\n\n # Initialize output\n metric_results = [None] * len(metric_names)\n for m, min_value in enumerate(metric_min_values):\n if min_value == 1:\n metric_results[m] = np.ones(len(true_cluster))\n else:\n metric_results[m] = np.zeros((len(true_cluster)))\n\n # Loop over all possible clustering\n for i, predicted_cluster in enumerate(predicted_clusters):\n logger.info('Calculated Scores for {}/{} predicted clusters'.format(i, len(predicted_clusters)))\n # Calculate different analysis's\n metric_results[0][i] = misclassification_rate(true_cluster, predicted_cluster)\n metric_results[1][i] = average_cluster_purity(true_cluster, predicted_cluster)\n metric_results[2][i] = adjusted_rand_index(true_cluster, predicted_cluster)\n metric_results[3][i] = diarization_error_rate(true_cluster, predicted_cluster, times)\n\n return metric_results", "def find_centroid_for_each(self):", "def calc_error_dist(self):\n pass", "def train(self, train_X, max_iterations=1000):\n assert len(train_X.shape) == 2 and train_X.shape[1] == self.D, f\"train_X should be a NxD matrix. Got: {train_X.shape}\"\n assert max_iterations > 0, f\"max_iterations must be positive. Got: {max_iterations}\"\n N = train_X.shape[0]\n\n labels = np.empty(shape=(N, 1), dtype=np.long)\n distances = np.empty(shape=(N, self.K))\n for _ in range(max_iterations):\n old_labels = labels\n\n # ====================================================\n for index, center in enumerate(self.centers): # build the distances matrix\n # K iterations\n newDistanceCol = np.square(np.linalg.norm(train_X - center, axis=1))\n distances[:, index] = newDistanceCol\n\n labels = np.argmin(distances, axis=1).reshape((N, 1))# update labels based on distances\n\n cluster_child_count = np.zeros((1, self.K))\n self.centers = np.zeros(self.centers.shape) # replace with zeros\n\n\n for x_index, center_index in enumerate(labels): #update centers value\n # N iterations\n self.centers[center_index] += train_X[x_index, :] # increment centers\n cluster_child_count[:, center_index] += 1 # inc number of children in that center\n\n self.centers /= cluster_child_count.T # divide by number of children in each center to get avg\n # ====================================================\n\n # Check convergence\n if np.allclose(old_labels, labels):\n break\n\n return labels", "def k_clusters(old_ops, max_outputs, mut):\n \n # DM construction\n matrix = starting_centroids(old_ops, max_outputs, mut)\n\n\n # Clustering\n seed = []\n for i in matrix.OPs:\n seed.append(i)\n centroids = cluster(old_ops, seed, mut)\n disto = distortion(centroids, old_ops, mut)\n\n return centroids, disto", "def mean_cluster(self, labelled_cluster):\n sum_of_points = self.sum_cluster(labelled_cluster)\n size_cluster = len(labelled_cluster)\n if self.sigma_cl1:\n size_cluster += np.sqrt(2)*self.sigma_cl1*np.random.randn()\n mean_of_points = sum_of_points * (1.0 / size_cluster)\n return mean_of_points", "def clusterAndDistance(self, data):\n\t\treturn closestClusterAndDistance(data, self.centers)", "def _compute_dist(self, K, dist, within_distances, update_within):\r\n\r\n sw = self.sample_weight_\r\n\r\n for j in xrange(self.n_clusters):\r\n mask = self.labels_ == j\r\n if np.sum(mask) == 0:\r\n raise ValueError(\"Empty cluster found, try smaller n_cluster.\")\r\n\r\n denom = sw[mask].sum()\r\n denomsq = denom * denom\r\n if update_within:\r\n KK = K[mask][:, mask] \r\n dist_j = np.sum(np.outer(sw[mask], sw[mask]) * KK / 
denomsq)\r\n within_distances[j] = dist_j\r\n dist[:, j] += dist_j\r\n else:\r\n dist[:, j] += within_distances[j]\r\n\r\n dist[:, j] -= 2 * np.sum(sw[mask] * K[:, mask], axis=1) / denom #calculating distance of each point from centroid of cluster j by finding \r\n #diff. b/w centroid of cluster j & similarity of it with points in cluster j\r", "def cluster_means(self):\n if self.evaluate_by is not None:\n return(self.merged_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_data.groupby('labels').mean().transpose())", "def _transform(self, X):\r\n return euclidean_distances(X, self.cluster_centers_)", "def compute_cost(X, groups, K_clusters):\n m = X.shape[0]\n dis = np.empty(m)\n for i in range(m):\n dis[i] = compute_distance(X[i,:].reshape(1,X.shape[1]), K_clusters[groups[i],:].reshape(1,X.shape[1]))\n cost = (1/m)*np.sum(dis)\n return cost", "def computeKMeans(self, points, k):\n centroids = self.init_centroids(points,k)\n \n for i in range(5):\n closest = self.closestCentroids(points,centroids)\n centroids = self.updateCentroids(points, closest ,centroids)\n\n return centroids", "def calculateEuclideanDistance(vector):\r\n global euclideanDistance\r\n # create linkage matrix with the distance metric as euclidean distance\r\n # calculate the distances of the clusters by starting as singletons\r\n # and in each iteration will merge the two clusters which have the smallest distance\r\n # returns array of length n - 1\r\n # Z[i] will tell us which clusters were merged in the i-th iteration\r\n # each row has format [cluster1, cluster1, dist, sample_count].\r\n euclideanDistance = linkage(vector, metric='euclidean')", "def ex7():\n\n \"\"\"\n ================= Part 1: Find Closest Centroids ====================\n To help you implement K-Means, we have divided the learning algorithm\n into two functions -- find_closest_centroids and computeCentroids. In this\n part, you shoudl complete the code in the find_closest_centroids function.\n \"\"\"\n print('Finding closest centroids.\\n\\n')\n\n # Load an example dataset that we will be using\n with open('ex7/data/ex7data2.pkl', 'rb') as fin:\n X = pickle.load(fin)\n\n # Select an initial set of centroids\n K = 3 # 3 Centroids\n initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])\n\n # Find the closest centroids for the examples using the\n # initial_centroids\n idx = find_closest_centroids(X, initial_centroids)\n\n print('Closest centroids for the first 3 examples: \\n')\n print(idx[0:3])\n print('\\n(the closest centroids should be 0, 2, 1 respectively)\\n')\n\n \"\"\"\n ===================== Part 2: Compute Means =========================\n After implementing the closest centroids function, you should now\n complete the computeCentroids function.\n \n \"\"\"\n print('\\nComputing centroids means.\\n\\n')\n\n # Compute means based on the closest centroids found in the previous part.\n centroids = compute_centroids(X, idx, K)\n\n print('Centroids computed after initial finding of closest centroids: \\n')\n print(centroids)\n print('\\n(the centroids should be\\n')\n print(' [ 2.428301 3.157924 ]\\n')\n print(' [ 5.813503 2.633656 ]\\n')\n print(' [ 7.119387 3.616684 ]\\n)\\n')\n\n \"\"\"\n =================== Part 3: K-Means Clustering ======================\n After you have completed the two functions computeCentroids and\n find_closest_centroids, you have all the necessary pieces to run the\n kMeans algorithm. 
In this part, you will run the K-Means algorithm on\n the example dataset we have provided.\n \"\"\"\n print('\\nRunning K-Means clustering on example dataset.\\n\\n')\n\n # Load an example dataset\n with open('ex7/data/ex7data2.pkl', 'rb') as fin:\n X = pickle.load(fin)\n\n # Settings for running K-Means\n K = 3\n max_iters = 10\n\n \"\"\"\n For consistency, here we set centroids to specific values\n but in practice you want to generate them automatically, such as by\n settings them to be random examples (as can be seen in\n kmeans_init_centroids).\n \"\"\"\n initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])\n\n # Run K-Means algorithm. The 'true' at the end tells our function to plot\n # the progress of K-Means\n centroids, idx = run_kmeans(X, initial_centroids, max_iters, True)\n print('\\nK-Means Done.\\n\\n')\n\n \"\"\"\n ============= Part 4: K-Means Clustering on Pixels ===============\n In this exercise, you will use K-Means to compress an image. To do this,\n you will first run K-Means on the colors of the pixels in the image and\n then you will map each pixel on to it's closest centroid.\n \n You should now complete the code in kmeans_init_centroids.py\n \"\"\"\n\n print('\\nRunning K-Means clustering on pixels from an image.\\n\\n')\n\n # Load an image of a bird\n A = plt.imread('ex7/data/bird_small.png')\n # A = A / 255; # Divide by 255 so that all values are in the range 0 - 1\n\n # Size of the image\n img_size = A.shape\n\n # Reshape the image into an Nx3 matrix where N = number of pixels.\n # Each row will contain the Red, Green and Blue pixel values\n # This gives us our dataset matrix X that we will use K-Means on.\n X = np.reshape(A, (img_size[0] * img_size[1], 3))\n\n # Run your K-Means algorithm on this data\n # You should try different values of K and max_iters here\n K = 16\n max_iters = 10\n\n # When using K-Means, it is important the initialize the centroids\n # randomly.\n # You should complete the code in kmeans_init_centroids.py before proceeding\n initial_centroids = kmeans_init_centroids(X, K)\n\n # Run K-Means\n [centroids, idx] = run_kmeans(X, initial_centroids, max_iters)\n\n \"\"\"\n ================= Part 5: Image Compression ======================\n In this part of the exercise, you will use the clusters of K-Means to\n compress an image. To do this, we first find the closest clusters for\n each example. 
After that, we \n \"\"\"\n print('\\nApplying K-Means to compress an image.\\n\\n')\n\n # Find closest cluster members\n idx = find_closest_centroids(X, centroids)\n\n # Essentially, now we have represented the image X as in terms of the\n # indices in idx.\n\n # We can now recover the image from the indices (idx) by mapping each pixel\n # (specified by it's index in idx) to the centroid value\n X_recovered = centroids[idx, :]\n\n # Reshape the recovered image into proper dimensions\n X_recovered = np.reshape(X_recovered, (img_size[0], img_size[1], 3))\n\n # Display the original image\n plt.close()\n fig, (ax1, ax2) = plt.subplots(1, 2)\n ax1.imshow(A)\n ax1.set_title('Original')\n\n # Display compressed image side by side\n ax2.imshow(X_recovered)\n ax2.set_title('Compressed, with {:d} colors.'.format(K))\n plt.show()", "def get_cluster_accuracy(self, curs, p_gene_table, mcl_id_list, p_value_cut_off=0.01):\n\t\taccuracy2cluster = []\n\t\tfor mcl_id in mcl_id_list:\n\t\t\tcurs.execute(\"select is_correct_lca from %s where mcl_id=%s and p_value_cut_off<=%s\"%(p_gene_table,\\\n\t\t\t\tmcl_id, p_value_cut_off))\n\t\t\trows = curs.fetchall()\n\t\t\tif rows:\n\t\t\t\tis_correct_lca_array = array(rows)\n\t\t\t\tcorrect_array = greater(is_correct_lca_array[:,0],0)\n\t\t\t\tknown_array = greater_equal(is_correct_lca_array[:,0],0)\n\t\t\t\taccuracy = float(sum(correct_array))/float(sum(known_array))\n\t\t\t\taccuracy2cluster.append([accuracy, sum(correct_array), sum(known_array), len(correct_array), mcl_id]) \n\t\treturn accuracy2cluster", "def __move_centroids(self, data):\n \n for i in range(len(self.centroids)):\n members_cluster = data[self.clusters == i]\n self.centroids[i] = np.sum(members_cluster, axis=0) / (len(members_cluster) + self.epsilon)", "def calculate_euclidean_distance(self, matrix, input, output_neuron):\n result = 0\n\n # Loop over all input data.\n diff = input - matrix[output_neuron]\n return np.sqrt(sum(diff*diff))", "def evaluate(self, clustering):\n # Pca for each one of the clusters\n pca_mean_val = 0.;\n MAX_ELEMENTS = 1000\n for c in clustering.clusters:\n # Pick the coordinates (ensuring that we are copying them)\n element_indexes = c.all_elements\n ###################\n # Performance hack\n ###################\n # As it can be very slow for big clusters (i.e. > 3k elements) we'll compress this clusters \n # before calculating PCA. 
It should increase variance but will allow calculations.\n # It should use the kmedoids compressor\n if len(c.all_elements) > MAX_ELEMENTS:\n element_indexes = c.get_random_sample(MAX_ELEMENTS)\n print \"[PCA] Random sampling too big cluster to improve performance (%d elements -> %d elements).\"%(len(c.all_elements),MAX_ELEMENTS)\n ###################\n \n fitting_coordinates_of_this_cluster = self.fitting_coordinates[element_indexes]\n \n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster)\n \n if self.calculation_coordinates is not None:\n calculation_coordinates_of_this_cluster = self.calculation_coordinates[element_indexes]\n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster,\n calculationCoordsets = calculation_coordinates_of_this_cluster)\n \n # Make an iterative superposition (to get the minimum RMSD of all with respect to a mean conformation)\n calculator.iterativeSuperposition()\n\n # Calculate the covariance matrix\n if self.calculation_coordinates is None:\n covariance_matrix = PCAMetric.create_covariance_matrix(fitting_coordinates_of_this_cluster)\n else:\n covariance_matrix = PCAMetric.create_covariance_matrix(calculation_coordinates_of_this_cluster)\n \n # And then the eigenvalue we are interested in\n pca_mean_val += PCAMetric.calculate_biggest_eigenvalue(covariance_matrix)\n print \"PCA finished\"\n return pca_mean_val /clustering.total_number_of_elements", "def rms(trained_data, dist):\n sum = 0\n for i in trained_data:\n point = i[:-2]\n centroid = i[-1]\n distance = (calculate_distance(point,centroid, dist)**2)\n sum +=distance\n return sum", "def calc_distances_from_central(cluster, embedding):\n\n return calc_distances_in_embedding(cluster, embedding)", "def Dist_clust(data):\n mask = np.loadtxt('/net/tarea/scratch/Rafael/phd/apogee/python/comb_SkyTel_mask.dat')\n masked = np.where(mask == 1)[0]\n spectra_list = data['fullset']\n clusters = data['clusters']\n clusters = clusters.transpose()\n distance = np.zeros((len(spectra_list), 2))\n min_dist_cl = np.zeros((data['nc'], 2))\n for j_cluster in range(data['nc']):\n dist_cluster= np.zeros((data['nc']))\n for i_cluster in range(data['nc']):\n dist_cluster[i_cluster] = np.nansum((clusters[j_cluster][masked] - clusters[i_cluster][masked])**2)**0.5\n min_dist_cl[j_cluster,0] = np.argmin(dist_cluster)\n dist_cluster[np.argmin(dist_cluster)] = dist_cluster[np.argmax(dist_cluster)]\n if (len(np.where(dist_cluster != 0)[0]) > 0):\n min_dist_cl[j_cluster,1] = np.argmin(dist_cluster[(dist_cluster != 0)])\n for i_spec, name in enumerate(spectra_list):\n vec_temp = np.load(name)\n for i_cluster, j_cluster in enumerate(min_dist_cl[data['assign'][i_spec]]):\n distance[i_spec,i_cluster] = np.nansum((clusters[j_cluster][masked] - vec_temp['norm'][masked])**2)**0.5\n vec_temp.close()\n return distance, min_dist_cl", "def k_means(x_input, n_cluster=3, n_iter=100, n_tries=10):\n results = []\n for _ in range(n_tries):\n error_value = 0\n rand.seed(None)\n centers = sorted([rand.uniform(0.0, 100.0) for i in range(n_cluster)])\n min_dist_idx = [0] * len(x_input)\n i = 0\n while i < n_iter:\n failed = False\n dist_mat = l2_pairwise_distance(x_input, centers)\n error_value = calculate_error(dist_mat)\n min_dist_idx = [dist.index(min(dist)) for dist in dist_mat]\n centers = [0] * n_cluster\n count = [0] * n_cluster\n for j in range(len(x_input)):\n centers[min_dist_idx[j]] += 
x_input[j]\n count[min_dist_idx[j]] += 1\n\n for j in range(n_cluster):\n if count[j] == 0:\n centers = sorted(\n [rand.uniform(0.0, 100.0) for i in range(n_cluster)])\n failed = True\n break\n\n if failed:\n i = 0\n continue\n\n for j in range(n_cluster):\n centers[j] = centers[j] / count[j]\n i += 1\n\n results.append((centers, min_dist_idx, error_value))\n\n return min(results, key=lambda x: x[2])", "def get_distances(centroid, points):\r\n return np.linalg.norm(points - centroid, axis=1)", "def compute(self): \r\n hist = self.confusion_matrix\r\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\r\n mean_iu = np.nanmean(iu)\r\n return mean_iu", "def k_mean_cluster(num_anchors, boxes, convergence_threshold=1e-9):\n # Randomly pick 5 boxes as centroids\n centroids = np.array(random.sample(boxes, k=num_anchors))\n # Clustering until reaching loss_convergence threshold\n centroids, prev_errors = k_mean(boxes, centroids)\n while True:\n centroids, errors = k_mean(boxes, centroids)\n if abs(errors - prev_errors) <= convergence_threshold:\n break\n else:\n prev_errors = errors\n\n avg_iou = np.mean(np.max(compute_iou(boxes, centroids), axis=-1))\n return centroids, avg_iou", "def updateCentroids(self, points, closest, centroids):\n return numpy.array([points[closest==k].mean(axis=0) for k in range(centroids.shape[0])])", "def computeStats(self, absList, statOut, errorOut):\n \n nMentions = 0\n pSum = 0\n rSum = 0\n for abstract in absList:\n # build hash of annotated clusters/chains keyed by ID\n errorOut.write('\\n---- '+abstract.id+' ----\\n')\n trueChainLengths = {}\n entityList = abstract.annotatedEntities.getList(self.entityTypes[0])\n errorOut.write('True chains:\\n')\n for entityTemplate in entityList:\n if len(entityTemplate.getAnnotatedId()) > 0:\n trueChain = entityTemplate.getMentionChain()\n trueChainLengths[entityTemplate.getAnnotatedId(checkEntireCluster=False)] = len(trueChain)\n for m in trueChain:\n# errorOut.write(m.name+':'+m.getAnnotatedId(checkEntireCluster=False) +'\\n')\n errorOut.write('%s %s:%s, matchedMention=%s \\n'%(m.name, m.mention, m.getAnnotatedId(checkEntireCluster=False), m.mention.matchedMention))\n\n errorOut.write('----\\n')\n else:\n print abstract.id, entityTemplate.name, 'is missing an ID'\n \n # compute Recall and precision for each detected chain/cluster\n entityList = abstract.entities.getList(self.entityTypes[0])\n errorOut.write('\\nHypothesis chains:\\n')\n for entityTemplate in entityList:\n detectedChain = entityTemplate.getMentionChain()\n \n rootMention = entityTemplate.rootMention()\n errorOut.write('[Canonical name: '+rootMention.getCanonicalName()+']\\n')\n \n for m in detectedChain:\n errorOut.write('%s %s:%s, matchedMention=%s \\n'%(m.name, m.mention, m.getAnnotatedId(checkEntireCluster=False), m.mention.matchedMention))\n# errorOut.write(m.name+':'+m.getAnnotatedId(checkEntireCluster=False) +'\\n')\n errorOut.write('----\\n')\n\n nMentionsInChain = len(detectedChain)\n for mTemplate in detectedChain:\n nMentions += 1\n if len(mTemplate.getAnnotatedId(checkEntireCluster=False)) == 0:\n # mention is a false positive, it does not belong to any chain\n pSum += 1.0/nMentionsInChain\n rSum += 1\n else:\n if mTemplate.getAnnotatedId(checkEntireCluster=False) not in trueChainLengths:\n print abstract.id, 'template with id =',mTemplate.getAnnotatedId(checkEntireCluster=False), 'not in a true chain'\n break\n nMentionsInTrueChain = trueChainLengths[mTemplate.getAnnotatedId(checkEntireCluster=False)]\n 
nCorrectInDetectedChain = 0\n annotatedMatches = set([])\n # count the number of mentions in the detected chain that\n # should be in the same chain as this mention\n for m in detectedChain:\n if mTemplate.getAnnotatedId(checkEntireCluster=False) == m.getAnnotatedId(checkEntireCluster=False) \\\n and m.mention.matchedMention not in annotatedMatches:\n nCorrectInDetectedChain += 1\n annotatedMatches.add(m.mention.matchedMention)\n# else:\n# print abstract.id, 'Two mentions do not belong in same chain',\n# print mTemplate, m.getAnnotatedId()\n \n if nCorrectInDetectedChain > nMentionsInTrueChain:\n print abstract.id, 'id=',mTemplate.getAnnotatedId(checkEntireCluster=False), \n print 'detected chain=', nCorrectInDetectedChain,\n print 'true chain=', nMentionsInTrueChain\n nCorrectInDetectedChain = nMentionsInTrueChain\n \n# if nCorrectInDetectedChain != nMentionsInChain:\n# print abstract.id, 'id=',mTemplate.getAnnotatedId(), \n# print 'detected chain=', nCorrectInDetectedChain,\n# print 'true chain=', nMentionsInTrueChain\n \n pSum += float(nCorrectInDetectedChain) / nMentionsInChain\n rSum += float(nCorrectInDetectedChain) / nMentionsInTrueChain\n \n if nMentions == 0:\n print 'No mentions???'\n return \n \n precision = pSum/nMentions\n recall = rSum/nMentions \n fscore = 2*(recall*precision)/(recall + precision)\n \n sys.stdout.write('Recall\\tPrecision\\tF-score\\n')\n sys.stdout.write('%.3f\\t ' % recall + '%.3f\\t ' % precision + '%.3f' % fscore+'\\n')\n# statOut.write(self.entityTypesString+'\\n')\n# statOut.write('Recall\\tPrecision\\tF-score\\n')\n# statOut.write('%.3f\\t ' % recall + '%.3f\\t ' % precision + '%.3f' % fscore+'\\n')\n statOut.addStats('MC - '+self.entityTypesString, [['R', recall], ['P', precision], ['F',fscore]])", "def euclidean_sqr(self, instance, centroid):\n return np.linalg.norm(instance-centroid)**2", "def k_means(data, k = 2, centroids = None, max_iters = 100) :\r\n if centroids == None :\r\n centroids = get_random_centroids(data, k)\r\n \r\n elif len(centroids) != k :\r\n AssertionError(\"Número de centroides no equivale a k\")\r\n \r\n for i in range(max_iters) :\r\n old_centroids = centroids\r\n \r\n assigned_centroids = assign_centroids(data, centroids)\r\n \r\n # Sum the data by cluster\r\n centroids = [[0]*data.shape[1]] * k\r\n values_in_centroid = [0] * k\r\n\r\n for i in range(k) :\r\n assigned_centroids_aux = np.column_stack([assigned_centroids]*data.shape[1])==i\r\n centroids[i] = np.sum(np.multiply(data, assigned_centroids_aux), axis = 0)\r\n values_in_centroid[i] = np.sum(assigned_centroids == i)\r\n\r\n # Mean\r\n for i in range(k) :\r\n if values_in_centroid[i] > 0 :\r\n centroids[i] = centroids[i]/values_in_centroid[i]\r\n\r\n centroids = np.stack(centroids, axis=0)\r\n\r\n error = sum([np.linalg.norm(centroids[i] - old_centroids[i]) for i in range(k)])\r\n if(error < 1e-5) : break\r\n \r\n \r\n return centroids", "def run(self, max_clusters):\n sample_dist_matrix = self.matrix_dist()\n self.link.print_link()\n first_clus = self.clusters[0] # initialize first cluster to merge into\n second_clus = self.clusters[0] # initialize second cluster to merge\n max_samples_dist = max(sample_dist_matrix.values())\n # initialize minimun distance between two samples\n min_dist = max_samples_dist\n while len(self.clusters) > max_clusters: # clustering loop\n for clus in self.clusters: # iterate over every cluster\n for other_clus in self.clusters: # iterate over other clusters\n if clus.c_id > other_clus.c_id: # avoid duplicates and make sure to pass correct 
key to dictionary\n # compute distance between two clusters according to current link\n clus_dist = self.link.compute(clus, other_clus, sample_dist_matrix)\n if clus_dist < min_dist: # keep the minimum distance and its clusters\n min_dist = clus_dist\n first_clus = other_clus\n second_clus = clus\n self.clusters.remove(second_clus) # remove the cluster that's getting merged from clusters list\n first_clus.merge(second_clus) # merge the cluster with higher id into the other\n min_dist = max_samples_dist # restore high distance in order to start the search again\n\n sum_sil = self.compute_summery_silhouette(sample_dist_matrix)\n # print results\n for clus in self.clusters:\n clus.print_details(sum_sil[clus.c_id])\n print(f'Whole data: silhouette = {sum_sil[0]}, RI = {self.compute_rand_index()}')", "def calculate_clusters(study_id):\n\n with current_app.app_context():\n cur = conn.cursor()\n\n cur.execute(\"\"\"SELECT * FROM STATS WHERE STUDY_ID=%s\"\"\", (str(study_id),))\n study = fetchoneClean(cur)\n clusters_calculating = study[4]\n clusters_changed = study[5]\n if clusters_changed:\n if clusters_calculating:\n return {'message': 'calculating'}\n cur.execute(\"\"\"UPDATE STATS SET CLUSTERS_CALCULATING = TRUE WHERE STUDY_ID = %s\"\"\", (str(study_id),))\n conn.commit()\n\n distance = study[7]['matrix']\n card_names = study[7]['cardNames']\n cur.execute(\"\"\"SELECT COUNT(ID) FROM PARTICIPANT WHERE STUDY_ID = %s\"\"\", (str(study_id),))\n total_participants = fetchoneClean(cur)[0]\n\n distance_matrix = calculate_square_form(distance, total_participants)\n distArray = ssd.squareform(distance_matrix)\n\n try:\n clusters = hierarchy.linkage(distArray, method='average')\n except ValueError:\n return {'message': 'not enough data'}\n\n tree = hierarchy.to_tree(clusters, rd=False)\n # TODO Distance 0 on root\n dendro = dict(children=[], hierarchy=0, distance=100)\n add_node(tree, dendro, card_names)\n\n cur.execute(\"\"\"UPDATE STATS SET CLUSTERS = %s WHERE STUDY_ID = %s\"\"\", (json.dumps(dendro), str(study_id),))\n cur.execute(\"\"\"UPDATE STATS SET CLUSTERS_CALCULATING = FALSE WHERE STUDY_ID = %s\"\"\", (str(study_id),))\n cur.execute(\"\"\"UPDATE STATS SET CLUSTERS_CHANGED = FALSE WHERE STUDY_ID = %s\"\"\", (str(study_id),))\n conn.commit()\n else:\n dendro = study[6]\n\n return dendro", "def __call__(self, embeddings: np.ndarray) -> List[Optional[str]]:\n assert self._centroids != {} # Centroids must be set in advance\n \n # Setup known database\n cluster_ids, cluster_embs = zip(*self._centroids.items())\n cluster_embs = np.vstack(cluster_embs)\n \n # Calculate similarity with cluster centroids\n similarity = cosine_similarity(embeddings, cluster_embs)\n \n # Fetch the best-matching clusters\n results = []\n for i, idx in enumerate(similarity.argmax(axis=1)):\n results.append(cluster_ids[idx] if similarity[i, idx] >= self._sim_thr else None)\n return results", "def __find_nearest_centroids(self, data):\n \n self.clusters = np.array([]) \n for i, d in enumerate(data):\n min_dist = np.inf\n self.clusters = np.concatenate((self.clusters, np.array([-1])))\n for j, c in enumerate(self.centroids):\n dist = self.__compute_distance(d, c)\n if min_dist > dist:\n min_dist = dist\n self.clusters[i] = j", "def __get_cluster_centroid_distance(self, single_training: np.ndarray, cluster_center: np.ndarray) -> (int, float):\n training_label, training_distance = None, float('inf')\n # Check the distance of this point from all the cluster point.\n # This training point belongs to a cluster, which ever cluster 
centroid have the lowest distance from this point\n for cluster_label, single_cluster in enumerate(cluster_center):\n # Distance from the this training point to this cluster centroid\n this_distance = self.__get_distance(single_cluster, single_training)\n if this_distance < training_distance:\n training_label = cluster_label\n training_distance = this_distance\n return training_label, training_distance", "def kmeans_clustering(all_features, vocab_size, epsilon, max_iter):\n\n # Your code here. You should also change the return value.\n\n def _initiate_random_centroids(all_features, vocab_size):\n \"\"\"\n Initiate random centroids in the range of input\n\n :param all_features:\n :param vocab_size:\n :return:\n \"\"\"\n centroids = []\n # 1) Genereate points for initial centroids\n\n min_feat = np.ones(all_features[0].size)*np.inf\n max_feat = np.zeros(all_features[0].size)\n\n for a in all_features:\n for p in range(len(a)):\n if a[p] < min_feat[p]:\n min_feat[p] = a[p]\n else:\n if a[p] > max_feat[p]:\n max_feat[p] = a[p]\n\n\n for _ in range(vocab_size):\n random_vector = np.multiply(np.random.rand(1, all_features[0].size),\n max_feat-min_feat) + min_feat\n centroids.append(random_vector.flatten())\n\n return np.array(centroids)\n\n def _assign_vectors_to_nearest_centroid(all_features, centroid):\n \"\"\"\n Assign vectors to nearest centroids\n\n :param all_features:\n :param centroid:\n :return:\n \"\"\"\n #TODO: sprawdz co lepiej dziala\n new_centroid_coor = np.zeros([len(centroid), all_features[0].size])\n #new_centroid_coor = centroid\n new_centroid_counter = np.zeros(len(centroid))\n\n dist = pdist(centroid, all_features)\n #min_dist = dist.min(axis=0)\n min_dist_index = dist.argmin(axis=0)\n\n for x in range(len(min_dist_index)):\n id = min_dist_index[x]\n new_centroid_coor[id] = np.add(new_centroid_coor[id],\n all_features[x])\n new_centroid_counter[id] += 1\n\n new_centroid_coor_out = []\n for i in range(len(new_centroid_coor)):\n if new_centroid_counter[i] == 0:\n new_centroid_coor_out.append(centroid[i])\n else:\n new_centroid_coor_out.append(np.divide(new_centroid_coor[i],new_centroid_counter[i]))\n\n return np.array(new_centroid_coor_out), new_centroid_counter\n\n\n def _check_convergence_condition(old_centroids, new_centroids, epsilon):\n \"\"\"\n Check convergence confition\n\n :param old_centroids:\n :param new_centroids:\n :param epsilon: if every centroid is moved by dist < epsilon KMeans terminates\n :return:\n \"\"\"\n for i in range(len(old_centroids)):\n dist = euclidean(old_centroids[i], new_centroids[i])\n if dist > epsilon:\n return False\n\n return True\n\n def delete_small_clusters(new_centroids, centroid_counter, threshold):\n \"\"\"\n Potential extension of the algorithm -> if there is not any point in the cluster, delete this cluste\n\n :param new_centroids:\n :param centroid_counter:\n :param threshold:\n :return:\n \"\"\"\n\n out_centroids = []\n for n in range(len(new_centroids)):\n if centroid_counter[n] > threshold:\n out_centroids.append(new_centroids[n])\n out_centroids = np.array(out_centroids)\n return out_centroids\n\n #MAIN\n old_centroids = _initiate_random_centroids(all_features, vocab_size)\n\n for _ in range(max_iter):\n new_centroids, centroid_counter = _assign_vectors_to_nearest_centroid(all_features, old_centroids)\n if_convergenced = _check_convergence_condition(new_centroids, old_centroids, epsilon)\n\n if if_convergenced == True:\n # return centroids if algorithm is converged\n # return delete_small_clusters(new_centroids, 
centroid_counter, 0)\n return new_centroids\n old_centroids = new_centroids\n\n # return centroids if reached max_iter\n # return delete_small_clusters(new_centroids, centroid_counter, 0)\n return new_centroids" ]
[ "0.67511815", "0.667103", "0.65723747", "0.6478668", "0.62807804", "0.6273955", "0.622381", "0.6105332", "0.60754263", "0.6067694", "0.6046383", "0.603666", "0.60061574", "0.59977764", "0.5997736", "0.59847236", "0.5973263", "0.5969204", "0.5959918", "0.59587914", "0.59409744", "0.5935671", "0.59233385", "0.5913697", "0.5912067", "0.5898303", "0.5897077", "0.5891139", "0.58898133", "0.58638555", "0.5845367", "0.5830787", "0.5826753", "0.58232653", "0.58089817", "0.58002794", "0.579822", "0.5798033", "0.57907546", "0.5788285", "0.57841915", "0.5735069", "0.5728007", "0.57186556", "0.57140136", "0.56801116", "0.56733304", "0.5672067", "0.56673783", "0.5662086", "0.5661992", "0.56618965", "0.5655326", "0.5648323", "0.56384504", "0.5634903", "0.56336486", "0.563165", "0.5624158", "0.56166404", "0.5612015", "0.560881", "0.56073177", "0.5601574", "0.5595551", "0.55879253", "0.5585332", "0.55839145", "0.5571036", "0.55707157", "0.5567687", "0.5547009", "0.5536406", "0.5533442", "0.5532877", "0.5526473", "0.55253524", "0.5522286", "0.5520693", "0.5517294", "0.5515676", "0.5511529", "0.5509922", "0.54956645", "0.5489253", "0.5488356", "0.54664946", "0.5451735", "0.5450782", "0.54491097", "0.5447916", "0.5445619", "0.54417783", "0.54321754", "0.5424051", "0.5421796", "0.5419851", "0.54151356", "0.541286", "0.5403115" ]
0.66500586
2
Change the labels from arbitrary numbers to the range [0, len(set(labels))]. Points that are in the same cluster will stay in the same cluster. Points from different clusters will remain in different clusters.
def normalize_labels(labels):
    new_labels = np.array([-1] * len(labels))
    labels = np.array(labels)
    label_dict = dict()
    for i, label in enumerate(set(labels)):
        new_labels[np.where(labels == label)] = i
        label_dict[i] = label
    return label_dict, new_labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _relocate_clusters(self, cluster_labels):\n for cluster_label in range(self.k):\n if cluster_labels[cluster_label] is not None:\n # mean of the pixels assigned to cluster\n p_sum, p_count = np.asarray(\n cluster_labels[\n cluster_label\n ]).sum(axis=0), len(cluster_labels[cluster_label])\n self._clusters[cluster_label] = p_sum / p_count", "def clean_labels(labels):\n\n llabels, slabels = list(labels), set(labels)\n \n for l in slabels:\n if llabels.count(l) <2 and l != max(slabels):\n llabels[llabels.index(l)] = l+1\n return clean_labels(llabels)\n elif llabels.count(l) <2 and l == max(slabels):\n llabels[llabels.index(l)] = l-1\n return clean_labels(llabels)\n else:\n return np.array(llabels)", "def assign_labels_to_centroids(self):\n labelled_centroids = []\n for i in range(len(self.final_clusters)):\n labels = map(lambda x: x[0], self.final_clusters[i])\n # pick the most common label\n most_common = Counter(labels).most_common(1)[0][0]\n c = np.round(len([item for item in self.final_clusters[i] if item[0]==1])/len(self.final_clusters[i]),2)\n if c>=0.46:\n most_common = 1.0\n centroid = (most_common, self.final_centroids[i])\n labelled_centroids.append(centroid)\n\n self.labelled_centroids = labelled_centroids\n print(\"cluster_0: \", np.round(len([item for item in self.final_clusters[0] if item[0]==1])/len(self.final_clusters[0]),2), \"size_0: \", len(self.final_clusters[0]))\n print(\"cluster_1: \", np.round(len([item for item in self.final_clusters[1] if item[0]==1])/len(self.final_clusters[1]),2), \"size_1: \", len(self.final_clusters[1]))\n #print(\"cluster_2: \", np.round(len([item for item in self.final_clusters[2] if item[0]==1])/len(self.final_clusters[2]),2), \"size_2: \", len(self.final_clusters[2]))\n #print(\"cluster_3: \", np.round(len([item for item in self.final_clusters[3] if item[0]==1])/len(self.final_clusters[3]),2), \"size_2: \", len(self.final_clusters[3]))", "def _relabel(labels, minval=0, bgval=None):\n\n labels = np.unique(labels, return_inverse=True)[-1] + minval\n if bgval is not None:\n labels[labels == minval] = bgval\n return labels", "def change_labels(labels, cluster_name, idx_to_change, target_labels):\n assert(type(idx_to_change) == list)\n assert(type(target_labels) == list)\n assert(len(idx_to_change) == len(target_labels))\n\n sub_list = labels[labels == cluster_name]\n\n for idx, target in zip(idx_to_change, target_labels):\n sub_list[idx] = target\n\n labels[labels == cluster_name] = sub_list\n\n return labels", "def separate_noise_labels_into_clusters(\n labels: np.ndarray, noise_label: int = -1\n) -> np.ndarray:\n new_labels = labels.copy()\n max_label = np.max(new_labels)\n j = max_label + 1\n for i in range(len(new_labels)):\n if new_labels[i] == noise_label:\n new_labels[i] = j\n j += 1\n return new_labels", "def repair_labels(labels):\n ret = np.copy(labels)\n ret[:, 0] = 10 # overwrite length to be stop seq\n ret = np.roll(ret, -1, axis=1) # move first to last\n return ret", "def _rename_clusters(self):\n all_clusters = []\n temp_clusters = self._clusters.copy()\n for clu in temp_clusters:\n all_clusters.append(self._clusters.pop(clu))\n idx = 0\n for clu in all_clusters:\n label = 'S' + str(idx)\n clu.rename(label)\n self._clusters[label] = clu\n idx += 1", "def cluster_labels(kmeans,actual_labels):\r\n infered_label={}\r\n for i in range(kmeans.n_clusters):\r\n # Find Index of points in cluster\r\n labels = []\r\n index = np.where(kmeans.labels_ == i)\r\n \r\n #Append actual labels for each point\r\n 
labels.append(actual_labels[index])\r\n \r\n #Determine the most common label\r\n if len(labels[0])==1:\r\n counts=np.bincount(labels[0])\r\n else:\r\n counts=np.bincount(np.squeeze(labels))\r\n #Assign cluster to a value in the dictionary\r\n if np.argmax(counts) in infered_label:\r\n # append the new number to the existing array at this slot\r\n infered_label[np.argmax(counts)].append(i)\r\n else:\r\n # create a new array in this slot\r\n infered_label[np.argmax(counts)]=[i]\r\n return infered_label", "def _calculate_nearest_cluster(self, pixels, cluster_labels):\n\n # assign pixel (RGB) to nearest cluster label (index)\n for index, rgb in pixels:\n rgb_vector = np.tile(rgb, (self.k, 1))\n self._labels[index] = np.argmin(\n self._euclid_distance(rgb_vector, self._clusters), axis=0)\n\n if cluster_labels[self._labels[index]] is None:\n cluster_labels[self._labels[index]] = list()\n\n cluster_labels[self._labels[index]].append(rgb)\n\n return cluster_labels", "def anonymize(labels, unique_ordered_labels):\n index_dict = dict((val, idx) for idx, val in enumerate(unique_ordered_labels))\n return [index_dict[x] for x in labels]", "def normalize_labels(labels):\n number_of_labels = len(labels)\n number_of_species = get_number_of_species()\n labels_norm = np.zeros(shape=(number_of_labels, number_of_species))\n for i in range(number_of_labels):\n for label in labels[i]:\n labels_norm[i][label] = 1\n return labels_norm", "def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]", "def _relabel(self, a):\n\n labels = list(np.unique(a))\n if 0 in labels:\n labels.remove(0)\n\n if len(labels) == 0:\n return a.copy()\n old_values = np.asarray(labels)\n new_values = np.arange(1, len(labels) + 1, dtype=old_values.dtype)\n\n try:\n values_map = np.arange(int(a.max() + 1), dtype=new_values.dtype)\n except ValueError as e:\n raise ValueError(f\"{e}, arange length: {int(a.max() + 1)}\")\n values_map[old_values] = new_values\n\n return values_map[a.copy()]", "def rename_labels_by_count(labels):\n new_labels, label_counts = _count_labels(labels)\n\n return new_labels", "def cluster_assign(images_lists, dataset):\n assert images_lists is not None\n pseudolabels = []\n image_indexes = []\n for cluster, images in enumerate(images_lists):\n image_indexes.extend(images)\n pseudolabels.extend([cluster] * len(images))\n print(image_indexes)\n print(pseudolabels)\n \n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n t = transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize])\n\n return ReassignedDataset(image_indexes, pseudolabels, dataset, t)", "def process_label(self, foreground_labels):\n # Find the unique (nonnegative) foreground_labels, map them to {0, ..., K-1}\n unique_nonnegative_indices = np.unique(foreground_labels)\n mapped_labels = foreground_labels.copy()\n for k in range(unique_nonnegative_indices.shape[0]):\n mapped_labels[foreground_labels == unique_nonnegative_indices[k]] = k\n foreground_labels = mapped_labels\n return foreground_labels", "def setLabels(self, labels: List[Shape]):\n self.labels = labels\n self.update()", "def cluster_to_voxel_label(clusts: nb.types.List(nb.int64[:]),\n node_label: nb.int64[:]) -> nb.int64[:]:\n nvoxels = np.sum([len(c) for c in clusts])\n vlabel = np.empty(nvoxels, dtype=np.int64)\n stptr = 0\n for i, c in enumerate(clusts):\n endptr = 
stptr + len(c)\n vlabel[stptr:endptr] = node_label[i]\n stptr = endptr\n\n return vlabel", "def labels(self, labels: MutableMapping[str, str]):\n self._labels = labels", "def consolidate_labels(labels):\n return map(RNN_model.consolidate_label , labels)", "def classify_k_cluster(labels, datas):\n classify_k_cluster_to_redis(labels=labels, texts=datas)", "def _remove_and_relabel_blobs(labeled, wanted_blobs):\n labeled = labeled.copy()\n wanted_blobs = np.array(wanted_blobs)\n no_blobs = len(wanted_blobs)\n unwanted_blobs = np.arange(1, no_blobs+1)[np.logical_not(wanted_blobs)]\n wanted_blobs = np.arange(1, no_blobs+1)[wanted_blobs]\n\n for unwanted_blob in unwanted_blobs:\n labeled[labeled == unwanted_blob] = 0\n\n for new_label, wanted_blob in enumerate(wanted_blobs):\n new_label += 1\n labeled[labeled == wanted_blob] = -new_label\n\n return -labeled", "def change_class_labels(classes):\n u,indices=np.unique(classes,return_inverse=True)\n return u,indices", "def move_centroids(self, labelled_clusters):\n new_centroids = []\n # print(\"Cluster size\", end=\"\\t\")\n for cluster in labelled_clusters:\n new_centroids.append(self.mean_cluster(cluster))\n # print(len(cluster), end=\"\\t\" )\n # print(\"\\n\")\n return new_centroids", "def regroup_dataset(labels):\r\n batch_y = labels.copy()\r\n for i, label in enumerate(labels):\r\n if label in [0, 15, 19]:\r\n batch_y[i]=0\r\n if label in [1, 2, 3, 4, 5,]:\r\n batch_y[i]=1\r\n if label in [6]:\r\n batch_y[i]=2\r\n if label in [7,8,9,10]:\r\n batch_y[i]=3\r\n if label in [11,12,13,14]:\r\n batch_y[i]=4\r\n if label in [16,17,18]:\r\n batch_y[i]=5\r\n \r\n print('regrouped label', batch_y.shape)\r\n return batch_y", "def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')", "def labels(self, labels):\n self._labels = labels", "def get_cluster_elements_labels(self):\n \n copy = deepcopy(self.cluster_elements_labels)\n return copy", "def get_cluster_to_label_mapping_safe(y, y_pred, n_classes, n_clusters, toprint=True):\n one_hot_encoded = np_utils.to_categorical(y, n_classes)\n\n cluster_to_label_mapping = []\n n_assigned_list = []\n majority_class_fractions = []\n majority_class_pred = np.zeros(y.shape)\n for cluster in range(n_clusters):\n cluster_indices = np.where(y_pred == cluster)[0]\n n_assigned_examples = cluster_indices.shape[0]\n n_assigned_list.append(n_assigned_examples)\n cluster_labels = one_hot_encoded[cluster_indices]\n cluster_label_fractions = np.mean(cluster_labels, axis=0)\n majority_cluster_class = np.argmax(cluster_label_fractions)\n cluster_to_label_mapping.append(majority_cluster_class)\n majority_class_pred[cluster_indices] += majority_cluster_class\n majority_class_fractions.append(cluster_label_fractions[majority_cluster_class])\n if toprint:\n print(cluster, n_assigned_examples, majority_cluster_class, cluster_label_fractions[majority_cluster_class])\n #print(cluster_to_label_mapping)\n if toprint:\n print(np.unique(y), np.unique(cluster_to_label_mapping))\n try:\n # make sure there is at least 1 cluster representing 
each class\n assert np.all(np.unique(y) == np.unique(cluster_to_label_mapping))\n except AssertionError:\n # if there is no cluster for a class then we will assign a cluster to that\n # class\n \n # find which class it is\n # ASSUMPTION - this task is binary\n \n diff = list(set(np.unique(y)) - set(np.unique(cluster_to_label_mapping)))[0]\n # we choose the cluster that contains the most examples of the class with no cluster\n \n one_hot = np_utils.to_categorical(y_pred[np.where(y==diff)[0]], \\\n len(cluster_to_label_mapping))\n \n cluster_to_label_mapping[np.argmax(np.sum(one_hot, axis=0))] = int(diff)\n if toprint:\n print(cluster_to_label_mapping)\n return cluster_to_label_mapping, n_assigned_list, majority_class_fractions", "def consolidate_labels(labels):\n return list(map(RNNOIE_model.consolidate_label , labels))", "def set_labels(self, new_labels=None):\n self.labels = new_labels", "def mask_labels(labels):\n def do_one_row(row):\n erase = False\n for i, _ in enumerate(row):\n if erase:\n row[i] = 0\n else:\n if row[i] == 10:\n erase = True\n row[i] = 1\n return row\n\n ret = np.copy(labels)\n return np.apply_along_axis(do_one_row, axis=1, arr=ret)", "def recluster(dataList, labelConverter, clusterSeedList, eps, minPts):\n # print(\"starting reclustering\")\n for pt in dataList:\n pt.isVisited = False\n\n # Expand existing clusters from the seed pt.\n for seed in clusterSeedList:\n if labelConverter[seed.label] == -1:\n continue\n seed.isVisited = True\n neighborhood = findNeighbor(seed, dataList, eps)\n # Cluster is expanded iff neighbor's label is same as seed's label,\n # or label is extra cluster.(x.label != -1 and labelConverter[x.label] == -1)\n neighborhood = [x for x in neighborhood\n if (x.label != -1 and labelConverter[x.label] == -1) or x.label == seed.label]\n idx = -1\n while (idx < len(neighborhood) - 1):\n idx += 1\n if (neighborhood[idx].isVisited):\n continue\n neighborhood[idx].isVisited = True\n nextNeighbor = findNeighbor(neighborhood[idx], dataList, eps)\n nextNeighbor = [x for x in nextNeighbor\n if (x.label != -1 and labelConverter[x.label] == -1) or x.label == seed.label]\n neighborhood = neighborhood \\\n + [x for x in nextNeighbor if x not in neighborhood and not x.isVisited]\n if (labelConverter[neighborhood[idx].label] == -1):\n # print(str(neighborhood[idx].id) + \" changed to \" + str(seed.label))\n neighborhood[idx].label = seed.label", "def createSetsFromLabels(self):\n \n self.tots = [0]*self.n\n for i in range(self.n):\n self.sets.append([])\n for i in range(self.nPoints):\n self.sets[self.labels[i]].append(i)\n self.tots[self.labels[i]] += 1", "def relabel_labelmask(labelmask, preserve_order=True):\n mask = np.copy(labelmask)\n # Get all object labels and their counts.\n labels, counts = np.unique(mask, return_counts=True)\n # Get the indexes of sorted counts, descending.\n ordered_indexes = np.argsort(counts)[::-1]\n # Set largest object as background (ID=0).\n background_label = labels[ordered_indexes[0]]\n mask[mask == background_label] = 0\n # Renumber the rest of the objects 1..n.\n obj_num=1\n if (preserve_order):\n oldlabels = labels\n else:\n oldlabels = labels[ordered_indexes]\n for old_label in oldlabels:\n if (old_label != background_label):\n mask[labelmask == old_label] = obj_num\n obj_num = obj_num + 1\n return mask", "def setCSLabels(self, labels):\n\n if isinstance(labels, list):\n if len(labels) == self._n_csets:\n if all((lbl is None or isinstance(lbl, str))\n for lbl in labels):\n self._cslabels = list(labels)\n else:\n raise 
ValueError('all items of labels must be strings')\n else:\n raise ValueError('length of labels must be equal to the '\n 'number of coordinate sets')\n else:\n raise TypeError('labels must be a list')", "def truncate_labels(labels):\n def do_one_row(row):\n erase = False\n for i, _ in enumerate(row):\n if erase:\n row[i] = -1\n else:\n if row[i] == 10:\n erase = True\n return row\n\n ret = np.copy(labels)\n ret = repair_labels(ret)\n return np.apply_along_axis(do_one_row, axis=1, arr=ret)", "def labels(self, labels):\n\n self._labels = labels", "def labels(self, labels):\n\n self._labels = labels", "def labels(self, labels):\n\n self._labels = labels", "def labels(self, labels):\n\n self._labels = labels", "def labels(self, labels):\n\n self._labels = labels", "def convertLabels(self, labels):\n counter = 0\n numericLabels = []\n for label in labels:\n if label not in self.labelDict:\n self.labelDict[label] = counter\n self.backwards_conversion[counter] = label\n counter += 1\n numericLabels += [self.labelDict[label]]\n return np.array(numericLabels)", "def classify_clusters_sk(cloud: object, labels: np.ndarray) -> list:\n \"\"\"\n indices = list(dict.fromkeys(labels))\n if (-1 in indices):\n indices.remove(-1)\n clusters = [[] for i in indices]\n for (i, point) in enumerate(cloud, start=0):\n if (labels[i] != -1):\n clusters[labels[i]].append(point) \n return (clusters, indices)\n \"\"\"\n indices = list(dict.fromkeys(labels))\n if (-1 in indices):\n indices.remove(-1)\n clusters = []\n for i in indices:\n clusters.append([])\n for (idx, point) in enumerate(cloud, start=0):\n label = labels[idx]\n if (label != -1):\n clusters[label].append(point)\n for el in clusters:\n el = np.vstack(el)\n radar_clusters = [Cluster(np.asarray(el), sensor='radar') for el in clusters]\n return (radar_clusters, indices)", "def update_labels(self, nidxs, y):\n\n y = np.array(y, dtype=bool)\n for n, yi in zip(nidxs, y):\n self.node_labels[n] = [self.labels[i] for i, j in enumerate(yi) if j]\n\n return self", "def set_label(self, labels_set=None):\n for pos in labels_set:\n self._q_bnn_circ.x(self.outputs[int(pos)])", "def infer_data_labels(X_labels, cluster_labels):\r\n #Empty array of len(X)\r\n predicted_labels = np.zeros(len(X_labels)).astype(np.uint8)\r\n \r\n for i, cluster in enumerate(X_labels):\r\n for key, value in cluster_labels.items():\r\n if cluster in value:\r\n predicted_labels[i] = key\r\n \r\n return predicted_labels", "def rectifyClusteringLabels( labels, rectificationWidth=5):\r\n\r\n\tfor l in range( len(labels) - rectificationWidth):\r\n\t\tfirstLabelIndex = l\r\n\t\tmedianLabelIndex = l + ( rectificationWidth//2)\r\n\t\tlastLabelIndex = l + rectificationWidth -1\r\n\t\tfirstLabel = labels[firstLabelIndex]\r\n\t\tmedianLabel = labels[ medianLabelIndex]\r\n\t\tlastLabel = labels[lastLabelIndex]\r\n\t\tleftLabels = labels[firstLabelIndex +1 : medianLabelIndex]\r\n\t\trightLabels = labels[ medianLabelIndex +1 : lastLabelIndex]\r\n\t\tif medianLabel != firstLabel and medianLabel != lastLabel:\r\n\t\t\tfor label in leftLabels:\r\n\t\t\t\tif label != firstLabel: continue\r\n\t\t\tfor label in rightLabels:\r\n\t\t\t\tif label != lastLabel: continue\r\n\t\t\tlabels[medianLabelIndex] = labels[medianLabelIndex - 1]\r\n\treturn labels", "def labels(self, points):\n\n assert(self.final_clusters is not None)\n\n labels = np.array([self.classify_point(point) for point in points])\n \n return labels", "def change_class_labels_back(classes,given):\n classes=np.asarray(classes)\n 
classes_new=np.zeros(classes.shape,dtype=object)\n for i in range(len(given)):\n classes_new[classes==i]=given[i]\n return classes_new", "def flip_random_labels(labels, percent):\n\n assert percent <= 1.0 and percent >= 0.0\n \n num_labels = len(labels)\n indices = np.random.permutation(num_labels)[:int(num_labels * percent)]\n labels[indices] = -labels[indices]\n return labels", "def reformat_labels(label, bin_limits=[2]):\n# num_labels = y_batch.max() + 1\n label = np.array([label], dtype=np.float32)\n num_labels = 2\n label = np.digitize(label, bins=[2])\n label = (np.arange(num_labels) == label[:, None]).astype(np.float32)[0]\n return label", "def initClusters(self):\n if len(self.labelList) != len(self.pointList):\n \traise ValueError(\"Label List and Point List not the same length!\")\n for i in range(len(self.labelList)):\n self.centroids[self.labelList[i]] = self.pointList[i]\n self.pointcounts[self.labelList[i]] = 1", "def propagate_labels_simple(regions,labels):\n rlabels,_ = label(regions)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n for o,i in cors.T: outputs[o] = i\n outputs[0] = 0\n return outputs[rlabels]", "def update_plot_clusters(cluster_handles, data_points, cluster_ids):\n for handle, cluster_id in zip(cluster_handles, np.unique(cluster_ids)):\n loc = cluster_id == cluster_ids\n handle.set_data(data_points[loc, 0], data_points[loc, 1])", "def ConfigureCluster(messages, args, cluster):\n cluster.labels = labels_util.ParseCreateArgs(args,\n messages.Cluster.LabelsValue)", "def form_clusters(self, labelled_data, unlabelled_centroids):\n # enumerate because centroids are arrays which are unhashable,\n centroids_indices = range(len(unlabelled_centroids))\n # initialize an empty list for each centroid. 
The list will contain\n # all the datapoints that are closer to that centroid than to any other.\n # That list is the cluster of that centroid.\n clusters = {c: [] for c in centroids_indices}\n \n for (label, Xi) in labelled_data:\n # for each datapoint, pick the closest centroid.\n smallest_distance = float(\"inf\")\n for cj_index in centroids_indices:\n cj = unlabelled_centroids[cj_index]\n distance = np.linalg.norm(Xi - cj)\n if distance < smallest_distance:\n closest_centroid_index = cj_index\n smallest_distance = distance\n # allocate that datapoint to the cluster of that centroid.\n clusters[closest_centroid_index].append((label,Xi))\n return list(clusters.values())", "def deletemessageslabels(self, uidlist, labels):\n\n labels = labels - self.ignorelabels\n result = self._messagelabels_aux('-X-GM-LABELS', uidlist, labels)\n if result:\n for uid in uidlist:\n self.messagelist[uid]['labels'] = self.messagelist[uid]['labels'] - labels", "def cfilter(centers, labels, t=5, position_likes=None, labels_likes=None):\n if position_likes is None:\n position_likes = []\n \n if labels_likes is None:\n labels_likes = []\n \n # backup the labels and the clusters\n centers, labels = deepcopy(centers), deepcopy(labels)\n unique_labels = np.unique(labels)\n \n # Create lists for each cluster contaning the related nodes\n components = []\n node_labels = [None for _ in range(len(labels))]\n \n for label in np.sort(unique_labels):\n component = np.argwhere(label == labels).T\n assert len(component) == 1\n\n for node in component[0]:\n node_labels[node] = label\n\n components.append(component[0].tolist())\n \n # Find the cluster indices of the to small clusters\n removal = []\n for idx, component in enumerate(components):\n if len(component) < t:\n removal.append(idx)\n \n # delete the too small centers\n new_centers = np.delete(centers, removal)\n \n # remove the old labels from the labels array\n position_removal = []\n labels_np = np.array(labels)\n \n for label in removal:\n position_removal += np.argwhere(label==labels_np).T[0].tolist()\n \n new_labels = np.delete(labels, position_removal)\n \n \n # adjust the position arrays\n new_position_likes = []\n for pos_like in position_likes:\n new_position_likes.append(np.delete(pos_like, position_removal, axis=0))\n \n # adjust the labels arrays\n new_labels_likes = []\n for lab_like in labels_likes:\n new_labels_likes.append(np.delete(lab_like, position_removal))\n \n \n # remove te gaps in the labels array (They sholud be the indices of the clusters array...):\n difference = [0 for i in range(len(unique_labels))]\n last_difference = 0\n \n for label in np.sort(unique_labels):\n if label in removal:\n last_difference += 1\n \n difference[label] = last_difference\n \n for idx, label in enumerate(new_labels):\n new_labels[idx] -= difference[label]\n \n \n # adjust the cluster centers to the fact, that the position array changed (they are the indices of it...): \n pos_difference = [0 for i in range(len(labels))]\n last_difference = 0\n \n for idx in range(len(labels)):\n if idx in position_removal:\n last_difference += 1\n \n \n pos_difference[idx] = last_difference\n \n for idx, center in enumerate(new_centers):\n new_centers[idx] -= pos_difference[center]\n \n \n \n # Adjust the output to the input\n if len(new_position_likes) != 0:\n if len(new_labels_likes) != 0:\n return new_centers, new_labels, new_position_likes, new_labels_likes\n \n else:\n return new_centers, new_labels, new_position_likes\n \n elif len(new_labels_likes) != 0:\n return 
new_centers, new_labels, new_labels_likes\n \n return new_centers, new_labels", "def _alter(self, label):\n altered = np.full(self.n, -1)\n altered[np.where(self.y_train == label)] = +1\n return altered", "def reassign_noise(labels: np.ndarray, mask):\n ret = labels.copy()\n ret[mask] = np.arange(np.sum(mask)) + np.max(ret) + 1\n return ret", "def prepare_labels(labels, class_mask):\n mask = [1 if elt else -1 for elt in class_mask]\n mask = np.array(mask)\n return labels.dot(mask)", "def update_pivot_labels(l, maxn):\n tmp = maxn+1\n tmps = []\n for i in range(0, len(l)):\n if l[i] > MAX_NODES-1 or l[i] in tmps:\n l[i] = tmp\n tmp += 1\n tmps.append(l[i])\n return l", "def assign_labels(self, data):\n data[self.label] = self.labeler(data.index.values)", "def transform_labels(self, labels):\n # Fallback:\n # return self.encoder.transform(labels)\n classes = list(self.classes_())\n return [classes.index(label) for label in labels]", "def transform_labels(self, labels):\n # Fallback:\n # return self.encoder.transform(labels)\n classes = list(self.classes_())\n return [classes.index(label) for label in labels]", "def replace_image_point_labels(image, labels):\n img = image.copy()\n for label, point in labels:\n row, col = point\n # Find the existing label at the point\n index = img[int(row), int(col)]\n # Replace the existing label with new, excluding background\n if index > 0:\n img[img == index] = label\n\n return img", "def associate_clusters(self, labels_1, labels_2):\n if not torch.is_tensor(labels_1):\n labels_1 = torch.cat(labels_1).cuda()\n\n if not torch.is_tensor(labels_2):\n labels_2 = torch.cat(labels_2).cuda()\n\n assert labels_1.shape == labels_2.shape, \"Shape mismatch: {}, {}\".format(labels_1.shape, labels_2.shape)\n\n # do not associate the outlier ID with anything\n unique_labels_1 = list(set(labels_1.unique().tolist()) - {self.OUTLIER_LABEL})\n unique_labels_2 = list(set(labels_2.unique().tolist()) - {self.OUTLIER_LABEL})\n\n assert not set(unique_labels_1).intersection(set(unique_labels_2)), \\\n \"Labels overlap: {}, {}\".format(unique_labels_1, unique_labels_2)\n\n association_costs = np.zeros((len(unique_labels_1), len(unique_labels_2)), np.float32)\n recall_12 = np.zeros((len(unique_labels_1), len(unique_labels_2)), np.float32)\n\n # iterate over pairs of labels\n for i1, i2 in [(i1, i2) for i1 in range(len(unique_labels_1)) for i2 in range(len(unique_labels_2))]:\n l1, l2 = unique_labels_1[i1], unique_labels_2[i2]\n l1_active_pts = labels_1 == l1\n l2_active_pts = labels_2 == l2\n\n intersection = (l1_active_pts & l2_active_pts).float().sum()\n union = (l1_active_pts | l2_active_pts).float().sum()\n iou = intersection / union\n\n # print(\"IoU ({}, {}) = {}\".format(l1, l2, iou.item()))\n association_costs[i1, i2] = 1. 
- iou.item()\n recall_12[i1, i2] = intersection / l1_active_pts.sum(dtype=torch.float32)\n\n idxes_1, idxes_2 = linear_sum_assignment(association_costs)\n\n associations = []\n unassigned_labels_1 = set(unique_labels_1)\n unassigned_labels_2 = set(unique_labels_2)\n\n for i1, i2 in zip(idxes_1, idxes_2):\n l1, l2 = unique_labels_1[i1], unique_labels_2[i2]\n associations.append((l1, l2))\n unassigned_labels_1.remove(l1)\n unassigned_labels_2.remove(l2)\n\n return associations, unassigned_labels_1, unassigned_labels_2, association_costs[idxes_1, idxes_2], \\\n (recall_12, unique_labels_1, unique_labels_2)", "def class2color(self, labels, clean_up_clusters=0, mode=None):\n clean_up_clusters *= clean_up_clusters # create an area\n colored_labels = np.zeros(labels.shape[:2] + (3,)).astype(np.uint8)\n labels = np.squeeze(labels)\n if clean_up_clusters > 0:\n labels = DropClusters.drop(labels, min_size=clean_up_clusters)\n ys, xs = np.where(labels)\n colored_labels[ys, xs, :] = self.label_color\n return colored_labels", "def _remove_duplicates(labels):\n to_delete = []\n for i in range(len(labels)):\n for j in range(i + 1, len(labels)):\n if intersection_ratio(labels[i], labels[j]) >= 0.5:\n to_delete.append(j)\n to_delete = np.unique(np.array(to_delete))\n return np.delete(labels, to_delete, 0)", "def _merge_clusters(self, cl1, cl2):\n label = ''\n to_delete = ''\n if cl1 < cl2:\n label = cl1\n to_delete = cl2\n else:\n label = cl2\n to_delete = cl1\n to_keep = self.get_cluster(label)\n to_remove = self._clusters.pop(to_delete)\n to_keep.merge(to_remove)", "def add_label_to_unique_species_labels(self, label: str) -> str:\n unique_label, i = label, 0\n while unique_label in self.unique_species_labels:\n unique_label = f'{label}_{i}'\n i += 1\n self.unique_species_labels.append(unique_label)\n return unique_label", "def label_centroids_heuristically(self, centroids: np.ndarray):\n\n cluster_centroids_labels = [(\"\", {}) for c in centroids]\n\n centre_point = centroids[0]\n heuristic_centroids = np.array(\n [\n centre_point + [-30, 30],\n centre_point + [30, 30],\n centre_point + [0, -48.125],\n ]\n )\n heuristic_centroid_labels = [\n ConstJoint.LEFT_EYE,\n ConstJoint.RIGHT_EYE,\n ConstJoint.MOUTH,\n ]\n labeled = [False for c in centroids]\n used_label = [False for c in heuristic_centroids]\n while self.__are_labels_matched_with_centroids(cluster_centroids_labels, \"\"):\n min_dist_square = math.inf\n min_centroid = 0\n min_cluster = 0\n current_cluster = {}\n for i, c in enumerate(centroids):\n if labeled[i]:\n continue\n for j, cl in enumerate(heuristic_centroids):\n if used_label[j]:\n continue\n diff = c - cl\n dist_square = diff.dot(diff)\n\n if dist_square < min_dist_square:\n min_centroid = i\n current_cluster = c\n min_cluster = j\n min_dist_square = dist_square\n\n cluster_centroids_labels[min_centroid] = (\n heuristic_centroid_labels[min_cluster],\n current_cluster,\n )\n labeled[min_centroid] = True\n used_label[min_cluster] = True\n\n return cluster_centroids_labels", "def assign_labels_to_longer_segs(self, uniq_id, base_scale_clus_label):\n per_scale_clus_label = []\n self.scale_n = len(self.multiscale_timestamp_dict[uniq_id]['scale_dict'])\n uniq_scale_mapping = get_scale_mapping_list(self.multiscale_timestamp_dict[uniq_id])\n for scale_index in range(self.scale_n):\n new_clus_label = []\n scale_seq_len = len(self.multiscale_timestamp_dict[uniq_id][\"scale_dict\"][scale_index][\"time_stamps\"])\n for seg_idx in range(scale_seq_len):\n if seg_idx in uniq_scale_mapping[scale_index]:\n 
seg_clus_label = mode(base_scale_clus_label[uniq_scale_mapping[scale_index] == seg_idx])\n else:\n seg_clus_label = 0 if len(new_clus_label) == 0 else new_clus_label[-1]\n new_clus_label.append(seg_clus_label)\n per_scale_clus_label.extend(new_clus_label)\n per_scale_clus_label = torch.tensor(per_scale_clus_label)\n return per_scale_clus_label, uniq_scale_mapping", "def normalize_label(labels):\n max_val = torch.max(labels)\n min_val = torch.min(labels)\n norm_labels = (labels - min_val)/(max_val - min_val)\n return norm_labels", "def map_clusters(labels, rows):\r\n counts = Counter(labels)\r\n mappings = {c + 1: ((counts[c] / rows) * 100) for c in sorted(counts)}\r\n\r\n return mappings", "def AddLabelsFromString(self, labels):\n if self.labels is None:\n self.labels = set()\n\n self.labels = self.labels.union([x.strip() for x in labels.split(',')])", "def propagate_labels(image,labels,conflict=0):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n oops = -(1<<30)\n for o,i in cors.T:\n if outputs[o]!=0: outputs[o] = oops\n else: outputs[o] = i\n outputs[outputs==oops] = conflict\n outputs[0] = 0\n return outputs[rlabels]", "def apply_remap_values(labels: np.ndarray, label_map: Dict[int, int]) -> np.ndarray:\n for l1, l2 in label_map.items():\n labels[labels == l1] = l2", "def clustering_by_label(instances, label, meta_dataset, logger):\n clusters = []\n impurities = {\n item[0]: item[1]\n for item in meta_dataset.items() if item[0] != label}\n impurities = list(itertools.chain(*impurities.values()))\n\n while instances:\n # List is not empty\n cluster = gift_wrapping(instances, impurities, logger)\n\n found = cluster['dimension'] < len(cluster['vertices'])\n _dataset = []\n vertices = []\n points = []\n for vertex in instances:\n if vertex in cluster['vertices']:\n vertices.append(vertex)\n else:\n if found and check_inside_hull(cluster['faces'], vertex):\n points.append(vertex)\n else:\n _dataset.append(vertex)\n\n if found:\n volume = round(calculate_volume(cluster['faces']), 15)\n elif len(cluster['faces'][0]) > 1:\n volume = round(numpy.exp(squared_area(cluster['faces'][0])), 15)\n else:\n volume = 0.0\n\n instances = _dataset\n clusters.append({'vertices': vertices,\n 'points': points,\n 'size': len(vertices) + len(points),\n 'volume': volume})\n\n logger.info(\n 'Clustering: %d clusters found, '\n '%d/%d instance processed for label %r',\n len(clusters), len(meta_dataset[label]) - len(instances),\n len(impurities) + len(meta_dataset[label]), label)\n\n return clusters", "def convert_labels_to_int(labels): \n if not len(labels):\n raise ValueError(\"Nothing to convert!\")\n\n if isinstance(labels[0], basestring) or isinstance(labels[0], int):\n label_legend = list(set(labels))\n converted_labels = [ label_legend.index(l) for l in labels ]\n elif is_iterable(labels[0]): # Multiple labels, handle each one individually\n num_labels = len(labels[0])\n label_legend = []\n converted_labels = []\n \n # Get the unique sets of labels for each index\n for i in xrange(num_labels):\n temp_labels = [l[i] for l in labels]\n label_legend.append(list(set(temp_labels)))\n\n # Apply mapping to each label\n for label in labels:\n converted_label = [ leg.index(l) for leg, l in \n zip(label_legend, label) ]\n converted_labels.append(converted_label)\n else: # Not a string/int, not a list... 
hmmm\n raise ValueError('Unexpected label type!')\n\n return label_legend, array(converted_labels)", "def get_partition_from_labels(self, labels):\n partition = defaultdict(list)\n for ind, label in enumerate(labels):\n partition[label].append(ind)\n self.clean_partition(partition)\n return partition", "def spread_labels(labels,maxdist=9999999):\n #distances,features = morphology.distance_transform_edt(labels==0,return_distances=1,return_indices=1)\n #indexes = features[0]*labels.shape[1]+features[1]\n #spread = labels.ravel()[indexes.ravel()].reshape(*labels.shape)\n if not labels.any():\n return labels\n distances,indexes = cv2.distanceTransformWithLabels(array(labels==0,uint8),cv2.DIST_L2,cv2.DIST_MASK_PRECISE,labelType=cv2.DIST_LABEL_PIXEL)\n spread = labels[where(labels>0)][indexes-1]\n if maxdist is None:\n return spread, distances\n spread *= (distances<maxdist)\n return spread", "def cluster_labels_pairs(cluster_labels, k):\n points_in_cluster = np.zeros((k,), dtype=int)\n pairs_num = 0\n for num in cluster_labels:\n points_in_cluster[num] += 1\n for cluster in points_in_cluster:\n pairs_num += (cluster**2 - cluster) / 2\n return pairs_num", "def forget_labels(labels_to_forget=\"none\"):\n\t\t\tassert labels_to_forget in {\"none\",\"originally unlabelled\",\"all\"}\n\t\t\tif labels_to_forget != \"none\":\n\t\t\t\tif labels_to_forget == \"originally unlabelled\":\n\t\t\t\t\tself.train_labels___0_unlab__neg1_exclud=self.train_orig_labels.copy()\n\t\t\t\telif labels_to_forget == \"all\":\n\t\t\t\t\tself.train_labels___0_unlab__neg1_exclud=np.zeros(self.num_train)\n\t\t\t\telse:\n\t\t\t\t\tassert False\n\t\t\t\tself.bool_train_labelled=(self.train_labels___0_unlab__neg1_exclud>0)\n\t\t\t\tself.bool_train_unlabelled=(self.train_labels___0_unlab__neg1_exclud==0)\n\t\t\t\tself.bool_train_excluded=(self.train_labels___0_unlab__neg1_exclud<0)\n\t\t\t\tself.num_train_labelled=sum(self.bool_train_labelled)\n\t\t\t\tself.num_train_unlabelled=sum(self.bool_train_unlabelled)\n\t\t\t\tself.num_train_excluded=sum(self.bool_train_excluded)", "def _labels_to_ordinals(self, labels, mask):\n one_to_n = tf.range(1, self._ordinal_size + 1, dtype=tf.float32)\n unsqueezed = tf.repeat(\n tf.expand_dims(labels, axis=2), self._ordinal_size, axis=-1)\n ordinals = tf.where(unsqueezed >= one_to_n, tf.ones_like(unsqueezed), 0.0)\n if self._use_fraction_label:\n fractions = unsqueezed - one_to_n + 1.0\n fractions = tf.where(\n tf.logical_and(fractions > 0.0, fractions < 1.0), fractions, 0.0)\n ordinals += fractions\n return tf.where(tf.expand_dims(mask, axis=-1), ordinals, 0.0)", "def from_labels_map(cls, labels_map):\n mask_index = cls()\n for index, value in iteritems(labels_map):\n mask_index[index] = CategoricalAttribute(\"label\", value)\n\n return mask_index", "def undo_uniform_list_length(labels):\n for label in labels:\n while \" \" in label:\n label.remove(\" \")\n return labels", "def make_fixed_labels(self):\n fixed_labels = []\n for dim in range(self.opt.c_dim):\n t = [0] * self.opt.c_dim\n t[dim] = 1\n t = torch.FloatTensor(t).expand([self.opt.batch_size, self.opt.c_dim])\n fixed_labels.append(t)\n return fixed_labels", "def labels_to_scores(labels):\n device = sp.get_device(labels)\n xp = device.xp\n with device:\n num_classes = labels.max() + 1\n scores = xp.zeros([len(labels), num_classes], dtype=np.float32)\n scores[xp.arange(len(labels)), labels] = 1\n\n return scores", "def getLabel(labels):\r\n elems = {}\r\n for l in labels:\r\n if l not in elems.keys():\r\n elems[l] = 1\r\n else:\r\n 
elems[l] += 1\r\n counts = sorted(elems.values(), reverse=True)\r\n if len(counts) > 1 and counts[0] == counts[1]:\r\n return choice(list(elems.keys()))\r\n return sorted(elems, key=elems.get, reverse=True)[0]", "def change_class_labels_to_given(classes,given):\n classes=np.asarray(classes)\n classes_new=np.zeros(classes.shape,dtype=object)\n for i in given:\n classes_new[classes==i]=given[i]\n return classes_new", "def fix_label_names():\n\n assert trace.cpu.trace_done\n binary_addr = memorymanager.BinaryAddr(0)\n while binary_addr < len(classifications):\n c = classifications[binary_addr]\n if c is not None:\n dummy = [str(x) for x in c.as_string_list(binary_addr, None)]\n binary_addr += c.length()\n else:\n binary_addr += 1", "def _empty_clusters(clusters):\n for clst in clusters:\n clst.points = []", "def inlabel_shuffle(data):\n num_zero_data = np.sum(data[:,-1]==0)\n label_zero_data = data[:num_zero_data,:]\n label_one_data = data[num_zero_data:,:]\n np.random.shuffle(label_zero_data)\n np.random.shuffle(label_one_data)\n return data", "def relabeled(self, memo=None, labels=None):\n from copy import deepcopy\n\n self._deepcopy_relabel_ = True\n self._deepcopy_labels_ = labels\n new = deepcopy(self, memo)\n del self._deepcopy_relabel_\n del self._deepcopy_labels_\n return new", "def annotate(self, ignore=(-1,), shuffle=True, chunk_size=10):\n\n self.chunk_size = chunk_size\n\n self.new_clusters = OrderedDict.fromkeys(self.clusters)\n\n try:\n self.new_clusters.pop(ignore, None)\n except TypeError:\n for value in ignore:\n self.new_clusters.pop(value, None)\n\n self.new_labels = self.cluster_labels.copy().astype(float)\n self.new_labels[:] = np.nan\n\n self._label_queue = deque(self.new_clusters.keys())\n self._already_labelled = deque([])\n if shuffle:\n np.random.shuffle(self._label_queue)\n\n self._annotation_loop = self._annotation_iterator()\n # reset the progress bar\n self.progressbar.max = len(self._label_queue)\n self.progressbar.value = 0\n\n # start the iteration cycle\n return next(self._annotation_loop)", "def overlay_labels(labels):\n labels_scaled = rescaled(labels,0,256)\n labels_colored = plt.get_cmap(\"jet\")(labels_scaled)\n border_mask = region_borders(labels) & (labels > 0)\n labels_colored[~border_mask,:,3] = 0 # set alpha to zero\n return labels_colored", "def binarize_labels(labels):\n labels = np.where(labels == 0, labels, 1)\n\n return labels" ]
[ "0.68851614", "0.6850147", "0.6721148", "0.6478987", "0.643922", "0.6345853", "0.6343787", "0.6329328", "0.62441224", "0.6120226", "0.6087101", "0.60843414", "0.60319865", "0.5999183", "0.59800136", "0.5965767", "0.5940681", "0.59308654", "0.592457", "0.5924074", "0.5922587", "0.59185356", "0.5911539", "0.59012073", "0.5877225", "0.5865721", "0.58591425", "0.5842034", "0.5841271", "0.583055", "0.58052087", "0.57852286", "0.57824177", "0.5774432", "0.5773876", "0.57583994", "0.57548106", "0.5750352", "0.5749712", "0.5749712", "0.5749712", "0.5749712", "0.5749712", "0.5726109", "0.57021797", "0.56795007", "0.5652587", "0.56458086", "0.563204", "0.5627547", "0.5619535", "0.5607772", "0.55898774", "0.5588308", "0.5584888", "0.5572411", "0.5559481", "0.55373293", "0.5535396", "0.5533", "0.5522069", "0.55180997", "0.55126876", "0.55126005", "0.5509811", "0.5505415", "0.5505415", "0.550264", "0.55001694", "0.5483458", "0.54727143", "0.5470554", "0.54704493", "0.54658973", "0.5455896", "0.5445543", "0.54352915", "0.5432285", "0.54200834", "0.5417614", "0.54157406", "0.54087716", "0.54015917", "0.53999066", "0.5382394", "0.538059", "0.53634214", "0.5358435", "0.5354307", "0.53521377", "0.5351596", "0.5348534", "0.5342153", "0.5340996", "0.5340373", "0.5325329", "0.53194404", "0.53154236", "0.53067553", "0.530533" ]
0.6535897
3
Modify the column name to make it Python-compatible as a field name
def normalize_col_name(col_name, used_column_names, is_relation):
    field_params = {}
    field_notes = []

    new_name = col_name.lower()
    if new_name != col_name:
        field_notes.append('Field name made lowercase.')

    if is_relation:
        if new_name.endswith('_id'):
            new_name = new_name[:-3]
        else:
            field_params['db_column'] = col_name

    new_name, num_repl = re.subn(r'\W', '_', new_name)
    if num_repl > 0:
        field_notes.append('Field renamed to remove unsuitable characters.')

    if new_name.find(LOOKUP_SEP) >= 0:
        while new_name.find(LOOKUP_SEP) >= 0:
            new_name = new_name.replace(LOOKUP_SEP, '_')
        if col_name.lower().find(LOOKUP_SEP) >= 0:
            # Only add the comment if the double underscore was in the original
            # name
            field_notes.append(
                "Field renamed because it contained more than one '_' in a row."
            )

    if new_name.startswith('_'):
        new_name = 'field%s' % new_name
        field_notes.append("Field renamed because it started with '_'.")

    if new_name.endswith('_'):
        new_name = '%sfield' % new_name
        field_notes.append("Field renamed because it ended with '_'.")

    if keyword.iskeyword(new_name):
        new_name += '_field'
        field_notes.append(
            'Field renamed because it was a Python reserved word.')

    if new_name[0].isdigit():
        new_name = 'number_%s' % new_name
        field_notes.append(
            "Field renamed because it wasn't a valid Python identifier.")

    if new_name in used_column_names:
        num = 0
        while '%s_%d' % (new_name, num) in used_column_names:
            num += 1
        new_name = '%s_%d' % (new_name, num)
        field_notes.append('Field renamed because of name conflict.')

    if col_name != new_name and field_notes:
        field_params['db_column'] = col_name

    return new_name, field_params, field_notes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_column_name(self, column, idx, old_name, name):\n dtype = self.dtype\n # Updating the names on the dtype should suffice\n dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1 :]", "def py_field_name(self, field):\n name = field.name\n name = as_identifier(name)\n if self.options(field).convert_case:\n name = from_camel_case(name)\n name = self._mangle_name(name)\n return name", "def col_name(col):\n\n if isinstance(col, str):\n return col\n return col.__name__", "def encodeColumnName(self, column):\r\n return '\"{}\"'.format(column)", "def column_name(name):\n # Only needs exceptions to standard token cleanup\n column_map = {\n \"line#\" : \"ignore\",\n \"date\" : \"timestamp\",\n \"rh\" : \"humidity\",\n \"par\" : \"par_ue\"\n }\n\n if name in column_map:\n return column_map[name]\n \n return name", "def set_column_name(self, name):\r\n self.column_name = name", "def set_column_name(self, name):\r\n self.column_name = name", "def _column_original_name(name):\n if ':' in name:\n return name.split(':')[-1]\n else:\n return name", "def name(self):\n if self.table:\n return \"{}.{}\".format(self.table, self.field_name)\n return self.field_name", "def wrap_columns_name(self, format_string):\n self._data_frame = self._data_frame.rename(\n columns=lambda column: format_string.format(column)\n )", "def rename_columns(self, col):\n try:\n self.cleaned_data.columns = col\n except Exception as e:\n raise e", "def db_field_name(self):\r\n return self.db_field or self.column_name", "def db_field_name(self):\r\n return self.db_field or self.column_name", "def short_column(name : str) -> str:\n return name.split(\"-\")[1]", "def get_name(self):\n return self.col_name", "def capnp_field_name(self, field):\n name = field.name\n return as_identifier(name)", "def _valid_column(column_name):\n return str(column_name)", "def namehack(field):\n if field.endswith((\"attribute\", \"views\")):\n return field + \"__name\"\n else:\n return field", "def column_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"column_name\")", "def column_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"column_name\")", "def _validate_column_name(col_name : str) -> str:\n\n if col_name[0].isdigit():\n return f'\"{col_name}\"'\n return col_name", "def typed_column(self) -> str:\n\n return \"{}:{}\".format(self.name, self.dtype)", "def rename(self, newname):\n # set the new column name\n self.colname = newname", "def __getitem__(self, item):\n if isinstance(item, str):\n name_dict = {n.lower():n for n in self.colnames}\n item = item.lower()\n item = ','.join([name_dict[i] for i in item.split(',')])\n out = APtable.__getitem__(self, item)\n return out", "def as_field(identifier: str) -> str:\n return identifier.lower()", "def _remap_column_names(self, frame):\n\n frame[TransactionColumns.BANK.name] = self.INSTITUTION\n frame[TransactionColumns.ACCOUNT.name] = self.account\n frame.rename(columns=self._FIELD_2_TRANSACTION, inplace=True)\n frame[TransactionColumns.CHECK_NO.name] = None\n return frame", "def column_name(self) -> Optional[str]:\n return pulumi.get(self, \"column_name\")", "def test_column_name(self):\n field = self.base_field\n sch = SchemaField(field)\n self.assertEqual(sch.name, sch.column_name)\n self.assertNotEqual(sch.column_name, sch.title)", "def getColName(self, col):\n try:\n return chr(ord('a') + col)\n except:\n return col", "def getColName(self, col):\n try:\n return chr(ord('a') + col)\n except:\n return col", "def generate_field_name(container, field):\n if 
\"standard_name\" in container.fields[field]:\n field_name = container.fields[field][\"standard_name\"]\n elif \"long_name\" in container.fields[field]:\n field_name = container.fields[field][\"long_name\"]\n else:\n field_name = str(field)\n field_name = field_name.replace(\"_\", \" \")\n field_name = field_name[0].upper() + field_name[1:]\n return field_name", "def __set_name(self):\n table_name = self.get_table_name()\n record, timestamp = self.__get_max_timestamp()\n self.name = \"%s_%s_%s\" % (table_name, record, timestamp)", "def new_index_name(self, column, unique=False):\n table_name = self.name.lstrip('migrate_')\n column = column.replace(', ', '')\n return '{}_{}_{}{}'.format(table_name, column, self._random_string(6), '_unique' if unique else '')", "def format_column(self, column, use_table=False, name=None, table_name=None):\n if name is None:\n name = column.name\n if not getattr(column, 'is_literal', False):\n if use_table:\n return self.format_table(column.table, use_schema=False, name=table_name) + \".\" + self.__generic_obj_format(column, name)\n else:\n return self.__generic_obj_format(column, name)\n else:\n # literal textual elements get stuck into ColumnClause alot, which shouldnt get quoted\n if use_table:\n return self.format_table(column.table, use_schema=False, name=table_name) + \".\" + name\n else:\n return name", "def test_dummydb_add_data_to_table_wrong_column_name(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)\n result = db.select(\"new_table\", four=1)", "def _field_name(self) -> str:\n name = self._resolve_field_name()\n if name is None:\n # pylint: disable=consider-using-f-string\n raise FieldNameError(\n \"No field name found among: explicit name = {}, inferred name = {}\".format(\n self.__name_explicit, self.__name_contextual\n )\n )\n return name", "def safe_column_name(string):\n string = unidecode(string.replace(' ', '_').lower())\n return re.sub(r'[^0-9a-z_]','', string)", "def alter_column(self, table_name, name, field, explicit_name=True):\r\n # Get the column's SQL\r\n field.set_attributes_from_name(name)\r\n if not explicit_name:\r\n name = field.column\r\n sql = self.column_sql(table_name, name, field)\r\n # Remake the table correctly\r\n self._remake_table(table_name, altered={name: sql})", "def new_constraint_name(self, column, type):\n name = self.name.lstrip('migrate_')[:30]\n if type == 'UNIQUE':\n return '{}_{}_{}_uniq'.format(name, column[:15], self._random_string(8))\n elif type == 'PRIMARY KEY':\n return '{}_{}_pkey'.format(name, self._random_string(4))\n else:\n raise NotImplementedError('Name not implemented for type {}'.format(type))", "def name_field(self):\r\n return 'name'", "def __getitem__(self, field_name: str) -> ColumnField:\n return ColumnField(self, field_name=field_name)", "def undo_format_field_name(field_name):\n if json_api_settings.FORMAT_FIELD_NAMES:\n return format_value(field_name, \"underscore\")\n\n return field_name", "def update_column_title(col):\n col_type = self.features_bucket_mapping_.get(col).type\n return [f\"Feature '{col}'\"], [col_type]", "def __tablename__(self):\n return sub(r\"(?<!^)(?=[A-Z])\", \"_\", self.__name__).lower()", "def rename_column(self, table_name, old, new):\r\n self._remake_table(table_name, renames={old: new})", "def get_name(tablename):\n\n return tablename[tablename.find(\"_\") + 1:].replace(\"_\", \" \").capitalize()", "def get_bare_field_name(field_name: str) -> str:\n\n return 
re.sub(r\"_[^_]+$\", \"\", field_name).replace(\"human_readable_\", \"\")", "def update_column_format(self):\n pass", "def columnName(self):\n return self.__column", "def _get_column_name(df, name='agg'):\n while name in df.columns:\n name += '_'\n return name", "def get_column_name(self) -> str:\n if self.is_shared():\n assert self._shared_id is not None\n return self._shared_id\n else:\n return str(id(self))", "def get_column_def(self):\r\n return '{} {}'.format(self.cql, self.db_type)", "def _clean_up_table_column_names(loop_dict):\n \n # Make the column names all lowercase\n # and remove any underscores from the beginning\n for key in loop_dict.keys():\n rename_dict = { x:re.sub(r\"\"\"^_\"\"\", '', x.lower()) for x in loop_dict[key].columns }\n loop_dict[key].rename(columns=rename_dict, inplace=True)\n \n return loop_dict", "def change_type(self, col_name, str_type):\n if self[col_name] is not None:\n self[col_name] = self[col_name].astype(str_type)", "def _str_colnames(self):\n return ', '.join(self.galcat.colnames)", "def substitute_names(df):\n\n masking_tag = '_sql'\n duplicated_names = ['SwitchName', 'Fabric_Name', 'SwitchMode', 'Memory_Usage', 'Flash_Usage', 'Speed']\n replace_dct = {orig_name + masking_tag: orig_name for orig_name in duplicated_names}\n df.rename(columns=replace_dct, inplace=True)", "def _get_custom_attribute_field_name(self, attribute):\n return 'attribute_{0}'.format(attribute.id)", "def namingConvention(columnName):\n words = columnName.lower().split(\"_\")\n\n def cap(word):\n if word.lower() == \"id\":\n return word.upper()\n else:\n return word.capitalize()\n\n return words[0] + \"\".join(map(cap, words[1:]))", "def colNames_string(self):\n # SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'some_table';\n return \"SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = \"", "def fix_name(row, index, name_map):\n # print(\"Input row: {}\".format(row))\n name = row[index].strip()\n # print(\"Name entry is {}\".format(name))\n if name.endswith(\" (yourself)\"):\n name = name[:-len(\" (yourself)\")]\n # print(\"Shortening to |{}|\".format(name))\n if name not in name_map:\n name_map[name] = name # Initially the identity transform\n row[index] = name_map[name]", "def give_field(self, name):\n return self.field(name).toPyObject()", "def get_ast_field_name(ast):\n replacements = {\n # We always rewrite the following field names into their proper underlying counterparts.\n TYPENAME_META_FIELD_NAME: '@class'\n }\n base_field_name = ast.name.value\n normalized_name = replacements.get(base_field_name, base_field_name)\n return normalized_name", "def _table_name(self, name: AnyStr) -> bytes:\n name = ensure_bytes(name)\n if self.table_prefix is None:\n return name\n return self.table_prefix + self.table_prefix_separator + name", "def name(self) -> str:\n return self.fqtable.replace(\".\", \"_\")", "def _str_colnames(self):\n return ', '.join(self.colnames)", "def to_sql(self, model_cls, field_name, value):\n raise NotImplementedError # pragma: no cover", "def NAME(self) -> str:\n return self._field_name", "async def set_db_name_field(self, db_name_field):\n self.db_name_field = db_name_field", "def new_fk_index_name(self, column, fk_column):\n return '{}_refs_{}_{}'.format(column, fk_column, self._random_string(8))", "def rename(self, old_column, new_column, data_type):\n self.table.rename_column(old_column, new_column, data_type)\n return self", "def __tablename__(cls) -> str:\n return inflection.underscore(cls.__name__)", "def 
_label(self, column):\n # XXX\n return column", "def undo_format_field_names(obj):\n if json_api_settings.FORMAT_FIELD_NAMES:\n return format_field_names(obj, \"underscore\")\n\n return obj", "def rename_columns(self):\n self.data.rename(columns={\n 'DealerID': 'd_id',\n 'DealerName': 'd_name',\n 'Type': 'stock_type', # Needs capitalization\n 'Stock': 'stock_id',\n 'VIN': 'vin',\n 'Year': 'year',\n 'Make': 'make',\n 'Model': 'model',\n 'Body': 'trim', # Needs parsing\n 'Trim': 'body_style',\n 'Doors': 'doors',\n 'ExtColor': 'exterior_colour',\n 'IntColor': 'interior_colour',\n 'EngCylinders': 'cylinders',\n 'EngDisplacement': 'displacement', # Needs parsing\n 'Transmission': 'transmission_description', # Needs parsing and split\n 'Odometer': 'odometer',\n 'Price': 'price',\n 'MSRP': 'msrp',\n 'Description': 'description',\n 'EngType': 'configuration',\n 'EngFuel': 'fuel_type',\n 'Drivetrain': 'drivetrain',\n 'ExtColorGeneric': 'exterior_colour_generic', # Needs parsing\n 'IntColorGeneric': 'interior_colour_generic', # Needs parsing\n 'PassengerCount': 'passengers'\n }, inplace=True)\n\n return None", "def get_field(cls, name):\n if name not in cls.get_field_names():\n # - check field name first, next: column name -\n name = cls.get_field_name(name)\n return getattr(cls, name, None)", "def setFieldNames(self, model, lyr): \n #get the fields\n fields = lyr.pendingFields()\n position = 0\n \n #set column names\n for field in fields:\n model.setHorizontalHeaderItem(position, QStandardItem(field.name()))\n position+=1", "def _transformed_name(key: Text) -> Text:\n return key + \"_xf\"", "def make_column(options, name, column):\n # (ElasticsearchFDWOptions, str, multicorn.ColumnDefinition) -> Column\n assert name not in {\n options.rowid_column,\n options.score_column,\n options.query_column,\n }, \"Programmer error: bad name passed to make_column {name}\".format(name=name)\n\n if column.base_type_name.upper() in {\"JSON\", \"JSONB\"}:\n return JsonColumn(name=name)\n return BasicColumn(name=name)", "def __init__(self, column_type, name):\n self.column_type = column_type\n self.name = name", "def autoname(self):\n ret = \"%(table)s_%(reftable)s_fkey\"%dict(\n table=self.table.name,\n reftable=self.reftable.name,\n )\n return ret", "def act_on_column_name(self, *, arg, value):\n assert arg is None\n assert isinstance(value, str)", "def translate_db_fields(cls, data):\r\n dst_data = data.copy()\r\n for name, col in cls._columns.items():\r\n key = col.db_field or name\r\n if key in dst_data:\r\n dst_data[name] = dst_data.pop(key)\r\n\r\n return dst_data", "def del_column(self, fieldname):\n ...", "def _sanitize_field_name(self, field_name):\n field_name = field_name.replace(self._field_prefix, '')\n return field_name.replace('.', '_')", "def camel_case_to_mysql(name: str) -> str:\n\n view_name = name\n\n # Convert to MySQL view name\n idx_prefix = 'Ix'\n if view_name.startswith(idx_prefix):\n view_name = view_name.replace(idx_prefix, '', 1)\n return CobolDefinition.name_to_snake_case(view_name) + '_view'\n\n return CobolDefinition.name_to_snake_case(view_name)", "def rename_id_col(df: pd.DataFrame):\r\n for col in df.columns:\r\n if \"id\" in col:\r\n df.rename(columns={col: col.replace(\"-\", \"_\")}, inplace=True)\r\n return df", "def test_rename_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n title_column = Varchar()\n title_column._meta.name = \"title\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n 
columns=[name_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[title_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.rename_columns.statements) == 1)\n self.assertEqual(\n schema_differ.rename_columns.statements[0],\n \"manager.rename_column(table_class_name='Band', tablename='band', old_column_name='title', new_column_name='name', old_db_column_name='title', new_db_column_name='name')\", # noqa\n )", "def update_column_title():\n\n column_id = request.get_json()['column_id']\n column_title = request.get_json()['column_title']\n\n return sql_manager.update_column_title(column_id, column_title)", "def __to_key(name: str) -> str:\n return name.replace(\" \", \"-\")", "def get_column_def(self):\r\n db_type = self.db_type.format(self.value_type.db_type)\r\n return '{} {}'.format(self.cql, db_type)", "def _normalize_column(column):\n if not isinstance(column, str):\n msg = \"expected column of type 'str', got {0!r} instead\"\n raise TypeError(msg.format(column.__class__.__name__))\n column = column.strip()\n column = column.replace('\"', '\"\"') # Escape quotes.\n if column == '':\n column = '_empty_'\n return '\"' + column + '\"'", "def _normalize_column(column):\n if not isinstance(column, str):\n msg = \"expected column of type 'str', got {0!r} instead\"\n raise TypeError(msg.format(column.__class__.__name__))\n column = column.strip()\n column = column.replace('\"', '\"\"') # Escape quotes.\n if column == '':\n column = '_empty_'\n return '\"' + column + '\"'", "def get_field_class_name(field):\n return field.capitalize() + \"Field\"", "def _resolve_ref(self, name):\n if name not in self.fields:\n raise RuntimeError(\"No field with name `{}`\".format(name))\n\n field = self.fields.get(name)\n field.set_attributes_from_name(name)\n return Col(self.name, field, output_field=field)", "def sql_for_columns(self, data, qn, connection):\n table_alias, _name, db_type = data\n\n fun = connection.ops.field_cast_sql\n\n if table_alias:\n lhs = [fun(f.db_type(connection)) % '%s.%s' % (qn(table_alias), qn(f.column)) for f in self.fields]\n else:\n lhs = [fun(f.db_type(connection)) % qn(f.column) for f in self.fields]\n return Atoms(self.fields, lhs)", "def field_names(self):\n ...", "def format_colname(name):\n colnames = [\n \"AV\",\n \"RV\",\n \"EBV\",\n \"CAV1\",\n \"CAV2\",\n \"CAV3\",\n \"CAV4\",\n \"C1\",\n \"C2\",\n \"C3\",\n \"C4\",\n \"x_o\",\n \"gamma\",\n \"bump_area\",\n \"fh2\",\n \"nhtot\",\n \"nh2\",\n \"nhi\",\n \"NH_AV\",\n \"NH_EBV\",\n ]\n plotnames = [\n \"$A(V)$\",\n \"$R(V)$\",\n \"$E(B-V)$\",\n \"$C^{A(V)}_1$\",\n \"$C^{A(V)}_2$\",\n \"$C^{A(V)}_3$\",\n \"$C^{A(V)}_4$\",\n \"$C_1$\",\n \"$C_2$\",\n \"$C_3$\",\n \"$C_4$\",\n \"$x_o$\",\n r\"$\\gamma$\",\n r\"$\\pi C^{A(V)}_3 / 2 \\gamma$\",\n \"$f(H_2)$\",\n \"$N(H)$\",\n \"$N(H_2)$\",\n \"$N(HI)$\",\n \"$N(H)/A(V)$\",\n \"$N(H)/E(B-V)$\",\n ]\n dic_pairs = dict(zip(colnames, plotnames))\n\n out_name = name\n if name[:3] == \"log\":\n out_name = r\"$\\log (\" + name[3:].upper() + \")$\"\n elif name in dic_pairs.keys():\n out_name = dic_pairs[name]\n\n return out_name", "def _field_prefix(self):\n if self.layer_name == 'geninfo':\n return ''\n return self.layer_name + '.'", "def _make_alias(self, agg_func, code, col):\n\t\treturn DELIMITER.join([agg_func.prefix(), code, self.name(), col])", "def _field_name_flag(field: _d.Field):\r\n 
return f\"--{field.name.rstrip('_').replace('_', '-')}\"" ]
[ "0.7072573", "0.7007338", "0.6994978", "0.69579434", "0.68072045", "0.6802428", "0.6802428", "0.67907166", "0.6732477", "0.6600031", "0.65641886", "0.64931035", "0.64931035", "0.64816284", "0.6459289", "0.64281356", "0.63902634", "0.6334104", "0.6324554", "0.6324554", "0.62862897", "0.6277314", "0.62329", "0.6204517", "0.6178222", "0.61457276", "0.6116452", "0.61137515", "0.6037867", "0.6037867", "0.6021725", "0.60192513", "0.59939206", "0.5990731", "0.59853417", "0.59742796", "0.59728163", "0.5941572", "0.59278464", "0.5924766", "0.59223235", "0.5900049", "0.5889538", "0.5882949", "0.58741105", "0.5867748", "0.5842101", "0.5841567", "0.5837694", "0.5822072", "0.57839173", "0.5775221", "0.5773551", "0.5761088", "0.575862", "0.57581127", "0.57547784", "0.5732902", "0.5731511", "0.5722033", "0.5713239", "0.57056814", "0.57017636", "0.5689161", "0.56811345", "0.5664873", "0.56612074", "0.56606156", "0.56585294", "0.5655326", "0.56533086", "0.5653031", "0.56509554", "0.5647386", "0.5626526", "0.559765", "0.55964524", "0.5589174", "0.55836254", "0.5583242", "0.5581165", "0.5576898", "0.5574576", "0.5564658", "0.5562101", "0.55595887", "0.55535525", "0.55443704", "0.5535675", "0.55331427", "0.5527466", "0.5527466", "0.55250585", "0.55134696", "0.55020696", "0.5501646", "0.54988164", "0.5492232", "0.5490315", "0.54811484" ]
0.7277575
0
Given the database connection, the table name, and the cursor row description, this routine will return the given field type name, as well as any additional keyword parameters and notes for the field.
def get_field_type(connection, table_name, row): field_params = OrderedDict() field_notes = [] is_geometry = False try: field_type = connection.introspection.get_field_type(row[1], row) except KeyError: field_type = 'TextField' field_notes.append('This field type is a guess.') # This is a hook for data_types_reverse to return a tuple of # (field_type, field_params_dict). if type(field_type) is tuple: field_type, new_params = field_type field_params.update(new_params) # Add max_length for all CharFields. if field_type == 'CharField' and row[3]: field_params['max_length'] = int(row[3]) if field_type == 'DecimalField': if row[4] is None or row[5] is None: field_notes.append( 'max_digits and decimal_places have been guessed, as this ' 'database handles decimal fields as float') field_params['max_digits'] = row[4] if row[4] is not None else 10 field_params['decimal_places'] = row[ 5] if row[5] is not None else 5 else: field_params['max_digits'] = row[4] field_params['decimal_places'] = row[5] if field_type == 'GeometryField': geo_col = row[0] # Getting a more specific field type and any additional parameters # from the `get_geometry_type` routine for the spatial backend. field_type, geo_params = connection.introspection.get_geometry_type( table_name, geo_col) field_params.update(geo_params) is_geometry = True return field_type, field_params, is_geometry # return getattr(models.fields, field_type), field_params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_fields(self):\n if not self._cursor.description:\n return {}\n\n results = {}\n column = 0\n\n for des in self._cursor.description:\n fieldname = des[0]\n results[column] = fieldname\n column = column + 1\n\n return results", "def get_field_type(self, table_name, field_name):\n \n dtype = self.field_types[(self.field_types.TABNAME == table_name) & (self.field_types.FIELDNAME == field_name)]['DATATYPE'].values[0] \n return dtype", "def getFieldDetails(self, field_name):\n try:\n value_list = []\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_field_details', [field_name, results])\n\n for row in results:\n # column_name, data_type, desc_or_value, definition, active\n value_list.append((row[0], row[1], row[2], row[3], row[4]))\n \n if len(value_list) == 0:\n # If not found in the dictionary, assume this is a user-created column\n value_list.append((field_name, 'text', '', ''))\n \n return value_list[0]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def get_geometry_type(self, table_name, description):\n with self.connection.cursor() as cursor:\n cursor.execute(\n \"\"\"\n SELECT t.coord_dimension, t.srid, t.type FROM (\n SELECT * FROM geometry_columns\n UNION ALL\n SELECT * FROM geography_columns\n ) AS t WHERE t.f_table_name = %s AND t.f_geometry_column = %s\n \"\"\",\n (table_name, description.name),\n )\n row = cursor.fetchone()\n if not row:\n raise Exception(\n 'Could not find a geometry or geography column for \"%s\".\"%s\"'\n % (table_name, description.name)\n )\n dim, srid, field_type = row\n # OGRGeomType does not require GDAL and makes it easy to convert\n # from OGC geom type name to Django field.\n field_type = OGRGeomType(field_type).django\n # Getting any GeometryField keyword arguments that are not the default.\n field_params = {}\n if self.postgis_oid_lookup.get(description.type_code) == \"geography\":\n field_params[\"geography\"] = True\n if srid != 4326:\n field_params[\"srid\"] = srid\n if dim != 2:\n field_params[\"dim\"] = dim\n return field_type, field_params", "def _convert_field_type(row):\n return row", "def get_column_def(self):\r\n return '{} {}'.format(self.cql, self.db_type)", "def parse_description(_descriptions, _db_type):\n _field_names = []\n _field_types = []\n\n \"\"\"name, type_code, display_size, internal_size, precision, scale, null_ok\"\"\"\n\n for _column in _descriptions:\n _field_names.append(_column[0])\n if _db_type == DB_MYSQL:\n _field_types.append(mysql_type_to_sql_type(_column[1]))\n else:\n _field_types.append(_column[1])\n\n return _field_names, _field_types", "def test_get_field_type_text_field(self):\n db_introspection = DatabaseIntrospection(self.connection)\n self.assertEqual(\n db_introspection.get_field_type(\n TypeCode.STRING,\n description=ColumnInfo(\n name=\"name\",\n type_code=TypeCode.STRING,\n internal_size=\"MAX\",\n ),\n ),\n \"TextField\",\n )", "def field_type(self):\n return \"\"", "def field_type(name):\n if name not in field_types:\n field_types[name] = records.fields_get([name], attributes=['type'])[name]['type']\n return field_types.get(name)", "def findMetadataTable(self, field_name, field_type, log, study_id, lock):\n \n # log passed in from writeMetadataValue() - it's a list. 
At end of function, \n # exception handler will output contents of log to web for viewing if error\n # occurrs.\n \n try:\n table = ''\n field_name = field_name.upper()\n field_name.replace('\"', '')\n\n # Fill out the field list if it's the first call\n log.append('Length of fields is: {0}'.format(str(len(self.fields))))\n if len(self.fields) == 0:\n log.append('Filling out field list for table lookup. Current field is \"{0}\"'.format(field_name))\n lock.acquire()\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.find_metadata_table', [results])\n for tab_name, col_name in results:\n if col_name not in self.fields:\n self.fields[col_name] = []\n self.fields[col_name].append(tab_name)\n lock.release()\n \n log.append('field{} successfully filled out')\n \n if field_name in self.fields:\n # If there's only one hit we can assign it\n tables = self.fields[field_name]\n log.append('Type of variable is: %s' % str(tables))\n if len(self.fields[field_name]) == 1:\n table = self.fields[field_name][0]\n log.append('Field only in one table: %s' % table)\n \n # More than one table was found with this column name. Find the correct one\n # based on the study id\n else:\n log.append('Field in multiple tables(%s): %s' % (len(self.fields[field_name]), str(self.fields[field_name])))\n log.append('Study is is: %s' % study_id)\n for table_name in self.fields[field_name]:\n if str(study_id) in table_name:\n table = table_name\n \n # If table is not found, assume user-defined column\n else:\n \"\"\" Code may look bizarre... but here's why:\n 1. To streamline access and prevent blocking, we first check to see if the field\n does exist in the field list. If it does, we do not have to lock and can simply\n look up the table name.\n \n 2. If field is not in list, it must be a new column. In this case we must lock the \n code that handles new column creation. The catch is that if two threads both hit the lock\n with the same field name, one will get in and the other will block. Once the initial thread \n exists, it will have handled the new column, gotten the appropriate table name, and returned. \n The 2nd thread will now enter the critical section, however if we don't again check to see \n if the field is now in the field list, it will attempt to create the same column again and \n fail. Thus we check a 2nd time to see if the field exists and if so, simply read it from the \n field list. \n \"\"\"\n lock.acquire() \n if field_name in self.fields:\n log.append('Field now exists. Pulling from local list.')\n table = self.fields[field_name][0]\n log.append('Table name exists. Using \"%s\".' % table)\n else:\n log.append('Entities do not exist. Creating...')\n table = self.handleExtraData(study_id, field_name, field_type, log)\n log.append('Entities created. 
Table name is \"%s\"' % table)\n if field_name not in self.fields:\n self.fields[field_name] = [table]\n else:\n self.fields[field_name].append(table)\n lock.release()\n \n log.append('Returning from findMetadataTable with value: %s' % str(table))\n return table\n\n except Exception, e:\n lock.release()\n log.append('Exception caught: %s' % str(e))\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n raise Exception('\\n'.join(log))", "def get_column_def(self):\r\n db_type = self.db_type.format(self.value_type.db_type)\r\n return '{} {}'.format(self.cql, db_type)", "def get_unique_name(self, cursor, field_name=None):\n if cursor.kind in [CursorKind.UNEXPOSED_DECL]:\n return ''\n # covers most cases\n name = cursor.spelling\n if cursor.kind == CursorKind.CXX_BASE_SPECIFIER:\n name = cursor.type.spelling\n # if it's a record decl or field decl and its type is anonymous\n if name == '':\n # if cursor.is_anonymous():\n # a unnamed object at the root TU\n if (cursor.semantic_parent\n and cursor.semantic_parent.kind == CursorKind.TRANSLATION_UNIT):\n name = self.make_python_name(cursor.get_usr())\n log.debug('get_unique_name: root unnamed type kind %s',cursor.kind)\n elif cursor.kind in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL,\n CursorKind.CLASS_DECL,CursorKind.FIELD_DECL]:\n name = self._make_unknown_name(cursor, field_name)\n log.debug('Unnamed cursor type, got name %s',name)\n else:\n log.debug('Unnamed cursor, No idea what to do')\n #import code\n #code.interact(local=locals())\n return ''\n if cursor.kind in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL,\n CursorKind.CLASS_DECL, CursorKind.CXX_BASE_SPECIFIER]:\n names= {CursorKind.STRUCT_DECL: 'struct',\n CursorKind.UNION_DECL: 'union',\n CursorKind.CLASS_DECL: 'class',\n CursorKind.TYPE_REF: '',\n CursorKind.CXX_BASE_SPECIFIER: 'class'\n }\n name = '%s_%s'%(names[cursor.kind],name)\n log.debug('get_unique_name: name \"%s\"',name)\n return name", "def get_column_def(self):\r\n db_type = self.db_type.format(\r\n self.key_type.db_type,\r\n self.value_type.db_type\r\n )\r\n return '{} {}'.format(self.cql, db_type)", "def get_eltype_from_sbmfielddesc(hgf_field):\n\tq = \"\"\"SELECT type FROM sbmFIELDDESC where name='%s'\"\"\" %(hgf_field)\n\treturn run_sql(q)[0][0]", "def getdocfield(fieldname):\t\t\n\tl = [d for d in doctype_dl if d.doctype=='DocField' and d.fieldname==fieldname]\n\treturn l and l[0] or None", "def test_row_description(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(a int, b char(3))\")\n cursor.execute(\"insert into t1 values(1, 'abc')\")\n row = cursor.execute(\"select * from t1\").fetchone()\n assert cursor.description == row.cursor_description", "def get_field_from_sbmfielddesc(hgf_field):\n\tq = \"\"\"SELECT * FROM sbmFIELDDESC where name='%s'\"\"\" %(hgf_field)\n\treturn run_sql(q)[0]", "def get_type(self) -> str:\n return self.row_dict['type']", "def _read_metadata(self, conn, tbl_name): \n # Split table name in libname and actual table name\n name, schema = tuple(tbl_name.split('.'))\n # Query the Vertica dictionary to get types and formats\n query = \"\"\"\n SELECT column_name as NAME, data_type as TYPE, data_type_length AS LENGTH \n FROM v_catalog.columns \n WHERE table_schema = '{}' AND table_name = '{}'\n \"\"\".format(name, schema)\n \n md = conn.fetch(query)\n if not len(md):\n raise ValueError('No metadata for table {}'.format(tbl_name))\n\n md = (md\n # Use variable names as row names, then remove the NAME column\n .set_index('NAME', inplace=False)\n # Compute the number of bytes 
for each variable It is given by the LENGTH variable\n .rename({'LENGTH': 'NUM_BYTES'}, axis=1))\n\n # Identify data types\n type_upper = md['TYPE'].str.upper()\n md['IS_TEXT'] = type_upper.str.startswith('VARCHAR')\n md['IS_BOOLEAN'] = type_upper == 'BOOLEAN'\n md['IS_INTEGER'] = type_upper.isin(['INT', 'INTEGER'])\n md['IS_FLOAT'] = (type_upper == 'FLOAT') | type_upper.str.startswith('NUMERIC')\n md['IS_DATE'] = type_upper == 'DATE'\n md['IS_TIMESTAMP'] = type_upper == 'TIMESTAMP'\n md['IS_TIME'] = type_upper == 'TIME'\n # Determine datetime formats for date and time data\n md['DATETIME_FORMAT'] = np.nan\n md.loc[md['IS_DATE'], 'DATETIME_FORMAT'] = 'yyyy-MM-dd'\n md.loc[md['IS_TIME'], 'DATETIME_FORMAT'] = 'HH:mm:ss'\n # Determine datetime formats for timestamp data\n # For timestamp data, the right format is:\n # - yyyy-MM-dd HH:mm:ss.0 with a JDBC connection <-- python default\n # - yyyy-MM-dd HH:mm:ss with an ODBC connection\n md.loc[md['IS_TIMESTAMP'], 'DATETIME_FORMAT'] = 'yyyy-MM-dd HH:mm:ss.0'\n\n # Original type\n md.rename({'TYPE': 'TYPE_IN_SOURCE'}, axis=1, inplace=True)\n # Create the metadata catalog\n md = MetadataCatalog(md, is_case_sensitive=False)\n # Check that all formats have been correctly processed\n format_check = md.check_metadata_completeness()\n if not all(format_check):\n unsupported_format = md.get_type_in_source()\n unsupported_format = unsupported_format[~format_check].unique()\n raise ValueError('Unsupported Vertica format: {}'.format(unsupported_format))\n return md", "def columns_type(self,table):\n with self.conn.cursor() as cur:\n #_logger.debug('Columns Query. sql: %r', self.table_columns_query)\n cur.execute(self.columns_info_query % (self.dbname,table))\n for row in cur:\n yield row", "def _get_tabletype(cls) -> str:\n raise NotImplementedError", "def field_type(self) -> Optional[NameObject]:\n return self.get(\"/FT\")", "def get_field_type(field):\n if (field < len(Field.FIELD_TYPES)):\n return Field.FIELD_TYPES[field][1]\n return 'unknown'", "def db_field_name(self):\r\n return self.db_field or self.column_name", "def db_field_name(self):\r\n return self.db_field or self.column_name", "def get_column_type(type_name: str) -> object:\n raise NotImplementedError", "def _get_fields(self, table):\n fields = list()\n for column in table.columns:\n fields.append({'id': column.name, 'type': str(column.type)})\n return fields", "def db_fields(self):", "def dict_factory(self, cursor, row):\n results = {}\n for index, col_name in enumerate(cursor.description):\n results[col_name[0]] = row[index]\n\n return results", "def get_field(cls, name):\n if name not in cls.get_field_names():\n # - check field name first, next: column name -\n name = cls.get_field_name(name)\n return getattr(cls, name, None)", "def cursor_data(c):\r\n\r\n # pull column description\r\n d = []\r\n for i in range(len(c.description)):\r\n d.append(c.description[i][0])\r\n\r\n # fetch column entries\r\n c = c.fetchall()\r\n\r\n # compile list\r\n info = []\r\n for i in range(len(c)):\r\n # compile dictionary entry\r\n entry = {}\r\n for j in range(len(d)):\r\n entry[d[j]] = c[i][j]\r\n info.append(entry)\t\r\n\r\n # success\r\n return info", "def getfield(value, arg):\n #import pdb; pdb.set_trace()\n if hasattr(value, \"fields\"):\n fields = getattr(value, \"fields\")\n if str(arg) in fields:\n return str(fields[str(arg)])", "def _update_desc(self):\n if not self.connection:\n self.close()\n cname = CREATE_BUFFER_U(1024)\n ctype_code = C_SHORT()\n csize = ctypes.c_size_t()\n cdisp_size = 
C_SSIZE_T(0)\n c_decimal_digits = C_SHORT()\n cnull_ok = C_SHORT()\n col_descr = []\n self._col_type_code_list = []\n for col in range(1, self._num_of_cols() + 1):\n ret = ODBC_API.SQLColAttribute(self.stmt_h, col, 6, ADDR(CREATE_BUFFER(10)), 10, ADDR(C_SHORT()), ADDR(cdisp_size))\n if ret != SQL_SUCCESS:\n check_success(self, ret)\n ret = ODBC_API.SQLDescribeColW(self.stmt_h, col, cname, len(cname), ADDR(C_SHORT()), ADDR(ctype_code), ADDR(csize), ADDR(c_decimal_digits), ADDR(cnull_ok))\n if ret != SQL_SUCCESS:\n check_success(self, ret)\n # (name, type_code, display_size,\n col_descr.append((from_buffer_u(cname), SQL_DATA_TYPE_DICT.get(ctype_code.value, (ctype_code.value,))[0], cdisp_size.value, csize.value, csize.value, c_decimal_digits.value, cnull_ok.value == 1 and True or False))\n self._col_type_code_list.append(ctype_code.value)\n if len(col_descr) > 0:\n self.description = col_descr\n # Create the row type before fetching.\n self._row_type = self.row_type_callable(self)\n else:\n self.description = None\n self._create_col_buf()", "def _get_fields(self):\n tables = [self.sell_table, self.buy_table, self.pending_table]\n for table in tables:\n sql = f'SHOW COLUMNS in {table}'\n self.fields[table] = list(tb.Database().read(sql).Field)", "def get_field_def(schema, parent_type, field_ast):\n name = field_ast.name.value\n if name == SchemaMetaFieldDef.name and schema.get_query_type() == parent_type:\n return SchemaMetaFieldDef\n\n elif name == TypeMetaFieldDef.name and schema.get_query_type() == parent_type:\n return TypeMetaFieldDef\n\n elif name == TypeNameMetaFieldDef.name and \\\n isinstance(parent_type, (\n GraphQLObjectType,\n GraphQLInterfaceType,\n GraphQLUnionType,\n )):\n return TypeNameMetaFieldDef\n\n elif isinstance(parent_type, (GraphQLObjectType, GraphQLInterfaceType)):\n return parent_type.get_fields().get(name)", "def type(self):\n return self._field.type", "def _to_known_field(cls, field_name: str, value) -> (Column, dict):\n field_names = field_name.split(\".\", maxsplit=1)\n if len(field_names) == 2:\n for field in cls.__fields__:\n if field.name == field_names[0] and field.field_type == dict:\n return field, {field_names[1]: value}\n return None, None", "def dict_factory(cursor, row):\r\n\td = {}\r\n\tfor idx, col in enumerate(cursor.description):\r\n\t\td[col[0]] = row[idx]\r\n\treturn d", "def dict_factory(cursor, row):\n fields = [column[0] for column in cursor.description]\n return {key: value for key, value in zip(fields, row)}", "def __parse_field(self, field, tuple_descriptor, alias_on_complex_types, make_visible):\r\n alias = None\r\n field_type = None\r\n return_type = None\r\n underlying_fields = None\r\n aggregate_factory = None\r\n literal_value = None\r\n func_factory = None\r\n fields_to_verify = []\r\n parsed_fds = []\r\n field_backup = list(field)\r\n self.__clean_list(field)\r\n \r\n # parse aliases if they exist\r\n if (len(field) >= 4) and (field[-2] == QueryTokens.AS):\r\n alias = field[-1]\r\n field = field[:-2]\r\n if (field[0] == QueryTokens.STRING_LITERAL) or \\\r\n (field[0] == QueryTokens.INTEGER_LITERAL) or \\\r\n (field[0] == QueryTokens.FLOAT_LITERAL): \r\n alias = self.unnamed_operator_name()\r\n underlying_fields = []\r\n field_type = FieldType.LITERAL\r\n literal_value = field[1]\r\n if field[0] == QueryTokens.STRING_LITERAL:\r\n return_type = ReturnType.STRING\r\n elif field[0] == QueryTokens.INTEGER_LITERAL:\r\n return_type = ReturnType.INTEGER\r\n literal_value = int(literal_value)\r\n elif field[0] == QueryTokens.FLOAT_LITERAL:\r\n 
return_type = ReturnType.FLOAT\r\n literal_value = float(literal_value)\r\n elif field[0] == QueryTokens.COLUMN_NAME: # field or alias\r\n if alias == None:\r\n alias = field[1]\r\n field_descriptor = tuple_descriptor.get_descriptor(field[1])\r\n if field_descriptor == None: # underlying field not yet defined. mark to check later\r\n field_type = FieldType.UNDEFINED\r\n underlying_fields = [field[1]]\r\n # check alias and underlying once this process is done to\r\n # find yet-undefined fields\r\n fields_to_verify.append(field[1])\r\n fields_to_verify.append(alias)\r\n else: # field found, copy information\r\n field_type = field_descriptor.field_type\r\n return_type = field_descriptor.return_type\r\n underlying_fields = field_descriptor.underlying_fields\r\n aggregate_factory = field_descriptor.aggregate_factory\r\n func_factory = field_descriptor.func_factory\r\n elif field[0] == QueryTokens.FUNCTION_OR_AGGREGATE: # function or aggregate \r\n if alias == None:\r\n if alias_on_complex_types:\r\n raise QueryException(\"Must specify alias (AS clause) for '%s'\" % (field[1]))\r\n else:\r\n alias = self.unnamed_operator_name()\r\n underlying_field_list = field[2:]\r\n underlying_fields = []\r\n for underlying in underlying_field_list:\r\n (parsed_fd_list, parsed_verify) = self.__parse_field(underlying, tuple_descriptor, False, False)\r\n for parsed_fd in parsed_fd_list:\r\n parsed_fd.visible = False\r\n fields_to_verify.extend(parsed_verify)\r\n parsed_fds.extend(parsed_fd_list)\r\n underlying_fields.append(parsed_fd_list[0].alias)\r\n aggregate_factory = get_aggregate_factory(field[1])\r\n if aggregate_factory != None: # found an aggregate function\r\n field_type = FieldType.AGGREGATE\r\n return_type = ReturnType.FLOAT\r\n else:\r\n function_information = self.function_registry.get_function(field[1])\r\n if function_information != None:\r\n field_type = FieldType.FUNCTION\r\n func_factory = function_information.func_factory\r\n return_type = function_information.return_type\r\n else:\r\n raise QueryException(\"'%s' is neither an aggregate or a registered function\" % (field[1]))\r\n else:\r\n raise QueryException(\"Empty field clause found: %s\" % (\"\".join(field_backup)))\r\n fd = FieldDescriptor(alias, underlying_fields, field_type, return_type, aggregate_factory, func_factory, literal_value)\r\n fd.visible = make_visible\r\n parsed_fds.insert(0, fd)\r\n return (parsed_fds, fields_to_verify)", "def get_field_type(field, table):\n for i in settings.GTFS_SPEC['resources']:\n print(i['name'])\n if i['name'] == table:\n for j in i['schema']['fields']:\n print(j['name'])\n if j['name'] == field:\n return j['gtfs_type']\n raise ValueError(\"Field not found in GTFS spec.\")", "def dict_factory(cursor, row):\n dic = {}\n for idx, col in enumerate(cursor.description):\n if isinstance(row[idx], unicode):\n dic[col[0]] = u.unicode_to_string(row[idx])\n else:\n dic[col[0]] = row[idx]\n return dic", "def datetime_type_field_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"datetime_type_field_name\")", "def __getitem__(self, field_name):\n\n if field_name in self._module._fields.keys():\n try:\n return self._fields[field_name]\n except KeyError:\n if self['id'] == '':\n # If this is a new entry, the 'id' field is yet undefined.\n return ''\n else:\n # Retrieve the field from the SugarCRM connection.\n \n q_str = \"%s.id='%s'\" % (self._module._table, self['id'])\n res = self._module._connection.get_entry_list(\n self._module._name, q_str,\n '', 0, [field_name], 1, 0)\n\n nvl = 
res['entry_list'][0]['name_value_list']\n for attribute in nvl:\n if attribute == field_name:\n value = nvl[attribute]['value']\n if value:\n self._fields[attribute] = \\\n HTMLParser().unescape(\n nvl[attribute]['value'])\n else:\n self._fields[attribute] = ''\n\n return self._fields[attribute]\n\n else:\n raise AttributeError", "def find_type(line):\n type_string = line.split(':')[-1].strip(\";\\n \")\n isOptional = False\n if \"|\" in type_string:\n isOptional = True\n type_string = type_string.split(' | ')[0].strip()\n\n column_type = type_mapping[type_string]\n\n register_imports(column_type, isOptional)\n\n return (column_type, isOptional)", "def writeMetadataValue(self, field_type, key_field, field_name, field_value, \\\n study_id, host_key_field, row_num, lock):\n \n # This is a mess and it's slow right now. In need of serious speed improvements and cleanup.\n \n statement = ''\n log = []\n pk_name = ''\n\n try:\n #lock.acquire()\n \n # Get our database connection\n con = self.getMetadataDatabaseConnection()\n \n # Set the timeout\n #con.cursor().execute('alter session set ddl_lock_timeout=100') \n \n # Find the table name\n log.append('Locating table name...')\n table_name = None\n table_name = self.findMetadataTable(field_name, field_type, log, study_id, lock)\n log.append('Successfully found table name. Table name is \"%s\"' % str(table_name))\n \n #if field_name == 'dw1':\n # raise Exception('asdf')\n\n # Double-quote for database safety\n log.append('Adding quotes to table name...')\n table_name = '\"' + table_name + '\"' \n log.append('Quoted table name is %s' % table_name)\n log.append('Table name found: %s' % (table_name))\n \n # Store the field name in the database. These are the field names which will\n # be used later to generate a mapping file. We collect the names here because\n # it's an expensive operation to determine post-commit which fields were\n # actually submitted to the database.\n log.append('Attempting to store values in study_actual_columns: %s, %s, %s'\\\n % (study_id, field_name, table_name))\n self.addStudyActualColumn(study_id, field_name, table_name);\n \n # Get extended field info from the database\n log.append('Loading field details...')\n field_details = self.getFieldDetails(field_name)\n log.append(str(field_details))\n if field_details == None:\n log.append('Could not obtain detailed field information. Skipping.')\n raise Exception\n else:\n database_data_type = field_details[1]\n log.append('Read field database data type as \"%s\"' % database_data_type)\n \n # If the field value is 'unknown', switch to 'null' (empty string is the same as null)\n #pass_values = \n if str(field_value).upper() == 'UNKNOWN':\n field_value = ''\n # Figure out if this needs to be an integer ID instead of a text value\n elif database_data_type == 'list':\n log.append('Field is of type list. Looking up integer value...')\n named_params = {'field_value':field_value.upper()}\n statement = 'select vocab_value_id from controlled_vocab_values where upper(term) = :field_value'\n statement = str(statement)\n log.append(statement)\n results = con.cursor().execute(statement, named_params).fetchone()\n if results != None:\n # If found, set the field_value to its numeric identifier for storage\n log.append('Value found in controlled_vocab_values. Old field value: \"%s\", new field value: \"%s\".'\\\n % (field_value, results[0]))\n field_value = results[0]\n else:\n log.append('Could not determine inteteger value for list term \"%s\" with value \"%s\". 
Skipping.'\\\n % (field_name, field_value))\n raise Exception\n \n # Set the field_name to it's quoted upper-case name to avoid key-work issues with Oracle\n field_name = '\"%s\"' % field_name.upper()\n \n ########################################\n ### For STUDY\n ########################################\n \n # Study is special - handle separately since row is guaranteed to exist and there can only be one row\n if table_name == '\"STUDY\"' or 'EXTRA_STUDY_' in table_name:\n log.append('Updating study field...')\n named_params = {'field_value':field_value, 'study_id':study_id}\n statement = 'update %s set %s = :field_value where study_id = :study_id' % (table_name, field_name)\n statement = str(statement)\n log.append(statement)\n log.append('field_value = \"%s\", study_id = \"%s\"' % (field_value, study_id))\n results = con.cursor().execute(statement, named_params)\n con.cursor().execute('commit')\n log.append('Study updated.')\n #raise Exception\n return\n \n ########################################\n ### For other tables\n ########################################\n \n if table_name in ['\"AIR\"', '\"COMMON_FIELDS\"', '\"MICROBIAL_MAT_BIOFILM\"', '\"OTHER_ENVIRONMENT\"', \\\n '\"SAMPLE\"', '\"SEDIMENT\"', '\"SOIL\"', '\"WASTEWATER_SLUDGE\"', '\"WATER\"', '\"SEQUENCE_PREP\"', \\\n '\"HOST_ASSOC_VERTIBRATE\"', '\"HOST_ASSOCIATED_PLANT\"', '\"HOST_SAMPLE\"', '\"HUMAN_ASSOCIATED\"',\n '\"COMMON_EXTRA_SAMPLE\"', '\"COMMON_EXTRA_SAMPLE_2\"', '\"COMMON_EXTRA_PREP\"'] \\\n or 'EXTRA_SAMPLE_' in table_name or 'EXTRA_PREP_' in table_name:\n named_params = {'key_field':key_field, 'study_id':study_id}\n statement = 'select sample_id from \"SAMPLE\" where sample_name = :key_field and study_id = :study_id'\n statement = str(statement)\n key_column = 'sample_id'\n key_table = '\"SAMPLE\"'\n elif table_name in ['\"HOST\"']:\n named_params = {'key_field':'{0}:{1}'.format(str(study_id), host_key_field)}\n statement = 'select host_id from \"HOST\" where host_subject_id = :key_field'\n statement = str(statement)\n key_column = 'host_id'\n key_table = '\"HOST\"'\n else:\n return 'Unknown table found - no action taken. Table name was: \"%s\". Column name was: \"%s\"'\\\n % (table_name, field_name)\n \n # Find the assocaited key column\n log.append('Determining if required key row exists...')\n log.append(statement + '\", key_field is ' + key_field + ', study_id is ' + str(study_id))\n results = con.cursor().execute(statement, named_params).fetchone()\n if results != None:\n key_column_value = results[0]\n log.append('Found key_column_value: %s' % str(key_column_value))\n else:\n log.append('Could not determine key. 
Skipping write for field %s with value \"%s\".'\\\n % (field_name, field_value))\n raise Exception\n\n\n\n\n\n\n\n\n\n\n\n ####### to speed up access, create local storage for all items already seen\n\n\n\n # If it ain't there, create it\n log.append('Checking if row already exists...')\n \n # Must append row_num if sequence table\n if table_name == '\"SEQUENCE_PREP\"' or table_name == '\"COMMON_EXTRA_PREP\"' or 'EXTRA_PREP_' in table_name:\n named_params = {'key_column_value':key_column_value, 'row_number':row_num}\n statement = 'select 1 from %s where %s = :key_column_value and row_number = :row_number'\\\n % (table_name, key_column)\n else:\n named_params = {'key_column_value':key_column_value}\n statement = 'select 1 from %s where %s = :key_column_value' % (table_name, key_column)\n statement = str(statement)\n log.append(statement)\n results = con.cursor().execute(statement, named_params).fetchone()\n \n if results == None:\n log.append('No row found, inserting new row')\n if table_name == '\"SEQUENCE_PREP\"' or table_name == '\"COMMON_EXTRA_PREP\"' or 'EXTRA_PREP_' in table_name:\n log.append('Row number is %s' % (str(row_num)))\n named_params = {'key_column_value':key_column_value, 'row_number':row_num}\n statement = 'insert into %s (%s, row_number) values (:key_column_value, :row_number)'\\\n % (table_name, key_column)\n else:\n named_params = {'key_column_value':key_column_value}\n statement = 'insert into %s (%s) values (:key_column_value)' % (table_name, key_column)\n statement = str(statement)\n log.append(statement)\n con.cursor().execute(statement, named_params)\n\n\n\n\n\n\n \n # Attempt to write the metadata field\n log.append('Writing metadata value...')\n \n # If it's a date, must not put quotes around the oracle to_date function.\n if database_data_type == 'date':\n field_value = self.convertToOracleHappyName(field_value)\n if table_name == '\"SEQUENCE_PREP\"' or table_name == '\"COMMON_EXTRA_PREP\"' or 'EXTRA_PREP_' in table_name:\n statement = 'update %s set %s = %s where %s = %s and row_number = %s'\\\n % (table_name, field_name, field_value, key_column, key_column_value, row_num)\n else: \n statement = 'update %s set %s = %s where %s = %s'\\\n % (table_name, field_name, field_value, key_column, key_column_value) \n else: \n if table_name == '\"SEQUENCE_PREP\"' or table_name == '\"COMMON_EXTRA_PREP\"' or 'EXTRA_PREP_' in table_name:\n statement = 'update %s set %s = \\'%s\\' where %s = %s and row_number = %s'\\\n % (table_name, field_name, field_value, key_column, key_column_value, row_num)\n else: \n statement = 'update %s set %s = \\'%s\\' where %s = %s'\\\n % (table_name, field_name, field_value, key_column, key_column_value)\n statement = str(statement)\n log.append(statement)\n results = con.cursor().execute(statement)\n \n # Finally, commit the changes\n results = con.cursor().execute('commit')\n\n #if field_name == '\"DW1\"':\n # raise Exception('Found DW1: Dumping log')\n \n except Exception, e:\n call_string = 'writeMetadataValue(\"%s\", \"%s\", \"%s\", \"%s\", \"%s\")'\\\n % (field_type, key_field, field_name, field_value, study_id)\n log_string = '<br/>'.join(log)\n error_msg = 'Exception caught attempting to store field \"%s\" with value \"%s\" into \\\n table \"%s\".<br/>Method signature: %s<br/>Full error log:<br/>%s<br/>Oracle says: %s' % \\\n (field_name, field_value, table_name, call_string, log_string, str(e))\n raise Exception(error_msg)\n finally:\n # Release the lock\n #lock.release()\n log.append('Lock released')", "def dict_factory(cursor, 
row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d", "def get_pymongo_type_string(\n self, field_type: Union[Type, str], collection_name: str\n ) -> str:\n try:\n type_string = PYMONGO_TYPE_TO_MONGO_TYPE[field_type]\n except KeyError:\n self.report.report_warning(\n collection_name, f\"unable to map type {field_type} to metadata schema\"\n )\n PYMONGO_TYPE_TO_MONGO_TYPE[field_type] = \"unknown\"\n type_string = \"unknown\"\n\n return type_string", "def dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d", "def dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d", "def dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d", "def db_type(self, field):\r\n if self.connection.settings_dict.get('STORE_RELATIONS_AS_DB_KEYS'):\r\n if field.primary_key or field.rel is not None:\r\n return 'key'\r\n\r\n # Primary keys were processed as db.Keys; for related fields\r\n # the db_type of primary key of the referenced model was used,\r\n # but RelatedAutoField type was not defined and resulted in\r\n # \"integer\" being used for relations to models with AutoFields.\r\n # TODO: Check with Positive/SmallIntegerField primary keys.\r\n else:\r\n if field.primary_key:\r\n return 'key'\r\n if field.rel is not None:\r\n related_field = field.rel.get_related_field()\r\n if related_field.get_internal_type() == 'AutoField':\r\n return 'integer'\r\n else:\r\n return related_field.db_type(connection=self.connection)\r\n\r\n db_type = field.db_type(connection=self.connection)\r\n\r\n # Override db_type of \"string\" fields according to indexing.\r\n if db_type in ('string', 'text'):\r\n indexes = get_model_indexes(field.model)\r\n if field.attname in indexes['indexed']:\r\n return 'string'\r\n elif field.attname in indexes['unindexed']:\r\n return 'text'\r\n\r\n return db_type", "def dict_factory(cursor, row):\n rowdict = {}\n for idx, col in enumerate(cursor.description):\n rowdict[col[0]] = row[idx]\n return rowdict", "def type_fields(self, res, op_item):\n result = []\n cast_func = {}\n header = res[0]\n for heading in header:\n cast_func[heading] = DataType.str\n\n if \"field_type\" in op_item:\n for f, p in findall(FIELD_TYPE_RE, op_item[\"field_type\"]):\n cast_func[p] = self.dt.get_func(f)\n first = True\n for row in res[1:]:\n new_row = []\n for idx in range(len(header)):\n\n heading = header[idx]\n cur_value = row[idx]\n if type(cur_value) is tuple:\n cur_value = cur_value[1]\n if heading == \"timespan\" and first:\n first = False\n new_row.append((cast_func[heading](cur_value), cur_value))\n\n result.append(new_row)\n\n return [header] + result", "def get_column_type(cls, **kwargs: Any) -> Any: # pragma no cover\n return None", "def extract_generic_credit_type(self, course_row):\n course_id = row[0]\n credit_type_res = {re.compile(\"Communication\"):\"CS\",\n re.compile(\"Natural Science\"):\"NS\",\n re.compile(\"Humanities\"):\"H\",\n re.compile(\"Performance\"):\"HP\",\n re.compile(\"Social\"):\"SS\",\n re.compile(\"Lab\"):\"NSL\",\n re.compile(\"Quant\"):\"QS\",\n re.compile(\"Elective\"):\"E\",\n re.compile(\"Diversity\"):\"DC\",\n re.compile(\"Prereq\"):\"PR\"}\n for credit_type_re in credit_type_res:\n if credit_type_re.search(course_id):\n type_string = credit_type_res[credit_type_re]\n credit_type = models.CreditType.objects.get(label_short=type_string)\n 
return credit_type", "def specific(self):\n field_attr = field_registry.field_map[self.type]\n return getattr(self, field_attr, None)", "def _fields(self, doclet):\n FIELD_TYPES = OrderedDict([('params', _params_formatter),\n ('properties', _params_formatter),\n ('exceptions', _exceptions_formatter),\n ('returns', _returns_formatter)])\n for field_name, callback in iteritems(FIELD_TYPES):\n for field in doclet.get(field_name, []):\n description = field.get('description', '')\n unwrapped = sub(r'[ \\t]*[\\r\\n]+[ \\t]*', ' ', description)\n yield callback(field, unwrapped)", "def get_model_details(refobject):\n typename = type(refobject).__name__\n\n if typename in TYPES_TABLE:\n\n details = TYPES_TABLE[typename]\n\n elif isinstance(refobject, type):\n clrtype = clr.GetClrType(refobject)\n\n if clrtype.IsEnum:\n details = TYPES_TABLE['enumtype']\n\n elif clrtype.IsClass:\n if clrtype.FullName.startswith('Autodesk.Revit.DB'):\n details = TYPES_TABLE['revitclass']\n\n else:\n details = TYPES_TABLE['classtype']\n\n elif clrtype.IsInterface:\n details = TYPES_TABLE['interface']\n\n elif clrtype.FullName in SYS_TYPES:\n details = TYPES_TABLE['systype']\n\n elif clrtype.FullName in WIN_TYPES:\n details = TYPES_TABLE['wintype']\n\n else:\n details = TYPES_TABLE['unknown']\n logger.debug('TODO get_model_details for CLR/type: {} ({})'.format(\n typename, refobject))\n\n # match all list/collection types\n elif is_iterable(refobject):\n details = TYPES_TABLE['listtype']\n\n # match sub enum types\n elif clr.GetClrType(type(refobject)).IsEnum:\n details = TYPES_TABLE['subenumtype']\n\n # match sub class types\n elif clr.GetClrType(type(refobject)).IsClass:\n\n if clr.GetClrType(type(refobject)).FullName == 'Autodesk.Revit.DB.Parameter':\n details = TYPES_TABLE['revitparameter']\n\n elif clr.GetClrType(type(refobject)).FullName.startswith('Autodesk.Revit.DB'):\n details = TYPES_TABLE['revitclass']\n else:\n details = TYPES_TABLE['subclasstype']\n\n elif clr.GetClrType(type(refobject)).IsInterface:\n details = TYPES_TABLE['interface']\n\n elif clr.GetClrType(type(refobject)).FullName in SYS_TYPES:\n details = TYPES_TABLE['systype']\n\n elif clr.GetClrType(type(refobject)).FullName in WIN_TYPES:\n details = TYPES_TABLE['wintype']\n\n else:\n details = TYPES_TABLE['unknown']\n logger.debug('TODO get_model_details for : {} ({})'.format(\n typename, refobject))\n\n return details", "def init_fields(self):\n result = self.connection.execute('pragma table_info(files)')\n rows = result.fetchall()\n self.fields = [Field(row) for row in rows[4:]]\n result = self.connection.execute('select _keyword from keywords')\n rows = result.fetchall()\n self.keywords = [row[0] for row in rows]", "def field(self) -> 'outputs.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField':\n return pulumi.get(self, \"field\")", "def south_field_triple(self):\n # We'll just introspect ourselves, since we inherit.\n from south.modelsinspector import introspector\n field_class = \"django.db.models.fields.TextField\"\n args, kwargs = introspector(self)\n # That's our definition!\n return (field_class, args, kwargs)", "async def get_schema_info(self, collection):\n await self.ensure_collection(collection)\n try:\n # Luke handler is not supported in API v2 yet.\n # /v2/collections/<COLLECTION>/schema/fields doesn't show dynamically\n # created fields.\n # So using old API (/solr/...).\n response = await self.get(\n '/solr/{}/admin/luke?numTerms=0'.format(collection)\n )\n return json.loads(response.body.decode('utf-8'))\n except 
SolrError:\n logger.warning('Failed to fetch fields list for collection {}'\n .format(collection))\n raise", "def sql_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"sql_type\")", "def sql_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"sql_type\")", "def _init_fields(self):\n if self._fields is None:\n M.mset('U', \"^\") # DBS Calls Require this\n f = self._fields = {}\n attrs = self.fieldnames = {}\n fieldid = \"0\"\n while 1:\n # Subscript 0 is field description, .1 is the title, 3 is help\n fieldid, info, title, fieldhelp = M.ddwalk(self._fileid, fieldid)\n #fieldid, info, title, fieldhelp = M.mexec(\n # \"\"\"set s0=$order(^DD(s2,s0)) Q:s0'=+s0 s s1=$G(^DD(s2,s0,0)),s3=$G(^DD(s2,s0,.1)),s4=$G(^DD(s2,s0,3))\"\"\",\n # M.INOUT(str(fieldid)), M.INOUT(\"\"), str(self._fileid), M.INOUT(\"\"), M.INOUT(\"\"))\n if fieldid == \"\" or fieldid[0] not in \"0123456789.\":\n break\n\n info = info.split(\"^\", 4) \n label = self._clean_label(info[0])\n try:\n ftype = info[1]\n except:\n ftype = None\n if ftype:\n finst = None\n for klass in FIELD_TYPES:\n if klass.isa(ftype):\n finst = f[fieldid] = klass(fieldid, label, info)\n finst.fileid = self.fileid\n finst.ownerdd = self\n attrs[label] = fieldid\n break\n if finst is None:\n print finst, \"FIELD [%s], spec [%s] was not identified\" % (label, ftype)\n continue\n finst.title = title\n finst.fieldhelp = fieldhelp\n else:\n assert finst, \"FIELD [%s] %s has no fieldspec\" % (label, info)\n\n return self._fields", "def describe(self):\r\n return dict((field.name, dict(\r\n id=field.name,\r\n name=field.label,\r\n validators=ValidatorSerializer(\r\n field.requires if isSequenceType(field.requires) else [field.requires])(),\r\n comment=field.comment,\r\n readable=field.readable,\r\n writable=field.writable,\r\n type=getattr(field, 'wtype',\r\n field.type.type if isinstance(field.type, SQLCustomType) else field.type.split('(')[0]),\r\n # w2pwidget=field.widget,\r\n )) for field in self.descibe_columns)", "def table_description(classname, nclassname, shape=()):\n classdict = {}\n colpos = append_columns(classdict, shape)\n\n ndescr = nested_description(nclassname, colpos, shape=shape)\n classdict['c_nested'] = ndescr\n colpos += 1\n\n extracol = tb.IntCol(shape=shape, pos=colpos)\n classdict['c_extra'] = extracol\n colpos += 1\n\n idxextracol = tb.IntCol(shape=shape, pos=colpos)\n classdict['c_idxextra'] = idxextracol\n colpos += 1\n\n return type(classname, (tb.IsDescription,), classdict)", "def retrievebeamparamproptype(self, cursor, bpptname, bpptunit=None):\n sql = \"\"\"\n SELECT\n beam_param_prop_type_id,\n beam_param_prop_type_name,\n beam_param_prop_type_desc,\n beam_param_prop_type_unit\n FROM\n beam_param_prop_type\n WHERE\n \"\"\"\n sql += \"beam_param_prop_type_name = '%s'\" % (bpptname,)\n if bpptunit is None:\n sql += \" AND beam_param_prop_type_unit IS NULL\"\n else:\n sql += \" AND beam_param_prop_type_unit = '%s'\" % (bpptunit,)\n cursor.execute(sql)\n return cursor.fetchone()", "def _getfield(self, block, name):\n\n # First, get the field from the class, if defined\n block_field = getattr(block.__class__, name, None)\n if block_field is not None and isinstance(block_field, Field):\n return block_field\n\n # Not in the class, so name\n # really doesn't name a field\n raise KeyError(name)", "def add_field(self, field_name, field_type):\n field_name = field_name.replace('\"','')\n if field_type == 'keyword':\n query = \"\"\"insert or ignore into keywords\n (_keyword) values (\"%s\")\"\"\" % field_name\n else:\n 
query = 'alter table files add column \"%s\" %s' % (\n field_name, field_type)\n self.connection.execute(query)\n self.connection.commit()\n self.init_fields()", "def column_reflection_fallback(self):\n col_info_dict_list: List[Dict]\n if self.sql_engine_dialect.name.lower() == \"mssql\":\n type_module = self._get_dialect_type_module()\n # Get column names and types from the database\n # StackOverflow to the rescue: https://stackoverflow.com/a/38634368\n col_info_query: TextClause = sa.text(\n f\"\"\"\nSELECT\n cols.NAME, ty.NAME\nFROM\n tempdb.sys.columns AS cols\nJOIN\n sys.types AS ty\nON\n cols.user_type_id = ty.user_type_id\nWHERE\n object_id = OBJECT_ID('tempdb..{self._table}')\n \"\"\"\n )\n col_info_tuples_list = self.engine.execute(col_info_query).fetchall()\n col_info_dict_list = [\n {\"name\": col_name, \"type\": getattr(type_module, col_type.upper())()}\n for col_name, col_type in col_info_tuples_list\n ]\n else:\n query: Select = sa.select([sa.text(\"*\")]).select_from(self._table).limit(1)\n col_names: list = self.engine.execute(query).keys()\n col_info_dict_list = [{\"name\": col_name} for col_name in col_names]\n return col_info_dict_list", "def get_field_type(db_code):\n try:\n field_type = connection.introspection.get_field_type(\n db_code, 'django_musicbrainz tests')\n except KeyError:\n # if the db_code is not listed in\n # django.db.backens.postgresql_psycopg2.introspection.\\\n # DatabaseIntrospection.data_types_reverse we guess that\n # the db type is a TextField\n field_type = 'TextField'\n return field_type", "def get_table_info(line):\n\n COMMENT_EXPR = '-- Name: '\n TYPE_EXPR = '; Type: '\n SCHEMA_EXPR = '; Schema: '\n OWNER_EXPR = '; Owner: '\n TABLESPACE_EXPR = '; Tablespace: '\n\n temp = line.strip('\\n')\n type_start = get_all_occurrences(TYPE_EXPR, temp)\n schema_start = get_all_occurrences(SCHEMA_EXPR, temp)\n owner_start = get_all_occurrences(OWNER_EXPR, temp)\n tblspace_start = get_all_occurrences(TABLESPACE_EXPR, temp)\n if len(type_start) != 1 or len(schema_start) != 1 or len(owner_start) != 1:\n return (None, None, None, None)\n name = temp[len(COMMENT_EXPR) : type_start[0]]\n type = temp[type_start[0] + len(TYPE_EXPR) : schema_start[0]]\n schema = temp[schema_start[0] + len(SCHEMA_EXPR) : owner_start[0]]\n if not tblspace_start:\n tblspace_start.append(None)\n owner = temp[owner_start[0] + len(OWNER_EXPR) : tblspace_start[0]]\n return (name, type, schema, owner)", "def test_get_table_description(self):\n db_introspection = DatabaseIntrospection(self.connection)\n cursor = mock.MagicMock()\n\n def description(*args, **kwargs):\n return [[\"name\", TypeCode.STRING], [\"age\", TypeCode.INT64]]\n\n def get_table_column_schema(*args, **kwargs):\n column_details = {}\n column_details[\"name\"] = ColumnDetails(\n null_ok=False, spanner_type=\"STRING(10)\"\n )\n column_details[\"age\"] = ColumnDetails(\n null_ok=True, spanner_type=\"INT64\"\n )\n return column_details\n\n cursor.get_table_column_schema = get_table_column_schema\n cursor.description = description()\n table_description = db_introspection.get_table_description(\n cursor=cursor, table_name=\"Table_1\"\n )\n if USING_DJANGO_3:\n self.assertEqual(\n table_description,\n [\n FieldInfo(\n name=\"name\",\n type_code=TypeCode.STRING,\n display_size=None,\n internal_size=10,\n precision=None,\n scale=None,\n null_ok=False,\n default=None,\n collation=None,\n ),\n FieldInfo(\n name=\"age\",\n type_code=TypeCode.INT64,\n display_size=None,\n internal_size=None,\n precision=None,\n scale=None,\n 
null_ok=True,\n default=None,\n collation=None,\n ),\n ],\n )\n else:\n self.assertEqual(\n table_description,\n [\n FieldInfo(\n name=\"name\",\n type_code=TypeCode.STRING,\n display_size=None,\n internal_size=10,\n precision=None,\n scale=None,\n null_ok=False,\n default=None,\n ),\n FieldInfo(\n name=\"age\",\n type_code=TypeCode.INT64,\n display_size=None,\n internal_size=None,\n precision=None,\n scale=None,\n null_ok=True,\n default=None,\n ),\n ],\n )", "def _get_field_details(self, data, fields):\n fields_metadata = dict()\n for field in fields:\n dtype = data[field].dtype\n field_template = self._FIELD_TEMPLATES.get(dtype.kind)\n if not field_template:\n raise ValueError('Unsupported dtype {} in column {}'.format(dtype, field))\n\n field_details = copy.deepcopy(field_template)\n fields_metadata[field] = field_details\n\n return fields_metadata", "def get_description_from_row(row):\n return [\n (\n name, # name\n get_type(value), # type_code\n None, # [display_size]\n None, # [internal_size]\n None, # [precision]\n None, # [scale]\n get_type(value) == Type.STRING, # [null_ok]\n )\n for name, value in row.items()\n ]", "def get_fields(dgid, metadata=None, computed_columns=None):\n # NOTE: metadata does not contain computed_columns yet\n if metadata is None:\n conn = get_database_connection(dgid)\n metadata = get_metadata(conn)\n\n # Used to evaluate computed columns\n unify_computed_columns(computed_columns)\n columns = list(metadata.keys())\n select_expr_as = [get_field_name(column, metadata) for column in columns]\n databases = [\"datagrid\"]\n\n if computed_columns:\n # Only passed in when calling from endpoint\n update_state(computed_columns, metadata, databases, columns, select_expr_as)\n # Now metadata has computed columns\n\n fields = {}\n for column in metadata:\n datatype = metadata[column][\"type\"]\n field_name = get_field_name(column, metadata)\n qbtype = datatype_to_qbtype(datatype)\n if qbtype is None:\n continue\n\n if datatype in [\"FLOAT\", \"INTEGER\", \"ROW_ID\"]:\n fields[field_name] = {\n \"label\": column,\n \"field\": field_name,\n \"type\": qbtype,\n \"tooltip\": \"The '%s' column (type '%s') from the data grid\"\n % (column, qbtype),\n }\n # name, datatype, min, max, avg, variance, total, stddev, other\n if (metadata[column][\"minimum\"] is not None) and (\n metadata[column][\"minimum\"] is not None\n ):\n min_value = metadata[column][\"minimum\"]\n max_value = metadata[column][\"maximum\"]\n fields[field_name][\"fieldSettings\"] = {\n \"min\": min_value,\n \"max\": max_value,\n }\n fields[field_name][\"valueSources\"] = [\"value\", \"field\"]\n\n elif datatype == \"DATETIME\":\n field_exp = \"datetime(%s, 'unixepoch')\" % field_name\n fields[field_exp] = {\n \"label\": column,\n \"field\": field_name,\n \"type\": qbtype,\n \"tooltip\": \"The '%s' column (type '%s') from the data grid\"\n % (column, qbtype),\n }\n if (metadata[column][\"minimum\"] is not None) and (\n metadata[column][\"minimum\"] is not None\n ):\n min_value = metadata[column][\"minimum\"]\n max_value = metadata[column][\"maximum\"]\n fields[field_exp][\"fieldSettings\"] = {\n \"min\": min_value,\n \"max\": max_value,\n # \"dateFormat\": \"DD-MM-YYYY\",\n # \"timeFormat\":\n # \"valueFormat\":\n }\n fields[field_exp][\"valueSources\"] = [\n \"value\",\n \"field\",\n \"func\",\n ] # adds Now, and Relative\n\n elif datatype == \"BOOLEAN\":\n fields[field_name] = {\n \"label\": column,\n \"field\": field_name,\n \"type\": qbtype,\n \"tooltip\": \"The '%s' column (type '%s') from the data 
grid\"\n % (column, qbtype),\n }\n fields[field_name][\"fieldSettings\"] = {\n \"labelYes\": \"True\",\n \"labelNo\": \"False\",\n }\n fields[field_name][\"valueSources\"] = [\"value\", \"field\"]\n\n elif datatype == \"TEXT\":\n fields[field_name] = {\n \"label\": column,\n \"field\": field_name,\n \"type\": qbtype,\n \"tooltip\": \"The '%s' column (type '%s') from the data grid\"\n % (column, qbtype),\n }\n fields[field_name][\"valueSources\"] = [\"value\", \"field\"]\n\n elif datatype == \"JSON\":\n # Asset metadata columns are named\n # 'COLUMN_NAME.metadata' or 'COLUMN_NAME--metadata'\n fields[field_name] = {\n \"label\": column.replace(\".metadata\", \"\").replace(\"--metadata\", \"\"),\n \"field\": field_name,\n \"tooltip\": \"The '%s' column (type 'JSON') from the data grid\"\n % (column,),\n \"type\": \"!struct\",\n \"subfields\": {},\n }\n subfields = ast.literal_eval(metadata[column][\"other\"])\n # Only filterable keys are in subfields\n for key in subfields:\n # Query Builder filter types: \"text\", \"number\", \"boolean\", or \"list-of-text\"\n qbtype = subfields[key][\"type\"]\n if qbtype == \"list-of-text\":\n field_exp = \"json_extract(%s, '$.%s')\" % (field_name, key)\n fields[field_name][\"subfields\"][field_exp] = {\n \"type\": \"text\",\n \"label\": key,\n \"field\": field_name,\n \"tableName\": \"1\", # special signal for JSON queries in our QueryBuilder\n \"operators\": [\"like\"],\n }\n else:\n field_exp = \"json_extract(%s, '$.%s')\" % (field_name, key)\n fields[field_name][\"subfields\"][field_exp] = {\n \"type\": qbtype,\n \"label\": key,\n \"field\": field_name,\n \"tableName\": \"1\", # special signal for JSON queries in our QueryBuilder\n }\n if \"values\" in subfields[key]:\n fields[field_name][\"subfields\"][field_exp][\"type\"] = \"select\"\n fields[field_name][\"subfields\"][field_exp][\"fieldSettings\"] = {\n \"listValues\": sorted(subfields[key][\"values\"])\n }\n\n return fields", "def getFieldDescr(fieldName, descr):\n i = getIter(descr)\n if not i:\n return\n\n try:\n sw = ''\n item = i.next()\n while item:\n if fieldName == item[0]:\n yield item\n break\n if isinstance(item[1], list):\n if fieldName.startswith('%s/' %item[0]):\n sw = item[0]\n else:\n item = i.next()\n continue\n [trash, newField] = fieldName.split(sw + '/')\n for c in getFieldDescr(newField, item[1]):\n sw = '%s/%s' % (sw, c[0])\n yield (sw, c[1])\n item = i.next()\n except StopIteration:\n pass", "def createField(schemaName, field):\n# print(field.domain)\n# print(field.name, field.domain if isinstance(field.domain, str) else field.domain.type)\n# print(field.__dict__)\n return \"\\\"{name}\\\" {type_}\".format(\n name = field.name,\n type_ = '\"' + schemaName + '\".\"' + field.domain + '\"' if isinstance(field.domain, str) else getType(field.domain)\n )", "def _savebeamparamproptype(self, cursor, bpptname, bpptunit=None, bpptdesc=None):\n sql = \"\"\"\n INSERT INTO beam_param_prop_type (\n beam_param_prop_type_name,\n beam_param_prop_type_desc,\n beam_param_prop_type_unit\n ) VALUES (\n '%s', %s, %s\n )\n \"\"\"\n if bpptunit is None:\n bpptunit = \"NULL\"\n else:\n bpptunit = \"'%s'\" % (bpptunit,)\n\n if bpptdesc is None:\n bpptdesc = \"NULL\"\n else:\n bpptdesc = \"'%s'\" % (bpptdesc,)\n\n cursor.execute(sql % (bpptname, bpptdesc, bpptunit))\n return cursor.lastrowid", "def _make_unknown_name(self, cursor, field_name):\n parent = cursor.lexical_parent\n pname = self.get_unique_name(parent)\n log.debug('_make_unknown_name: Got parent get_unique_name %s',pname)\n # we only look 
at types declarations\n _cursor_decl = cursor.type.get_declaration()\n # we had the field index from the parent record, as to differenciate\n # between unnamed siblings of a same struct\n _i = 0\n found = False\n # Look at the parent fields to find myself\n for m in parent.get_children():\n # FIXME: make the good indices for fields\n log.debug('_make_unknown_name child %d %s %s %s',_i,m.kind, m.type.kind,m.location)\n if m.kind not in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL,\n CursorKind.CLASS_DECL]:#,\n #CursorKind.FIELD_DECL]:\n continue\n if m == _cursor_decl:\n found = True\n break\n _i+=1\n if not found:\n raise NotImplementedError(\"_make_unknown_name BUG %s\" % cursor.location)\n # truncate parent name to remove the first part (union or struct)\n _premainer = '_'.join(pname.split('_')[1:])\n # name the anonymous record with the field name if it has one\n if field_name:\n name = '%s_%s' % (_premainer, field_name)\n else:\n name = '%s_%d' % (_premainer, _i)\n return name", "def get_field(self, field_name):\n all_fields = self._fields.items(self._fields.root)\n print(\"all_fields\", all_fields)\n for name, field in all_fields:\n print(name, field_name)\n if name == field_name:\n return field", "def get_fields(self, table_name):\n return self.get_table_meta(table_name)['fields']", "def GetFieldDef(fielddef, fields=\"format_, addrdef, baseaddr, bits, bitshift, strindex, datadef, arraydef, validate, cmd, group, tasmotacmnd, converter, readconverter, writeconverter\"):\n format_ = addrdef = baseaddr = datadef = arraydef = validate = cmd = group = tasmotacmnd = converter = readconverter = writeconverter = strindex = None\n bits = bitshift = 0\n\n # calling with nothing is wrong\n if fielddef is None:\n print('<fielddef> is None', file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n # get top level items\n if len(fielddef) == 3:\n # converter not present\n format_, addrdef, datadef = fielddef\n elif len(fielddef) == 4:\n # converter present\n format_, addrdef, datadef, converter = fielddef\n else:\n print('wrong <fielddef> {} length ({}) in setting'.format(fielddef, len(fielddef)), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n # ignore calls with 'root' setting\n if isinstance(format_, dict) and baseaddr is None and datadef is None:\n return eval(fields)\n\n if not isinstance(format_, (str,dict)):\n print('wrong <format> {} type {} in <fielddef> {}'.format(format_, type(format_), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n # extract addrdef items\n baseaddr = addrdef\n if isinstance(baseaddr, (list,tuple)):\n if len(baseaddr) == 3:\n # baseaddr bit definition\n baseaddr, bits, bitshift = baseaddr\n if not isinstance(bits, int):\n print('<bits> must be defined as integer in <fielddef> {}'.format(fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n if not isinstance(bitshift, int):\n print('<bitshift> must be defined as integer in <fielddef> {}'.format(fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n elif len(baseaddr) == 2:\n # baseaddr string definition\n baseaddr, strindex = baseaddr\n if not isinstance(strindex, int):\n print('<strindex> must be defined as integer in <fielddef> {}'.format(fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n if strindex >= SettingsTextIndex.index('SET_MAX'):\n print('<strindex> out of range [0,{}] in <fielddef> {}'.format(SettingsTextIndex.index('SET_MAX'), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n else:\n print('wrong 
<addrdef> {} length ({}) in <fielddef> {}'.format(addrdef, len(addrdef), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n if not isinstance(baseaddr, int):\n print('<baseaddr> {} must be defined as integer in <fielddef> {}'.format(baseaddr, fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n # extract datadef items\n arraydef = datadef\n if isinstance(datadef, (tuple)):\n if len(datadef) == 2:\n # datadef has a validator\n arraydef, validate = datadef\n elif len(datadef) == 3:\n # datadef has a validator and cmd set\n arraydef, validate, cmd = datadef\n # cmd must be a tuple with 2 objects\n if isinstance(cmd, tuple) and len(cmd) == 2:\n group, tasmotacmnd = cmd\n if group is not None and not isinstance(group, str):\n print('wrong <group> {} in <fielddef> {}'.format(group, fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n if isinstance(tasmotacmnd, tuple):\n for tcmnd in tasmotacmnd:\n if tcmnd is not None and not callable(tcmnd) and not isinstance(tcmnd, str):\n print('wrong <tasmotacmnd> {} in <fielddef> {}'.format(tcmnd, fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n else:\n if tasmotacmnd is not None and not callable(tasmotacmnd) and not isinstance(tasmotacmnd, str):\n print('wrong <tasmotacmnd> {} in <fielddef> {}'.format(tasmotacmnd, fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n else:\n print('wrong <cmd> {} length ({}) in <fielddef> {}'.format(cmd, len(cmd), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n else:\n print('wrong <datadef> {} length ({}) in <fielddef> {}'.format(datadef, len(datadef), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n if validate is not None and (not isinstance(validate, str) and not callable(validate)):\n print('wrong <validate> {} type {} in <fielddef> {}'.format(validate, type(validate), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n # convert single int into one-dimensional list\n if isinstance(arraydef, int):\n arraydef = [arraydef]\n\n if arraydef is not None and not isinstance(arraydef, (list)):\n print('wrong <arraydef> {} type {} in <fielddef> {}'.format(arraydef, type(arraydef), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n # get read/write converter items\n readconverter = converter\n if isinstance(converter, (tuple)):\n if len(converter) == 2:\n # converter has read/write converter\n readconverter, writeconverter = converter\n if readconverter is not None and not isinstance(readconverter, str) and not callable(readconverter):\n print('wrong <readconverter> {} type {} in <fielddef> {}'.format(readconverter, type(readconverter), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n if writeconverter is not None and (not isinstance(writeconverter, (bool,str)) and not callable(writeconverter)):\n print('wrong <writeconverter> {} type {} in <fielddef> {}'.format(writeconverter, type(writeconverter), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n else:\n print('wrong <converter> {} length ({}) in <fielddef> {}'.format(converter, len(converter), fielddef), file=sys.stderr)\n raise SyntaxError('<fielddef> error')\n\n\n return eval(fields)", "def getDocObjects(row, field):\n\n doc = nlp_larg(str(row[field]).lower())\n\n return doc", "def typeString(self):\n return Parameter.string_dict[self._field.type]", "def test_parse_fields(pawprint_default_tracker_db):\n\n tracker = pawprint_default_tracker_db\n\n # SELECT * 
FROM table\n args = ()\n assert tracker._parse_fields(*args) == \"*\"\n\n # SELECT event FROM table\n args = (\"event\",)\n assert tracker._parse_fields(*args) == \"event\"\n\n # SELECT user_id, timestamp FROM table\n args = (\"user_id\", \"timestamp\")\n assert tracker._parse_fields(*args) == \"user_id, timestamp\"\n\n # SELECT metadata #>> '{a, b}' FROM table\n args = (\"metadata__a__b\",)\n assert tracker._parse_fields(*args) == \"metadata #> '{a, b}' AS json_field\"", "def get_column_information(self, column_name, table, verbose=True): \n \n assert(self.connected)\n try: \n assert(self.check_table(table, verbose=False)) \n except AssertionError: \n raise TableNotFoundError\n \n if (not self.check_column(column_name, table, verbose=False)):\n return\n \n GET_COLUMN_INFO_COMMAND = (\"SELECT COLUMN_TYPE, IS_NULLABLE, COLUMN_KEY, EXTRA \"\n \t\t\"FROM INFORMATION_SCHEMA.COLUMNS \"\n \t\t\"WHERE TABLE_NAME='{0}' and COLUMN_NAME = '{1}'\".format(table,column_name))\n \n self.cursor.execute(GET_COLUMN_INFO_COMMAND)\n \n for row in self.cursor:\n break\n \n info = {'type' : row[0],\n 'not_null' : row[1] != 'YES' , \n 'foreign_key' : row[2] == 'MUL',\n 'auto_incremenet' : row[3] == 'auto_increment'}\n \n if verbose: print(\"Column with label '{0}' found in table '{1}'\".format(column_name, table))\n \n \n return info", "def get_field_dtype(self, field=None):\n\n if field in self._fields_dtypes:\n return self._fields_dtypes[field]\n\n # initialize dbtypes for all fields\n field_type = pd.read_sql(\n 'select distinct column_name, type '\n 'from fields',\n self._get_db_engine())\n\n for row in field_type.itertuples():\n self._fields_dtypes[row.column_name] = row.type\n\n return self._fields_dtypes[field] if field in self._fields_dtypes else None", "def complete_info_record_type(self, text, line, begidx, endidx):\n begidx = begidx\n endidx = endidx\n mline = line.partition(' ')[2]\n offs = len(mline) - len(text)\n info_record_types = ['description', 'access', 'default', 'bit',\n 'flag', 'max', 'min']\n return [s[offs:] for s in info_record_types if s.startswith(mline)]", "def get_ent_type(self, line):\n\n\t\treturn str(self.kb_shm.dataType(line))", "def record_type(values):\n field = basic.lookup(values, name='Record Type (one of %s)' % values)\n\n return field.setResultsName('record_type')", "def _fieldDefAsString(self, fieldTuple):\n\n fname, ftype, flen = fieldTuple\n if ftype == 'char':\n # pdb.set_trace()\n try:\n slen, flen = flen\n except TypeError as e:\n slen = flen\n flen = 1\n\n if slen > 0:\n fname = '%s[%d]' % (fname, slen)\n else:\n fname = '%s[]' % (fname)\n \n if flen > 1: \n return \" %s %s[%d];\" % (ftype, fname, flen)\n else:\n return \" %s %s;\" % (ftype, fname)", "def ddl_table(self, tabela):\r\n sql = \"\"\"SELECT\r\n RF.RDB$FIELD_NAME FIELD_NAME,\r\n CASE F.RDB$FIELD_TYPE\r\n WHEN 7 THEN\r\n CASE F.RDB$FIELD_SUB_TYPE\r\n WHEN 0 THEN 'INT'\r\n WHEN 1 THEN 'NUMERIC(' || F.RDB$FIELD_PRECISION || ', ' || (-F.RDB$FIELD_SCALE) || ')'\r\n WHEN 2 THEN 'DECIMAL'\r\n END\r\n WHEN 8 THEN\r\n CASE F.RDB$FIELD_SUB_TYPE\r\n WHEN 0 THEN 'INTEGER'\r\n WHEN 1 THEN 'NUMERIC(' || F.RDB$FIELD_PRECISION || ', ' || (-F.RDB$FIELD_SCALE) || ')'\r\n WHEN 2 THEN 'DECIMAL'\r\n END\r\n WHEN 9 THEN 'QUAD'\r\n WHEN 10 THEN 'FLOAT'\r\n WHEN 12 THEN 'DATE'\r\n WHEN 13 THEN 'TIME'\r\n WHEN 14 THEN 'CHAR(' || (TRUNC(F.RDB$FIELD_LENGTH / COALESCE(CH.RDB$BYTES_PER_CHARACTER,1))) || ') '\r\n WHEN 16 THEN\r\n CASE F.RDB$FIELD_SUB_TYPE\r\n WHEN 0 THEN 'BIGINT'\r\n WHEN 1 THEN 'NUMERIC(' || F.RDB$FIELD_PRECISION || 
', ' || (-F.RDB$FIELD_SCALE) || ')'\r\n WHEN 2 THEN 'DECIMAL'\r\n END\r\n WHEN 27 THEN 'NUMERIC'\r\n WHEN 35 THEN 'TIMESTAMP'\r\n WHEN 37 THEN 'VARCHAR(' || (TRUNC(F.RDB$FIELD_LENGTH / COALESCE(CH.RDB$BYTES_PER_CHARACTER,1))) || ')'\r\n WHEN 40 THEN 'CSTRING' || (TRUNC(F.RDB$FIELD_LENGTH / COALESCE(CH.RDB$BYTES_PER_CHARACTER,1))) || ')'\r\n WHEN 45 THEN 'BLOB_ID'\r\n WHEN 261 THEN 'TEXT'\r\n ELSE 'RDB$FIELD_TYPE: ' || F.RDB$FIELD_TYPE || '?'\r\n END FIELD_TYPE\r\n FROM RDB$RELATION_FIELDS RF\r\n JOIN RDB$FIELDS F ON (F.RDB$FIELD_NAME = RF.RDB$FIELD_SOURCE)\r\n LEFT OUTER JOIN RDB$CHARACTER_SETS CH ON (CH.RDB$CHARACTER_SET_ID = F.RDB$CHARACTER_SET_ID)\r\n LEFT OUTER JOIN RDB$COLLATIONS DCO ON ((DCO.RDB$COLLATION_ID = F.RDB$COLLATION_ID) AND (DCO.RDB$CHARACTER_SET_ID = F.RDB$CHARACTER_SET_ID))\r\n WHERE (RF.RDB$RELATION_NAME = '%s') AND (COALESCE(RF.RDB$SYSTEM_FLAG, 0) = 0)\r\n ORDER BY RF.RDB$FIELD_POSITION;\"\"\" % (tabela)\r\n res = self.cur_origem.execute(sql)\r\n table = \"CREATE TABLE IF NOT EXISTS %s (\" % tabela\r\n tipos = {}\r\n for coluna, tipo, in res.fetchall():\r\n table += \"%s %s,\" % (coluna.strip(), tipo.strip())\r\n tipos[coluna.strip()] = tipo\r\n table = table[:-1]+\");\"\r\n return table, tipos", "def get_sql_fields(self):\n return self.mapping_db, 'GeneNames', 'EnsembleID', 'symbol', 'name'", "def field_dim(self, str field_name):\n cdef std_string fn = <std_string> field_name.encode('UTF-8')\n return self.mdb.get().field_dim(fn)", "def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.Text()", "def _GetField(bpo_field_id):\n bpo_field = BPOField.objects.get(id=bpo_field_id)\n # Get BPO Field based on type.\n if bpo_field.type == 'auto_sum':\n return BPOAutoSumField.objects.get(id=bpo_field_id)\n elif bpo_field.type == 'multiple_choice':\n return BPOMultipleChoiceField.objects.get(id=bpo_field_id)\n elif bpo_field.type == 'float_input':\n return BPOFloatField.objects.get(id=bpo_field_id)\n elif bpo_field.type == 'integer_input':\n return BPOIntegerField.objects.get(id=bpo_field_id)\n elif bpo_field.type == 'date_input':\n return BPODateField.objects.get(id=bpo_field_id)\n elif bpo_field.type == 'table':\n return BPOTable.objects.get(id=bpo_field_id)\n else:\n return bpo_field" ]
[ "0.6303345", "0.6107471", "0.5995526", "0.5872125", "0.5803399", "0.5616249", "0.56076336", "0.5588405", "0.55621403", "0.55433136", "0.55343395", "0.5498443", "0.5464399", "0.5463323", "0.5434962", "0.53413516", "0.53411543", "0.53399295", "0.53027624", "0.52892405", "0.5286212", "0.5240713", "0.523672", "0.5220382", "0.5216029", "0.5216029", "0.52090305", "0.52000684", "0.5199184", "0.5199134", "0.519603", "0.51899517", "0.51496863", "0.5146651", "0.51453525", "0.5143113", "0.51334083", "0.5128507", "0.51285017", "0.5127615", "0.51149", "0.51133597", "0.51084137", "0.50817835", "0.5057224", "0.505523", "0.50520307", "0.504668", "0.50398767", "0.5029137", "0.5029137", "0.5029137", "0.50258565", "0.5016657", "0.501603", "0.50076437", "0.4995999", "0.49955755", "0.49953455", "0.49943903", "0.49698576", "0.4953548", "0.49489984", "0.49464265", "0.49460816", "0.49460816", "0.4924896", "0.49233735", "0.49183613", "0.49176002", "0.49173906", "0.49144787", "0.4910695", "0.48880753", "0.48866415", "0.4882979", "0.4880796", "0.48789588", "0.48685762", "0.48630545", "0.4862595", "0.48611355", "0.48406118", "0.4839562", "0.48392063", "0.48389784", "0.48346445", "0.48292667", "0.48288226", "0.48260307", "0.48251355", "0.4819546", "0.4812242", "0.48003334", "0.4784031", "0.47821686", "0.47812393", "0.47755903", "0.47580972", "0.47566792" ]
0.7295043
0
Return a sequence comprising the lines of code necessary to construct the inner Meta class for the model corresponding to the given database table name.
def get_meta(table_name, constraints, column_to_field_name):
    # unique_together = []
    # for index, params in constraints.items():
    #     if params['unique']:
    #         columns = params['columns']
    #         if len(columns) > 1:
    #             # we do not want to include the u"" or u'' prefix
    #             # so we build the string rather than interpolate the tuple
    #             tup = '(' + ', '.join("'%s'" % column_to_field_name[c] for c in columns) + ')'
    #             unique_together.append(tup)
    return type('Meta', (), dict(managed=False, db_table=table_name, app_label='layers'))
    # if unique_together:
    #     tup = '(' + ', '.join(unique_together) + ',)'
    #     meta += [" unique_together = %s" % tup]
    # return meta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_meta_table_name(engine):\n class Model(engine.model):\n id = Column(UUID, hash_key=True)\n\n assert Model.Meta.table_name == \"Model\"\n\n class Other(engine.model):\n class Meta:\n table_name = \"table_name\"\n write_units = 3\n id = Column(UUID, hash_key=True)\n\n assert Other.Meta.table_name == \"table_name\"", "def model_table():\r\n class OccupationTable(tables.Table):\r\n class Meta:\r\n model = Occupation\r\n assert [\"id\", \"name\", \"region\"] == list(OccupationTable.base_columns.keys())\r\n\r\n class OccupationTable2(tables.Table):\r\n extra = tables.Column()\r\n\r\n class Meta:\r\n model = Occupation\r\n assert [\"id\", \"name\", \"region\", \"extra\"] == list(OccupationTable2.base_columns.keys())\r\n\r\n # be aware here, we already have *models* variable, but we're importing\r\n # over the top\r\n from django.db import models\r\n\r\n class ComplexModel(models.Model):\r\n char = models.CharField(max_length=200)\r\n fk = models.ForeignKey(\"self\")\r\n m2m = models.ManyToManyField(\"self\")\r\n\r\n class ComplexTable(tables.Table):\r\n class Meta:\r\n model = ComplexModel\r\n assert [\"id\", \"char\", \"fk\"] == list(ComplexTable.base_columns.keys())", "def sequence():\r\n class TestTable(tables.Table):\r\n a = tables.Column()\r\n b = tables.Column()\r\n c = tables.Column()\r\n assert [\"a\", \"b\", \"c\"] == TestTable([]).columns.names()\r\n assert [\"b\", \"a\", \"c\"] == TestTable([], sequence=(\"b\", \"a\", \"c\")).columns.names()\r\n\r\n class TestTable2(TestTable):\r\n class Meta:\r\n sequence = (\"b\", \"a\", \"c\")\r\n assert [\"b\", \"a\", \"c\"] == TestTable2([]).columns.names()\r\n assert [\"a\", \"b\", \"c\"] == TestTable2([], sequence=(\"a\", \"b\", \"c\")).columns.names()\r\n\r\n class TestTable3(TestTable):\r\n class Meta:\r\n sequence = (\"c\", )\r\n assert [\"c\", \"a\", \"b\"] == TestTable3([]).columns.names()\r\n assert [\"c\", \"a\", \"b\"] == TestTable([], sequence=(\"c\", )).columns.names()\r\n\r\n class TestTable4(TestTable):\r\n class Meta:\r\n sequence = (\"...\", )\r\n assert [\"a\", \"b\", \"c\"] == TestTable4([]).columns.names()\r\n assert [\"a\", \"b\", \"c\"] == TestTable([], sequence=(\"...\", )).columns.names()\r\n\r\n class TestTable5(TestTable):\r\n class Meta:\r\n sequence = (\"b\", \"...\")\r\n assert [\"b\", \"a\", \"c\"] == TestTable5([]).columns.names()\r\n assert [\"b\", \"a\", \"c\"] == TestTable([], sequence=(\"b\", \"...\")).columns.names()\r\n\r\n class TestTable6(TestTable):\r\n class Meta:\r\n sequence = (\"...\", \"b\")\r\n assert [\"a\", \"c\", \"b\"] == TestTable6([]).columns.names()\r\n assert [\"a\", \"c\", \"b\"] == TestTable([], sequence=(\"...\", \"b\")).columns.names()\r\n\r\n class TestTable7(TestTable):\r\n class Meta:\r\n sequence = (\"b\", \"...\", \"a\")\r\n assert [\"b\", \"c\", \"a\"] == TestTable7([]).columns.names()\r\n assert [\"b\", \"c\", \"a\"] == TestTable([], sequence=(\"b\", \"...\", \"a\")).columns.names()\r\n\r\n # Let's test inheritence\r\n class TestTable8(TestTable):\r\n d = tables.Column()\r\n e = tables.Column()\r\n f = tables.Column()\r\n\r\n class Meta:\r\n sequence = (\"d\", \"...\")\r\n\r\n class TestTable9(TestTable):\r\n d = tables.Column()\r\n e = tables.Column()\r\n f = tables.Column()\r\n\r\n assert [\"d\", \"a\", \"b\", \"c\", \"e\", \"f\"] == TestTable8([]).columns.names()\r\n assert [\"d\", \"a\", \"b\", \"c\", \"e\", \"f\"] == TestTable9([], sequence=(\"d\", \"...\")).columns.names()", "def _analyze_db_model(cls):\n attributes = dir(cls)\n\n table_attributes = {} # type: 
Dict[str, Union[ColumnProperty, RelationshipProperty]]\n properties = [] # type: List[str]\n mapper_attrs = inspect(cls).attrs\n\n _get_class_attributes(attributes, cls, properties, mapper_attrs, table_attributes)\n\n model_fields = []\n attributes_containing_lists = []\n normal_attributes = []\n unknown_attributes = []\n for name, attr in table_attributes.items():\n if isinstance(attr, RelationshipProperty):\n mapper = attr.mapper # type: Mapper\n if issubclass(mapper.class_, Taxonomy):\n normal_attributes.append((name, mapper.class_))\n model_fields.append(\"'{}': fields.Nested(taxonomy_item_get, description='{}'),\".format(name, mapper.class_.__name__))\n else:\n if name in properties:\n name = name.lstrip('_')\n attributes_containing_lists.append(name)\n classname = mapper.class_.__name__\n if 'To' in classname:\n classname = classname.split('To')[0]\n model_fields.append(\"'{}': fields.List(fields.Nested(taxonomy_item_get, description='{}'), default=[]),\".format(name, classname))\n else:\n model_fields.append(\"'{}': fields.List(fields.Raw(description='{}'), default=[]),\".format(name, classname))\n else:\n model_fields.append(\"'{}': fields.Raw(description='{}'),\".format(name, mapper.class_.__name__))\n normal_attributes.append((name, mapper.class_))\n if isinstance(attr, ColumnProperty):\n col = attr.columns[0] # type: Column\n zusatz = ''\n if col.default is not None:\n default = col.default.arg\n if isinstance(default, str):\n zusatz = \"default='{}'\".format(default)\n else:\n zusatz = 'default={}'.format(default)\n if isinstance(col.type, Integer):\n model_fields.append(\"'{}': fields.Integer({}),\".format(name, zusatz))\n if name != 'id':\n normal_attributes.append((name, int))\n elif isinstance(col.type, Float):\n model_fields.append(\"'{}': fields.Float({}),\".format(name, zusatz))\n normal_attributes.append((name, float))\n elif isinstance(col.type, String):\n model_fields.append(\"'{}': fields.String({}),\".format(name, zusatz))\n normal_attributes.append((name, str))\n elif isinstance(col.type, Boolean):\n model_fields.append(\"'{}': fields.Boolean({}),\".format(name, zusatz))\n normal_attributes.append((name, bool))\n elif isinstance(col.type, Date):\n model_fields.append(\"'{}': fields.Date({}),\".format(name, zusatz))\n normal_attributes.append((name, date))\n else:\n unknown_attributes.append((name, col.type))\n\n return {\n 'model_fields': model_fields,\n 'normal_attributes': tuple(normal_attributes),\n 'attributes_containing_lists': tuple(attributes_containing_lists),\n 'unknown_attributes': tuple(unknown_attributes),\n }", "def construct_base_table(self):\n # Get requested biological entities.\n req_biomodels = [exp.biomodel for exp in self.req_exps]\n # Remove duplicates.\n req_biomodels = list(unique_everseen(req_biomodels))\n # Order according to the linearized models.\n relations = Relations(self.app_label, model_superclass=(BiologicalModel, MetaModel))\n relations.start_join()\n linearized_biomodels = relations.linearized\n req_biomodels = sort_by_other(req_biomodels, \n order=linearized_biomodels)\n # Construct the SQL statement.\n sql = self.construct_result_table(req_biomodels)\n return sql", "def should_support_both_meta_sequence_and_constructor_exclude():\r\n class SequencedTable(tables.Table):\r\n a = tables.Column()\r\n b = tables.Column()\r\n c = tables.Column()\r\n\r\n class Meta:\r\n sequence = ('a', '...')\r\n\r\n table = SequencedTable([], exclude=('c', ))\r\n table.as_html()", "def generate_model(\n self,\n table: Dict,\n singular: bool = True,\n 
exceptions: Optional[List] = None,\n schema_global: Optional[bool] = True,\n *args,\n **kwargs,\n ) -> str:\n model = \"\"\n model = gt.model_template.format(\n model_name=create_class_name(table.name, singular, exceptions),\n table_name=table.name,\n )\n for column in table.columns:\n model += logic.generate_column(column, table.primary_key, table, gt, self)\n if table.indexes or table.alter or table.checks or not schema_global:\n model = logic.add_table_args(self, model, table, schema_global)\n # create sequence\n return model", "def create_table(self, context, connection, *, engine):\n yield Table(self.table_name, MetaData(), autoload=True, autoload_with=engine)", "def model_to_sequencesql(self, m):\n from django.db import connection\n\n # tbl has app_label prefix; e.g., testapp_simple\n tbl = m._meta.db_table\n\n # Get name of sequence for this table. Here's\n # a trace from doing it manually.\n #\n # sql> select \"default\" from sys.columns\n # more> where table_id = 4186 and name = 'id';\n # +-------------------------------------+\n # | default |\n # +=====================================+\n # | next value for \"django1\".\"seq_4176\" |\n # +-------------------------------------+\n # 1 tuple\n # sql>\n #\n\n c = connection.cursor()\n fmt = \"\"\"\nSELECT\n \"default\"\nFROM\n sys.columns\nWHERE\n table_id = (SELECT id FROM sys.tables where name = %s) AND\n name = 'id'\n;\n\"\"\"\n c.execute(fmt, [tbl, ])\n row = c.fetchone()\n # default = 'next value for \"django1\".\"seq_4176\"'\n default = row[0]\n p = default.rfind('\".\"seq_')\n if p == -1:\n return ''\n\n # seq = '\"seq_4176\"'\n seq = default[p + 2:]\n\n fmt = 'ALTER SEQUENCE %s RESTART WITH (SELECT MAX(id) + 1 FROM %s);'\n\n return fmt % (seq, tbl)", "def sequence():\n class TestTable(tables.Table):\n a = tables.Column()\n b = tables.Column()\n c = tables.Column()\n Assert([\"a\", \"b\", \"c\"]) == TestTable([]).columns.names()\n Assert([\"b\", \"a\", \"c\"]) == TestTable([], sequence=(\"b\", \"a\", \"c\")).columns.names()\n\n class TestTable2(TestTable):\n class Meta:\n sequence = (\"b\", \"a\", \"c\")\n Assert([\"b\", \"a\", \"c\"]) == TestTable2([]).columns.names()\n Assert([\"a\", \"b\", \"c\"]) == TestTable2([], sequence=(\"a\", \"b\", \"c\")).columns.names()\n\n # BAD, all columns must be specified, or must use \"...\"\n with Assert.raises(ValueError):\n class TestTable3(TestTable):\n class Meta:\n sequence = (\"a\", )\n with Assert.raises(ValueError):\n TestTable([], sequence=(\"a\", ))\n\n # GOOD, using a single \"...\" allows you to only specify some columns. 
The\n # remaining columns are ordered based on their definition order\n class TestTable4(TestTable):\n class Meta:\n sequence = (\"...\", )\n Assert([\"a\", \"b\", \"c\"]) == TestTable4([]).columns.names()\n Assert([\"a\", \"b\", \"c\"]) == TestTable([], sequence=(\"...\", )).columns.names()\n\n class TestTable5(TestTable):\n class Meta:\n sequence = (\"b\", \"...\")\n Assert([\"b\", \"a\", \"c\"]) == TestTable5([]).columns.names()\n Assert([\"b\", \"a\", \"c\"]) == TestTable([], sequence=(\"b\", \"...\")).columns.names()\n\n class TestTable6(TestTable):\n class Meta:\n sequence = (\"...\", \"b\")\n Assert([\"a\", \"c\", \"b\"]) == TestTable6([]).columns.names()\n Assert([\"a\", \"c\", \"b\"]) == TestTable([], sequence=(\"...\", \"b\")).columns.names()\n\n class TestTable7(TestTable):\n class Meta:\n sequence = (\"b\", \"...\", \"a\")\n Assert([\"b\", \"c\", \"a\"]) == TestTable7([]).columns.names()\n Assert([\"b\", \"c\", \"a\"]) == TestTable([], sequence=(\"b\", \"...\", \"a\")).columns.names()\n\n # Let's test inheritence\n class TestTable8(TestTable):\n d = tables.Column()\n e = tables.Column()\n f = tables.Column()\n\n class Meta:\n sequence = (\"d\", \"...\")\n\n class TestTable9(TestTable):\n d = tables.Column()\n e = tables.Column()\n f = tables.Column()\n\n Assert([\"d\", \"a\", \"b\", \"c\", \"e\", \"f\"]) == TestTable8([]).columns.names()\n Assert([\"d\", \"a\", \"b\", \"c\", \"e\", \"f\"]) == TestTable9([], sequence=(\"d\", \"...\")).columns.names()", "def find_sql_models():\n for model in find_subclasses(SQLModel):\n # Get model Meta class\n meta = getattr(model, 'Meta', None)\n if meta:\n # If this is marked as abstract ignore it\n if getattr(meta, 'abstract', False):\n continue\n yield model", "def db_table(self):", "def create_table(cls):\n\n dbmanager = cls.dbm()\n\n # ask model to define its internal fields\n fields = cls.define_fields(dbmanager)\n cls.extend_fields(fields, True)\n # now hash the fieldlist so we can look up fields by name\n cls.hash_fieldlist()\n\n # get the sqlahelper for this schema (usually default one shared by all models), plus some info\n dbtablename = cls.get_dbtablename()\n dbschemaname = cls.get_dbschemaname()\n sqlahelper = dbmanager.get_sqlahelper(dbschemaname)\n metadata = sqlahelper.getmake_metadata()\n\n # build sqlalchemy columns from fields\n sqlalchemycolumns = cls.create_sqlalchemy_columns_from_dbfields()\n\n # tell sqlalchemy to build table object from columns\n modeltable = sqlalchemy.Table(dbtablename, metadata, *sqlalchemycolumns)\n\n # and store the table and other object references in the model class itself\n cls.setclass_dbinfo(modeltable, sqlahelper)", "def next_record(self) -> Iterable[DatabaseMetadata]:\n for key, group in groupby(self._get_raw_extract_iter(), get_table_key):\n columns = []\n for row in group:\n last_row = row\n col_type = ''\n if row['col_type'].upper() == 'CHARACTER VARYING':\n col_type = 'VARCHAR'\n elif row['col_type'].upper() == 'CHARACTER' or row['col_type'].upper() == 'NAME':\n col_type = 'CHAR'\n elif row['col_type'].upper() == 'INTEGER':\n col_type = 'INT'\n elif row['col_type'].upper() == 'TIMESTAMP WITHOUT TIME ZONE':\n col_type = 'TIMESTAMP'\n elif row['col_type'].upper() == 'DOUBLE PRECISION':\n col_type = 'DOUBLE'\n elif row['col_type'].upper() == 'OID':\n col_type = 'NUMBER'\n elif row['col_type'].upper() == 'ARRAY':\n col_type = 'ARRAY'\n elif row['col_type'].upper() == 'BOOLEAN':\n col_type = 'BOOLEAN'\n else:\n col_type = None\n if not 
self.pattern.filter_pattern.included(f'{last_row[1]}.{last_row[2]}'):\n self.status.filtered(f'{last_row[1]}.{last_row[2]}', \"pattern not allowed\", last_row[2])\n continue\n if col_type is not None:\n columns.append(Column(name=row['col_name'], description=row['col_description'],\n columnDataType=col_type, ordinalPosition=int(row['col_sort_order'])))\n table_metadata = Table(id=uuid.uuid4(), name=last_row['name'],\n description=last_row['description'],\n columns=columns)\n\n self.status.scanned(table_metadata.name.__root__)\n\n dm = Database(id=uuid.uuid4(),\n name=row['schema'],\n description=row['description'] if row['description'] is not None else ' ',\n service=EntityReference(id=self.service.id, type=self.SERVICE_TYPE))\n table_and_db = OMetaDatabaseAndTable(table=table_metadata, database=dm)\n yield table_and_db", "def map_database(connection):\n eng = create_engine(connection)\n metadata = MetaData()\n metadata.reflect(eng)\n base = automap_base(metadata=metadata)\n base.prepare()\n return base.classes, eng", "def __tablename__(cls):\n return get_table_name(cls.__name__)", "def model_table(name, *fields, app_label='internal'):\n model = apps.get_model(app_label, name)\n items = model.objects.all().values_list(*fields)\n field_names = [model._meta.get_field(field).verbose_name\n for field in fields]\n return {'items': items, 'fields': field_names}", "def TTableModel(metadata=None):\n\n from sqlalchemy import MetaData\n\n @add_metaclass(ComplexModelMeta)\n class TableModel(TTableModelBase()):\n class Attributes(ComplexModelBase.Attributes):\n sqla_metadata = metadata or MetaData()\n\n return TableModel", "def parse(self, lines):\n # Keep count of the current line number.\n i = 0\n # list tables and content\n tables = dict()\n attr_param = list()\n\n skipped_lines = list() # DEBUG\n\n # Loop through all lines.\n for i in range(0, len(lines)):\n line_stripped = lineNormalise(lines[i])\n skip = True\n\n for keyword in self.target_keywords:\n\n # Look for keywords at the beginning of the line.\n if line_stripped.startswith(keyword):\n # print(\"{} : {}\".format(i, line_stripped)) # DEBUG\n skip = False\n\n # Found one, do parse\n expression = re.search(r'(\\w+) (\\w+)', line_stripped)\n if keyword is self.target_keywords[0]: # class/table\n # get table name\n table_name = expression.group(2)\n\n # add it in tables if not already in\n # tables (classes) may be at differant place in a PlantUML file\n if table_name not in tables:\n tables[table_name] = list()\n # print(\"Table : «{}» ajoutee\".format(expression.group(2))) # DEBUG\n print(\"{} : +table «{}»\".format(i, table_name)) # DEBUG\n\n elif keyword is self.target_keywords[1]: # primary key\n # import pdb; pdb.set_trace()\n # get related table\n attr_param = (re.sub(r'(pyk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n elif keyword is self.target_keywords[2]: # foreign key\n # get related table\n attr_param = (re.sub(r'(fnk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n\n elif keyword is self. 
target_keywords[3]: # primary foreign key\n # get related table\n attr_param = (re.sub(r'(pfk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n else: # attribute\n # print(line_stripped) # DEBUG\n print(\"{} : \\t«{}» Attribute? {}\".format(i, line_stripped)) # DEBUG\n\n if skip:\n skipped_lines.append(i)\n\n print(\"\\nNumbers of tables : {}\\n\".format(len(tables)))\n pp = pprint.PrettyPrinter(indent=4, compact=True)\n print(\"Scraped data:\")\n pp.pprint(tables) # DEBUG\n print(\"\\nSkipped lines: {}\\n\".format(skipped_lines)) # DEBUG", "def init_model(engine):\n ## Reflected tables must be defined and mapped here\n #global reflected_table\n #reflected_table = sa.Table(\"Reflected\", meta.metadata, autoload=True,\n # autoload_with=engine)\n #orm.mapper(Reflected, reflected_table)\n #\n meta.Session.configure(bind=engine)\n meta.engine = engine", "def create_prerequisites(cls, dbmanager):\n subfields = mdbmixins.dbfmixins_authortracker()\n if (cls.flag_mixin_atroot):\n # prepare extra fields that will be added at root; this doesnt actually create any helper models\n cls.extend_fields(subfields)\n else:\n # add a special sub table that will contain some fields, using a helper class object attached to us\n # create (AND REGISTER) the new helper object\n backrefname = cls.get_dbtablename_pure()\n mdbmodel_fieldset.MewloDbFieldset.make_fieldset_dbobjectclass(cls,'tracking','author tracking object',backrefname,dbmanager,subfields)", "def meta(cls):\n if getattr(cls, '__from_class__', None) is not None:\n cls = cls.__from_class__\n attribute_info = {}\n for name, value in cls.__table__.columns.items():\n attribute_info[name] = str(value.type).lower()\n\n return {cls.__name__: attribute_info}", "def meta_db_tables(self) -> list:\r\n def _passer(**kwargs):\r\n data = self.engine.execute(\"\"\"\r\n SELECT * FROM sqlite_master WHERE type='table';\r\n \"\"\").fetchall()\r\n table_names = [i[1] for i in data]\r\n return table_names\r\n return self._connectionController(_passer)", "def generateModelClass(self):\n\t\tself.printt_cls(\"export class {} {}\".format(self.objName, \"{\"))\n\t\tfor col in self.objSchema[\"fields\"]:\n\t\t\tcolName = col[\"name\"]\n\t\t\tcolType = col[\"type\"]\n\t\t\tself.printt_cls(\"\\t{} : {};\".format(colName, self.JS_DATA_TYPES[colType]))\n\t\tself.printt_cls(\"}\")\n\t\tself.printt_cls(\"\")", "def __init__(self, table):\n self.thingy_table = table\n self.thingy_type = table.__name__\n\n # Match [...]{tablename}\n regex = ur'(?x) \\[ ([^]]+) \\] \\s* \\{' + table.__singlename__ + ur'(:[^}]+)?\\}'\n\n # old-style classes augh!\n markdown.inlinepatterns.Pattern.__init__(self, regex)", "def create_table(cls, *args, **kwargs):\n init = cls._meta.database.create_table_title(cls._meta.table_name)\n i = 1\n fields = zip(cls._meta.sorted_fields_names, cls._meta.sorted_fields)\n for field in fields:\n field_string = field[1].create_field(field[0])\n if i == len(fields):\n if cls._meta.unique:\n init = cls._meta.database.create_unique(init, cls._meta.unique)\n\n init = cls._meta.database.create_table_field_end(init, field_string)\n\n if cls._meta.hypertable:\n init = cls._meta.database.create_hypertable(init,\n cls._meta)\n else:\n init = cls._meta.database.create_table_field(init, field_string)\n i+=1\n\n yield cls._meta.database.runOperation(init)", "def create_tables(self):\n for name, attribute in self.__dict__.items():\n if hasattr(attribute, 
'create_table_in_sqlite_db'):\n attribute.create_table_in_sqlite_db()", "def __init__(self, *args):\n self.engine = db.create_engine('mysql+pymysql://root:''@127.0.0.1:3306/northwind', echo=True)\n self.connection = self.engine.connect()\n self.metadata = db.MetaData()\n self.tables = db.Table(*args, self.metadata, autoload=True, autoload_with=self.engine)", "def setup_schema(BaseDao, session):\n def setup_schema_fn():\n for class_ in BaseDao._decl_class_registry.values():\n if hasattr(class_, '__tablename__'):\n if class_.__name__.endswith('Schema'):\n raise ModelConversionError(\n \"For safety, setup_schema can not be used when a\"\n \"Model class ends with 'Schema'\"\n )\n\n class Meta(object):\n model = class_\n sqla_session = session\n dump_only = ('pkId', 'created', 'modified')\n\n schema_class_name = '%sSchema' % class_.__name__\n\n schema_class = type(\n schema_class_name,\n (ModelSchema,),\n {'Meta': Meta}\n )\n\n setattr(class_, '__marshmallow__', schema_class)\n\n return setup_schema_fn", "def create_models( self ):", "def construct_result_table(self, biomodels):\n select = 'SELECT DISTINCT '\n cached_alias = 'c'\n join_exps = 'FROM %s_%s AS %s' % (self.app_label, \n settings.DISBI['JOINED_TABLENAME'], \n cached_alias)\n left_join_template = '''\n LEFT JOIN (\n %s\n ) AS %s\n ON (%s = %s)\n '''\n subtables_not_null_column = []\n select_bios = []\n relations = Relations(self.app_label, model_superclass=(BiologicalModel, MetaModel))\n for biomodel in biomodels:\n # Requested experiments related to biomodel.\n req_exps_for_bio = [] \n for exp in self.req_exps:\n if exp.biomodel == biomodel:\n req_exps_for_bio.append(exp)\n \n select_bios.extend(self.get_show_columns(biomodel))\n \n\n # Get Meta models for Bio model and the show columns to the \n # SELECT clause.\n metamodels_of_biomodel = relations.get_related_metamodels(biomodel)\n #print(metamodels_of_biomodel)\n for metamodel in metamodels_of_biomodel:\n select_bios.extend(self.get_show_columns(metamodel))\n\n \n for exp in req_exps_for_bio:\n select_bios.extend(self.get_display_names(exp))\n exp_alias = 'exp%s' % exp.pk\n join_exps += left_join_template % (\n self.construct_exptable(exp),\n exp_alias,\n '%s.%s_id' % (cached_alias, biomodel.__name__.lower()),\n '%s.%s' % (exp_alias, exp.biofield.column)\n )\n subtables_not_null_column.append((self.get_notnull_column(exp), \n str(exp.id)))\n exclude_empty = 'WHERE ' + ' OR '.join(['%s_%s IS NOT NULL' % col \n for col in subtables_not_null_column])\n \n sql = '\\n'.join((select + ', '.join(select_bios), join_exps, exclude_empty))\n return re.sub(r'^\\s+', '', sql, flags=re.MULTILINE)", "def get_data_models(models_file):\n list_models = []\n model = []\n pos_numeric = [] # Position of numeric fields\n info_keys = [] # Info. 
about keys\n re_field = re.compile('\\s+\\w+\\s*=\\s*models\\.') # Line with field name\n re_class = re.compile('\\s+class ') # For Admin and Meta\n re_def = re.compile('\\s+def ')\n is_new_model = False\n\n for line in open(models_file):\n # The models start with 'class'\n if not is_new_model and line.startswith('class'):\n model_name = line.replace('class','').split('(')[0].strip()\n model.append(model_name)\n is_new_model = True\n elif is_new_model:\n if re_field.match(line):\n field_name = line.split('=')[0].strip()\n model.append(field_name)\n\n if 'models.DecimalField' in line or 'models.IntegerField' in line:\n pos_numeric.append(len(model)-2) # Discard model name.\n elif 'models.ForeignKey' in line:\n key_name = line.split('(')[-1].strip().strip(')')\n position = len(model)-2 # Discard model name.\n info_keys.append(':')\n info_keys.append(str(position) + ',')\n info_keys.append(key_name)\n # It is supposed that models in localization has at the end:\n # ('class Meta', 'class Admin', or some 'def')\n elif re_class.match(line) or re_def.match(line):\n if pos_numeric:\n pos_num2str = '#'\n for num in pos_numeric:\n pos_num2str += str(num)\n model.append(pos_num2str)\n model.append(':N') # To detect the numeric field.\n pos_numeric = []\n if info_keys:\n all_keys = \"\"\n for key in info_keys:\n all_keys += key\n model.append(all_keys)\n model.append(':K') # To detect fastly some key.\n info_keys = []\n list_models.append(model)\n model = []\n is_new_model = False\n\n return list_models", "async def _create_tables_declarative(self, base, engine):\n if hasattr(base, 'metadata'):\n base.metadata.create_all(bind=engine, checkfirst=True)\n return", "def __tablename__(self):\n return sub(r\"(?<!^)(?=[A-Z])\", \"_\", self.__name__).lower()", "def get_metas(self):\n return self.get_meta_classes() + self.get_meta_functions()", "def get_table_meta(pudl_engine):\n md = sa.MetaData()\n md.reflect(pudl_engine)\n return md.tables", "def load_metadata(self):\n self.meta[\"user_tables\"] = pd.read_sql(self.SQL[\"User Tables\"], self.engine)\n self.meta[\"all_tables\"] = pd.read_sql(self.SQL[\"All Tables\"], self.engine)\n self.meta[\"all_databases\"] = pd.read_sql(self.SQL[\"All Databases\"], self.engine)", "def _get_model_class_from_table(self, table):\r\n try:\r\n model_class = [m for m in get_models() if connection.introspection.table_name_converter(m._meta.db_table) in map(connection.introspection.table_name_converter,[table])][0] \r\n m2m = False \r\n except IndexError:\r\n try: \r\n # this is a many to many field \r\n model_class = [f.rel.to for m in get_models() for f in m._meta.local_many_to_many if f.m2m_db_table() == table][0] \r\n m2m = True \r\n except IndexError: \r\n # this is an inner join \r\n table = self.query.alias_map[table][0]\r\n return self._get_model_class_from_table(table)\r\n return model_class, m2m", "def _get_models_from_metafile(dir: str):\n meta_indexes = load(osp.join(dir, 'model-index.yml'))\n for meta_path in meta_indexes['Import']:\n # meta_path example: mmcls/.mim/configs/conformer/metafile.yml\n meta_path = osp.join(dir, meta_path)\n metainfo = load(meta_path)\n yield from metainfo['Models']", "def meta_table_metadata(self, table_name: str) -> list:\r\n def _passer(**kwargs):\r\n return self.engine.execute(\r\n f\"PRAGMA table_info({kwargs['table_name']});\").fetchall()\r\n return self._connectionController(_passer, table_name=table_name)", "def _get_model_from_table_name(table_name: str) -> Optional[Type[RDSModel]]:\n table_model = None\n try:\n if hasattr(Base, 
'_decl_class_registry'):\n models = Base._decl_class_registry.values() # sqlalchemy < 1.4\n else:\n models = Base.registry._class_registry.values()\n\n for model in models:\n if hasattr(model, '__tablename__') and model.__tablename__ == table_name:\n table_model = model\n except Exception as e:\n LOGGER.exception(f'Failed to get model for the table: {table_name} from rds model base')\n raise e\n\n return table_model", "def db_for_read(self, model, **hints):\n model_name = model._meta.label_lower\n pos = model_name.find('.')\n table_name = model_name[pos+1:]\n if table_name in self.route_encuestas:\n return 'encuestas'\n elif table_name in self.route_uxxienc_resul:\n return 'uxxienc_resul'\n return None", "def __tablename__(cls) -> str:\n return inflection.underscore(cls.__name__)", "def model_definition(self):\n pass", "def show_database_structure(self):\n self.analyze()\n items = []\n for model in get_models():\n names = []\n # for f, m in model._meta.get_fields_with_model():\n for f in model._meta.concrete_fields:\n names.append(f.name)\n items.append(\n \"{0} : {1}\".format(fmn(model), ', '.join(names)))\n\n items = sorted(items)\n return rstgen.ul(items)", "def create_intermediary_table_model(model):\n name = model.__name__ + 'Relation'\n \n class Meta:\n db_table = '%s_relation' % model._meta.db_table\n unique_together = (('tag', 'content_type', 'object_id'),)\n\n def obj_unicode(self):\n return u'%s [%s]' % (self.content_type.get_object_for_this_type(pk=self.object_id), self.tag)\n \n # Set up a dictionary to simulate declarations within a class \n attrs = {\n '__module__': model.__module__,\n 'Meta': Meta,\n 'tag': models.ForeignKey(model, verbose_name=_('tag'), related_name='items'),\n 'content_type': models.ForeignKey(ContentType, verbose_name=_('content type')),\n 'object_id': models.PositiveIntegerField(_('object id'), db_index=True),\n 'content_object': generic.GenericForeignKey('content_type', 'object_id'),\n '__unicode__': obj_unicode,\n }\n\n return type(name, (models.Model,), attrs)", "def map_def_classes(self, table):\n definition = MapperDefinition()\n for rc in self.get_table_classes(table):\n splitted = rc.split()\n abbreviation = \" \".join(splitted[:-1])\n course_number = splitted[-1]\n definition.add(abbreviation, allowed=[course_number])\n return definition", "def test_custom_metadata_schema(self):\n # The use-case for this functionality is to allow using\n # Foreign Data Wrappers, each with a full set of Django\n # tables, to copy between databases using SQLAlchemy\n # and the automatically generation of aldjemy.\n metadata = MetaData(schema=\"arbitrary\")\n sa_models = construct_models(metadata)\n self.assertEqual(sa_models[Log].__table__.schema, \"arbitrary\")", "def fetchall(self):\n rows = self.cursor.fetchall()\n\n if self.model.single:\n for row in rows:\n yield self.__instance_from_db(self.model, row)\n else:\n for row in rows:\n yield tuple(self.__instance_from_db(m, row) for m in self.model.models)", "def __new__(cls, name, bases, ns):\n newbases = []\n table = None\n namer = None\n\n for base in bases:\n if isinstance(base, fromTable):\n if table is not None:\n raise RuntimeError(\n \"Can't define a class from two or more tables at once.\"\n )\n table = base.table\n elif getattr(base, \"table\", None) is not None:\n raise RuntimeError(\n \"Can't define a record class by inheriting one already \"\n \"mapped to a table.\"\n # TODO: more info\n )\n else:\n if namer is None:\n if isinstance(base, _RecordMeta):\n namer = base\n newbases.append(base)\n\n if 
table is not None:\n attrmap = {}\n colmap = {}\n allColumns = list(table)\n for column in allColumns:\n attrname = namer.namingConvention(column.model.name)\n attrmap[attrname] = column\n colmap[column] = attrname\n ns.update(table=table, __attrmap__=attrmap, __colmap__=colmap)\n ns.update(attrmap)\n\n return super(_RecordMeta, cls).__new__(cls, name, tuple(newbases), ns)", "def __init__(self, name, sequence_number):\n super(SQLTableIdentifier, self).__init__()\n self.name = name\n self.sequence_number = sequence_number", "def create_tables(self):\n tables = {}\n for cls in find_sql_models():\n table = cls.__table__\n if table is None:\n table = cls.__table__ = create_table(cls, self.metadata)\n if not table.metadata.bind:\n table.metadata.bind = SQLBinding(manager=self, table=table)\n tables[cls] = table\n return tables", "def init_models(self):\n from ron import Application\n from ron.models.basemodel import BaseModel\n if self.models == None or not Application().db:\n return\n models_namespace = self.__namespace + \".models\" # TODO: allow customize this\n try:\n models_package = import_module(models_namespace)\n except:\n models_package = None\n if models_package:\n models_modules = self._get_package_modules(models_package)\n for model_name in models_modules:\n imported_model = import_module('.' + model_name, package=models_namespace)\n for i in dir(imported_model):\n attribute = getattr(imported_model, i)\n if inspect.isclass(attribute) and issubclass(attribute, BaseModel):\n self.models.append(attribute)\n Application().db().database.create_tables(self.models)", "def _create_tables_classic(self, engine, metadata):\n if engine and metadata:\n with (yield from engine) as conn:\n for x in self._models.values():\n try:\n yield from conn.execute(CreateTable(x))\n except ProgrammingError as error:\n if hasattr(self.app, 'log') and self.app.log:\n if self.app.debug:\n self.app.log.info(\"[PostgressPlugin] [ `{}` already exists]\".format(x))\n else:\n if self.app.debug:\n print(\"[PostgressPlugin] [ `{}` already exists]\".format(x))\n return", "def GenMetaTableParam(a_name):\n table_param = Table_Param()\n if a_name == \"meta\":\n table_param.NAME = meta_table_name\n table_param.COLS = \\\n (Column(\"version\", String(50), primary_key=True, index=False),\n Column('site', String),\n Column('pairDbFile', String),\n Column('dumpType', String),\n Column('lock', Integer),\n Column('prNumber', String),\n Column('kbUrl', String),\n Column('desc', String))\n if a_name == \"fixby\":\n table_param.NAME = fixby_table_name\n table_param.COLS = \\\n (Column(\"id\", Integer, primary_key=True), # will auto increase\n Column('module', String),\n Column('desc', String))\n\n return table_param", "def build_metadata():\n metadata = sa.MetaData()\n\n sa.Table(\n 'hive_blocks', metadata,\n sa.Column('num', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('hash', CHAR(40), nullable=False),\n sa.Column('prev', CHAR(40)),\n sa.Column('txs', SMALLINT, server_default='0', nullable=False),\n sa.Column('ops', SMALLINT, server_default='0', nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.UniqueConstraint('hash', name='hive_blocks_ux1'),\n sa.ForeignKeyConstraint(['prev'], ['hive_blocks.hash'], name='hive_blocks_fk1'),\n )\n\n sa.Table(\n 'hive_accounts', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('name', VARCHAR(16), nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n #sa.Column('block_num', sa.Integer, nullable=False),\n 
sa.Column('reputation', sa.Float(precision=6), nullable=False, server_default='25'),\n\n sa.Column('display_name', sa.String(20)),\n sa.Column('about', sa.String(160)),\n sa.Column('location', sa.String(30)),\n sa.Column('website', sa.String(100)),\n sa.Column('profile_image', sa.String(1024), nullable=False, server_default=''),\n sa.Column('cover_image', sa.String(1024), nullable=False, server_default=''),\n\n sa.Column('followers', sa.Integer, nullable=False, server_default='0'),\n sa.Column('following', sa.Integer, nullable=False, server_default='0'),\n\n sa.Column('proxy', VARCHAR(16), nullable=False, server_default=''),\n sa.Column('post_count', sa.Integer, nullable=False, server_default='0'),\n sa.Column('proxy_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('vote_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('kb_used', sa.Integer, nullable=False, server_default='0'), # deprecated\n sa.Column('rank', sa.Integer, nullable=False, server_default='0'),\n\n sa.Column('lastread_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('active_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('cached_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('raw_json', sa.Text),\n\n\n sa.UniqueConstraint('name', name='hive_accounts_ux1'),\n sa.Index('hive_accounts_ix1', 'vote_weight', 'id'), # core: quick ranks\n sa.Index('hive_accounts_ix2', 'name', 'id'), # core: quick id map\n sa.Index('hive_accounts_ix3', 'vote_weight', 'name', postgresql_ops=dict(name='varchar_pattern_ops')), # API: lookup\n sa.Index('hive_accounts_ix4', 'id', 'name'), # API: quick filter/sort\n sa.Index('hive_accounts_ix5', 'cached_at', 'name'), # core/listen sweep\n )\n\n sa.Table(\n 'hive_posts', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('parent_id', sa.Integer),\n sa.Column('author', VARCHAR(16), nullable=False),\n sa.Column('permlink', VARCHAR(255), nullable=False),\n sa.Column('category', VARCHAR(255), nullable=False, server_default=''),\n sa.Column('community_id', sa.Integer, nullable=True),\n sa.Column('created_at', sa.DateTime, nullable=False),\n sa.Column('depth', SMALLINT, nullable=False),\n sa.Column('is_deleted', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_pinned', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_muted', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_valid', BOOLEAN, nullable=False, server_default='1'),\n sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n\n sa.ForeignKeyConstraint(['author'], ['hive_accounts.name'], name='hive_posts_fk1'),\n sa.ForeignKeyConstraint(['parent_id'], ['hive_posts.id'], name='hive_posts_fk3'),\n sa.UniqueConstraint('author', 'permlink', name='hive_posts_ux1'),\n sa.Index('hive_posts_ix3', 'author', 'depth', 'id', postgresql_where=sql_text(\"is_deleted = '0'\")), # API: author blog/comments\n sa.Index('hive_posts_ix4', 'parent_id', 'id', postgresql_where=sql_text(\"is_deleted = '0'\")), # API: fetching children\n sa.Index('hive_posts_ix5', 'id', postgresql_where=sql_text(\"is_pinned = '1' AND is_deleted = '0'\")), # API: pinned post status\n sa.Index('hive_posts_ix6', 'community_id', 'id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_pinned = '1' AND is_deleted = '0'\")), # API: community pinned\n )\n\n sa.Table(\n 'hive_post_tags', metadata,\n sa.Column('post_id', sa.Integer, 
nullable=False),\n sa.Column('tag', sa.String(32), nullable=False),\n sa.UniqueConstraint('tag', 'post_id', name='hive_post_tags_ux1'), # core\n sa.Index('hive_post_tags_ix1', 'post_id'), # core\n )\n\n sa.Table(\n 'hive_follows', metadata,\n sa.Column('follower', sa.Integer, nullable=False),\n sa.Column('following', sa.Integer, nullable=False),\n sa.Column('state', SMALLINT, nullable=False, server_default='1'),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.UniqueConstraint('following', 'follower', name='hive_follows_ux3'), # core\n sa.Index('hive_follows_ix5a', 'following', 'state', 'created_at', 'follower'),\n sa.Index('hive_follows_ix5b', 'follower', 'state', 'created_at', 'following'),\n )\n\n sa.Table(\n 'hive_reblogs', metadata,\n sa.Column('account', VARCHAR(16), nullable=False),\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.ForeignKeyConstraint(['account'], ['hive_accounts.name'], name='hive_reblogs_fk1'),\n sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_reblogs_fk2'),\n sa.UniqueConstraint('account', 'post_id', name='hive_reblogs_ux1'), # core\n sa.Index('hive_reblogs_ix1', 'post_id', 'account', 'created_at'), # API -- not yet used\n )\n\n sa.Table(\n 'hive_payments', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('block_num', sa.Integer, nullable=False),\n sa.Column('tx_idx', SMALLINT, nullable=False),\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('from_account', sa.Integer, nullable=False),\n sa.Column('to_account', sa.Integer, nullable=False),\n sa.Column('amount', sa.types.DECIMAL(10, 3), nullable=False),\n sa.Column('token', VARCHAR(5), nullable=False),\n\n sa.ForeignKeyConstraint(['from_account'], ['hive_accounts.id'], name='hive_payments_fk1'),\n sa.ForeignKeyConstraint(['to_account'], ['hive_accounts.id'], name='hive_payments_fk2'),\n sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_payments_fk3'),\n )\n\n sa.Table(\n 'hive_feed_cache', metadata,\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('account_id', sa.Integer, nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n sa.UniqueConstraint('post_id', 'account_id', name='hive_feed_cache_ux1'), # core\n sa.Index('hive_feed_cache_ix1', 'account_id', 'post_id', 'created_at'), # API (and rebuild?)\n )\n\n sa.Table(\n 'hive_posts_cache', metadata,\n sa.Column('post_id', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('author', VARCHAR(16), nullable=False),\n sa.Column('permlink', VARCHAR(255), nullable=False),\n sa.Column('category', VARCHAR(255), nullable=False, server_default=''),\n\n # important/index\n sa.Column('community_id', sa.Integer, nullable=True),\n sa.Column('depth', SMALLINT, nullable=False, server_default='0'),\n sa.Column('children', SMALLINT, nullable=False, server_default='0'),\n\n # basic/extended-stats\n sa.Column('author_rep', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('flag_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('total_votes', sa.Integer, nullable=False, server_default='0'),\n sa.Column('up_votes', sa.Integer, nullable=False, server_default='0'),\n\n # basic ui fields\n sa.Column('title', sa.String(255), nullable=False, server_default=''),\n sa.Column('preview', sa.String(1024), nullable=False, server_default=''),\n sa.Column('img_url', sa.String(1024), nullable=False, server_default=''),\n\n # core stats/indexes\n 
sa.Column('payout', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n sa.Column('created_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('payout_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('updated_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('is_paidout', BOOLEAN, nullable=False, server_default='0'),\n\n # ui flags/filters\n sa.Column('is_nsfw', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_declined', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_full_power', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_hidden', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_grayed', BOOLEAN, nullable=False, server_default='0'),\n\n # important indexes\n sa.Column('rshares', sa.BigInteger, nullable=False, server_default='0'),\n sa.Column('sc_trend', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('sc_hot', sa.Float(precision=6), nullable=False, server_default='0'),\n\n # bulk data\n sa.Column('body', TEXT),\n sa.Column('votes', TEXT),\n sa.Column('json', sa.Text),\n sa.Column('raw_json', sa.Text),\n\n # index: misc\n sa.Index('hive_posts_cache_ix3', 'payout_at', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # core: payout sweep\n sa.Index('hive_posts_cache_ix8', 'category', 'payout', 'depth', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: tag stats\n\n # index: ranked posts\n sa.Index('hive_posts_cache_ix2', 'promoted', postgresql_where=sql_text(\"is_paidout = '0' AND promoted > 0\")), # API: promoted\n\n sa.Index('hive_posts_cache_ix6a', 'sc_trend', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: trending todo: depth=0\n sa.Index('hive_posts_cache_ix7a', 'sc_hot', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: hot todo: depth=0\n sa.Index('hive_posts_cache_ix6b', 'post_id', 'sc_trend', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: trending, filtered todo: depth=0\n sa.Index('hive_posts_cache_ix7b', 'post_id', 'sc_hot', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: hot, filtered todo: depth=0\n\n sa.Index('hive_posts_cache_ix9a', 'depth', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: payout todo: rem depth\n sa.Index('hive_posts_cache_ix9b', 'category', 'depth', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: payout, filtered todo: rem depth\n\n sa.Index('hive_posts_cache_ix10', 'post_id', 'payout', postgresql_where=sql_text(\"is_grayed = '1' AND payout > 0\")), # API: muted, by filter/date/payout\n\n # index: stats\n sa.Index('hive_posts_cache_ix20', 'community_id', 'author', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: pending distribution; author payout\n\n # index: community ranked posts\n sa.Index('hive_posts_cache_ix30', 'community_id', 'sc_trend', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community trend\n sa.Index('hive_posts_cache_ix31', 'community_id', 'sc_hot', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community hot\n sa.Index('hive_posts_cache_ix32', 'community_id', 'created_at', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community created\n 
sa.Index('hive_posts_cache_ix33', 'community_id', 'payout', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND is_paidout = '0'\")), # API: community payout\n sa.Index('hive_posts_cache_ix34', 'community_id', 'payout', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '1' AND is_paidout = '0'\")), # API: community muted\n )\n\n sa.Table(\n 'hive_state', metadata,\n sa.Column('block_num', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('db_version', sa.Integer, nullable=False),\n sa.Column('steem_per_mvest', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('usd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('sbd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('dgpo', sa.Text, nullable=False),\n )\n\n metadata = build_metadata_community(metadata)\n\n metadata = build_metadata_blacklist(metadata)\n\n metadata = build_trxid_block_num(metadata)\n\n return metadata", "def meta(self) -> api.Meta:\n return self._get_model(model=api.Meta)", "def test_extraction_with_model_class(self: Any, mock_method: Any) -> None:\n config_dict = {\n 'extractor.sqlalchemy.conn_string': 'TEST_CONNECTION',\n 'extractor.sqlalchemy.extract_sql': 'SELECT 1 FROM TEST_TABLE;',\n 'extractor.sqlalchemy.model_class':\n 'tests.unit.extractor.test_sql_alchemy_extractor.TableMetadataResult'\n }\n self.conf = ConfigFactory.from_dict(config_dict)\n\n extractor = SQLAlchemyExtractor()\n extractor.results = [dict(database='test_database',\n schema='test_schema',\n name='test_table',\n description='test_description',\n column_name='test_column_name',\n column_type='test_column_type',\n column_comment='test_column_comment',\n owner='test_owner')]\n\n extractor.init(Scoped.get_scoped_conf(conf=self.conf,\n scope=extractor.get_scope()))\n\n result = extractor.extract()\n\n self.assertIsInstance(result, TableMetadataResult)\n self.assertEqual(result.name, 'test_table')", "def __sanitizeMetaModelName( self, name ):\r\n if( name[-5:] == '_META' ): \r\n return name[:-5]\r\n else: return name", "def base_fields(self):\n if not self.model:\n raise NameError('database model has not been set.')\n\n with self.session() as session:\n query = self.get_query(session, [self.model.pkId, self.model.created, self.model.modified])\n data = query.order_by(self.model.pkId).all()\n return data", "def declarative_base(model):\n for parent in model.__bases__:\n try:\n parent.metadata\n return declarative_base(parent)\n except AttributeError:\n pass\n return model", "def _load_db(self):\n for type_ in self._types:\n try:\n type_.table(self._metadata)\n except InvalidRequestError:\n pass\n # Reflect metadata so auto-mapping works\n self._metadata.reflect(self._engine)\n # Make sure the tables exist\n self._metadata.create_all()", "def __table_cls__(cls, *args, **kwargs):\n # check if a table with this name already exists\n # allows reflected tables to be applied to model by name\n key = _get_table_key(args[0], kwargs.get('schema'))\n\n if key in cls.metadata.tables:\n return sa.Table(*args, **kwargs)\n\n # if a primary key or constraint is found, create a table for\n # joined-table inheritance\n for arg in args:\n is_pk_column = isinstance(arg, sa.Column) and arg.primary_key\n is_pk_constraint = isinstance(arg, sa.PrimaryKeyConstraint)\n if is_pk_column or is_pk_constraint:\n return sa.Table(*args, **kwargs)\n\n # if no base classes define a table, return one\n # ensures the correct error shows up when missing a primary key\n for base in 
cls.__mro__[1:-1]:\n if '__table__' in base.__dict__:\n break\n else:\n return sa.Table(*args, **kwargs)\n\n # single-table inheritance, use the parent tablename\n if '__tablename__' in cls.__dict__:\n del cls.__tablename__", "def __init__(self):\n engine = connect_to_db()\n create_lyrics_table(engine) #declarative base stuff\n self.Session = sessionmaker(bind=engine)\n\n # self.create_connection()\n # self.create_table()", "def test_field_names(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n self.assertEqual(\n mb_fields[i].column,\n db_cols[i].name\n )", "def __init__(self,):\n self.logger = conf._init_logger(logger=conf.LOGGER_ORM)\n self.logger = logging.getLogger(conf.LOGGER_ORM)\n\n self.logger.info(\"[+] Initilizing Orm [+]\")\n\n\n self.engine = sqlalchemy.create_engine(\n f\"mysql+mysqldb://{conf.DB_USER}:{conf.DB_PASSWORD}@{conf.DB_ADRESS}/{conf.DB_NAME}\")\n self.metadata = sqlalchemy.MetaData(bind=self.engine)\n self.metadata.reflect(only=[\"examens\", \"sections\", \"patients\", \"medecins\", \"types_intervention\"])\n self.conn = self.engine.connect()\n \"\"\"\n Load the ORM of different table into the class\n \"\"\"\n self.check_table()\n self.hl7_connections = sqlalchemy.Table(\"hl7_connections\", self.metadata)\n self.examens = sqlalchemy.Table(\"examens\", self.metadata)\n self.sections = sqlalchemy.Table(\"sections\", self.metadata)\n self.patients = sqlalchemy.Table(\"patients\", self.metadata)\n self.medecins = sqlalchemy.Table(\"medecins\", self.metadata)\n self.types_interventions = sqlalchemy.Table(\"types_intervention\", self.metadata)\n self.logger.info(\"[+] Orm initialized [+]\")", "def items(self):\r\n for column in self.table.columns:\r\n yield (column, self[column.name])", "def get_table_metadata(self, a_table_name):\n \n self.connect()\n\n # create MetaData \n meta = sqlalchemy.MetaData()\n\n # bind to an engine\n meta.bind = self._engine\n\n table_metadata = sqlalchemy.Table(a_table_name, meta, autoload = True)\n\n cols = [] \n\n for col in table_metadata.columns:\n desc = {}\n desc['name'] = col.name\n desc['type'] = col.type\n desc['nullable'] = col.nullable\n cols.append(desc)\n\n # a dictionary of dict, one dict for each row\n return cols", "def transform(self, sql):\n # Start from scratch.\n self.clear()\n # Parse the SQL into the internal structure.\n self.parse(sql)\n\n # Create an empty list of linus in the output.\n puml_lines = list()\n # Run through all tables.\n for table_name, table in self.puml_tables.items():\n # Add PUML code for the beggining of the table.\n puml_lines.append('table({}) '.format(table_name) + '{')\n\n # Add PUML lines for all primary keys.\n for cname, ctype in table['primary'].items():\n puml_lines.append('\\tprimary_key({}) {}'.format(cname, ctype))\n\n # Add PUML lines for all foreign keys.\n for cname, cval in table['foreign'].items():\n puml_lines.append('\\tforeign_key({},{}) {}'.format(cname, cval[1], cval[0]))\n\n # Add separator if there is regular columns.\n if len(table['default'].keys()) > 0:\n puml_lines.append('\\t---')\n\n # Add regular columns.\n for cname, ctype in table['default'].items():\n puml_lines.append('\\t{} {}'.format(cname, ctype))\n\n # Close the table.\n puml_lines.append('}')\n # Add a single empty line.\n puml_lines.append('')\n\n # Run through all foreign keys and crete the table relations.\n for 
table_name, table in self.puml_tables.items():\n for fk in table['foreign'].values():\n puml_lines.append('{} \"0..n\" -- \"1..1\" {}'.format(table_name, fk[1].split('.')[0]))\n\n # Add a single empty line.\n puml_lines.append('')\n\n # Join all output lines separated by new lines.\n content = '\\n'.join(puml_lines)\n\n # Return the final PUML string.\n return (self.puml_template.format(content))", "def _tables(self):\n assert False, \"subclass responsibility\"", "def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model", "def model(self) -> str:\n ...", "def table_name(self) -> str:\n return self.model._meta.db_table", "def build_model():", "def apply_model(engine):\n Base.metadata.create_all(engine)", "def __new__(cls, name, bases, attrs):\n if name == 'Model':\n return type.__new__(cls, name, bases, attrs)\n table_name = name\n primary_key = None\n fields = []\n column_to_filed = dict()\n for k, v in attrs.items():\n if isinstance(v, Field):\n column_to_filed[k] = v\n if v.primary_key:\n if primary_key:\n raise NameError('Primary key should only be one.')\n else:\n primary_key = k\n else:\n fields.append(k)\n if primary_key is None:\n raise NameError('Primary key not found.')\n # Delete the class attributes which is belong to `Field` object.\n # Because the class attributes may be overide by the same name of \n # the class instance attributes.\n for key in column_to_filed:\n attrs.pop(key)\n\n attrs['PRIMARY_KEY'] = primary_key\n attrs['COLUMN_TO_FILED'] = column_to_filed\n attrs['TABLE_NAME'] = table_name\n return type.__new__(cls, name, bases, attrs)", "def _related_fields(self):\r\n model_class, m2m = self._get_model_class_from_table(self.model._meta.db_table) \r\n related_fields = {\r\n self.model._meta.pk.attname: model_class\r\n }\r\n for attname, model_class in self._get_related_models(self.model):\r\n related_fields[attname] = model_class\r\n return related_fields", "def bootstrap():\n Base.metadata.create_all(engine)", "def table_name(class_):\n try:\n return class_.__tablename__\n except AttributeError:\n return class_.__table__.name", "def _define_tables(self):\n metadata = MetaData(bind=self.engine)\n Base = declarative_base(metadata=metadata)\n\n class Dataset(Base):\n __tablename__ = 'datasets'\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n name = Column(String(100), nullable=False)\n\n # columns necessary for loading/processing data\n class_column = Column(String(100), nullable=False)\n train_path = Column(String(200), nullable=False)\n test_path = Column(String(200))\n description = Column(String(1000))\n\n # metadata columns, for convenience\n n_examples = Column(Integer, nullable=False)\n k_classes = Column(Integer, nullable=False)\n d_features = Column(Integer, nullable=False)\n majority = Column(Numeric(precision=10, scale=9), nullable=False)\n size_kb = Column(Integer, nullable=False)\n\n def __repr__(self):\n base = \"<%s: %s, %d classes, %d features, %d rows>\"\n return base % (self.name, self.description, self.k_classes,\n self.d_features, self.n_examples)\n\n class Datarun(Base):\n __tablename__ = 'dataruns'\n\n # relational columns\n id = Column(Integer, primary_key=True, autoincrement=True)\n dataset_id = Column(Integer, ForeignKey('datasets.id'))\n dataset = relationship('Dataset', back_populates='dataruns')\n\n description = Column(String(200), nullable=False)\n priority = Column(Integer)\n\n # hyperparameter selection and tuning settings\n selector = Column(String(200), nullable=False)\n k_window = Column(Integer)\n tuner = 
Column(String(200), nullable=False)\n gridding = Column(Integer, nullable=False)\n r_minimum = Column(Integer)\n\n # budget settings\n budget_type = Column(Enum(*BUDGET_TYPES))\n budget = Column(Integer)\n deadline = Column(DateTime)\n\n # which metric to use for judgment, and how to compute it\n metric = Column(Enum(*METRICS))\n score_target = Column(Enum(*[s + '_judgment_metric' for s in\n SCORE_TARGETS]))\n\n # variables that store the status of the datarun\n start_time = Column(DateTime)\n end_time = Column(DateTime)\n status = Column(Enum(*DATARUN_STATUS), default=RunStatus.PENDING)\n\n def __repr__(self):\n base = \"<ID = %d, dataset ID = %s, strategy = %s, budget = %s (%s), status: %s>\"\n return base % (self.id, self.dataset_id, self.description,\n self.budget_type, self.budget, self.status)\n\n Dataset.dataruns = relationship('Datarun', order_by='Datarun.id',\n back_populates='dataset')\n\n class Hyperpartition(Base):\n __tablename__ = 'hyperpartitions'\n\n # relational columns\n id = Column(Integer, primary_key=True, autoincrement=True)\n datarun_id = Column(Integer, ForeignKey('dataruns.id'))\n datarun = relationship('Datarun', back_populates='hyperpartitions')\n\n # name of or path to a configured classification method\n method = Column(String(255))\n\n # list of categorical parameters whose values are fixed to define\n # this hyperpartition\n categorical_hyperparameters_64 = Column(Text)\n\n # list of continuous parameters which are not fixed; their values\n # must be selected by a Tuner\n tunable_hyperparameters_64 = Column(Text)\n\n # list of categorical or continuous parameters whose values are\n # always fixed. These do not define the hyperpartition, but their\n # values must be passed on to the method. Here for convenience.\n constant_hyperparameters_64 = Column(Text)\n\n # has the partition had too many errors, or is gridding done?\n status = Column(Enum(*PARTITION_STATUS),\n default=PartitionStatus.INCOMPLETE)\n\n @property\n def categoricals(self):\n \"\"\"\n A list of categorical variables along with the fixed values\n which define this hyperpartition.\n Each element is a ('name', HyperParameter) tuple.\n \"\"\"\n return base_64_to_object(self.categorical_hyperparameters_64)\n\n @categoricals.setter\n def categoricals(self, value):\n self.categorical_hyperparameters_64 = object_to_base_64(value)\n\n @property\n def tunables(self):\n \"\"\"\n A list of parameters which are unspecified and must be selected\n with a Tuner. 
Each element is a ('name', HyperParameter) tuple.\n \"\"\"\n return base_64_to_object(self.tunable_hyperparameters_64)\n\n @tunables.setter\n def tunables(self, value):\n self.tunable_hyperparameters_64 = object_to_base_64(value)\n\n @property\n def constants(self):\n return base_64_to_object(self.constant_hyperparameters_64)\n\n @constants.setter\n def constants(self, value):\n self.constant_hyperparameters_64 = object_to_base_64(value)\n\n def __repr__(self):\n return \"<%s: %s>\" % (self.method, self.categoricals)\n\n Datarun.hyperpartitions = relationship('Hyperpartition',\n order_by='Hyperpartition.id',\n back_populates='datarun')\n\n class Classifier(Base):\n __tablename__ = 'classifiers'\n\n # relational columns\n id = Column(Integer, primary_key=True, autoincrement=True)\n datarun_id = Column(Integer, ForeignKey('dataruns.id'))\n datarun = relationship('Datarun', back_populates='classifiers')\n hyperpartition_id = Column(Integer, ForeignKey('hyperpartitions.id'))\n hyperpartition = relationship('Hyperpartition',\n back_populates='classifiers')\n\n # name of the host where the model was trained\n host = Column(String(50))\n\n # these columns point to where the output is stored\n model_location = Column(String(300))\n metrics_location = Column(String(300))\n\n # base 64 encoding of the hyperparameter names and values\n hyperparameter_values_64 = Column(Text, nullable=False)\n\n # performance metrics\n cv_judgment_metric = Column(Numeric(precision=20, scale=10))\n cv_judgment_metric_stdev = Column(Numeric(precision=20, scale=10))\n test_judgment_metric = Column(Numeric(precision=20, scale=10))\n\n start_time = Column(DateTime)\n end_time = Column(DateTime)\n status = Column(Enum(*CLASSIFIER_STATUS), nullable=False)\n error_message = Column(Text)\n\n @property\n def hyperparameter_values(self):\n return base_64_to_object(self.hyperparameter_values_64)\n\n @hyperparameter_values.setter\n def hyperparameter_values(self, value):\n self.hyperparameter_values_64 = object_to_base_64(value)\n\n @property\n def mu_sigma_judgment_metric(self):\n # compute the lower confidence bound on the cross-validated\n # judgment metric\n if self.cv_judgment_metric is None:\n return None\n return (self.cv_judgment_metric - 2 *\n self.cv_judgment_metric_stdev)\n\n def __repr__(self):\n params = ', '.join(['%s: %s' % i for i in\n list(self.hyperparameter_values.items())])\n return \"<id=%d, params=(%s)>\" % (self.id, params)\n\n Datarun.classifiers = relationship('Classifier',\n order_by='Classifier.id',\n back_populates='datarun')\n Hyperpartition.classifiers = relationship('Classifier',\n order_by='Classifier.id',\n back_populates='hyperpartition')\n\n self.Dataset = Dataset\n self.Datarun = Datarun\n self.Hyperpartition = Hyperpartition\n self.Classifier = Classifier\n\n Base.metadata.create_all(bind=self.engine)", "def _generate_rowklass(self):\n header = six.next(self.resolved)\n clean = []\n for h in header:\n underscoreless = h.strip().lower().replace(' ', '_').replace('.', '_')\n specialless = underscoreless.replace('(', '').replace(')', '').replace('?', '').replace('-', '')\n if specialless == '':\n clean.append(specialless)\n continue\n try:\n num = int(specialless[0])\n numbers = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',\n 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten'}\n numless = numbers[num] + specialless[1:]\n cleaned = numless\n except ValueError:\n cleaned = specialless\n\n more = 1\n while cleaned in clean:\n more += 1\n cleaned += str(more)\n\n clean.append(cleaned)\n\n 
for i, v in enumerate(clean):\n if v == '':\n clean[i] = 'field_' + str(i)\n self.rowklass = collections.namedtuple('RowKlass', clean)", "def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])", "def table(cls):\n return cls.__name__", "def test_reflection(self):\n m = MetaData()\n\n t = Table('test_table_syn', m, autoload=True,\n autoload_with=testing.db, oracle_resolve_synonyms=True)\n eq_(t.c.keys(), ['id', 'data'])\n eq_(list(t.primary_key), [t.c.id])", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def test_meta_read_write_units(engine):\n class Model(engine.model):\n id = Column(UUID, hash_key=True)\n\n assert Model.Meta.write_units == 1\n assert Model.Meta.read_units == 1\n\n class Other(engine.model):\n class Meta:\n read_units = 2\n write_units = 3\n id = Column(UUID, hash_key=True)\n\n assert Other.Meta.write_units == 3\n assert Other.Meta.read_units == 2", "def get_tables_name_and_type(self) -> Optional[Iterable[Tuple[str, str]]]:\n try:\n schema_name = self.context.database_schema.name.__root__\n if self.source_config.includeTables:\n for table_and_type in self.query_table_names_and_types(schema_name):\n table_name = self.standardize_table_name(\n schema_name, table_and_type.name\n )\n table_fqn = fqn.build(\n self.metadata,\n entity_type=Table,\n service_name=self.context.database_service.name.__root__,\n database_name=self.context.database.name.__root__,\n schema_name=self.context.database_schema.name.__root__,\n table_name=table_name,\n skip_es_search=True,\n )\n if filter_by_table(\n self.source_config.tableFilterPattern,\n table_fqn\n if self.source_config.useFqnForFiltering\n else table_name,\n ):\n self.status.filter(\n table_fqn,\n \"Table Filtered Out\",\n )\n continue\n yield table_name, table_and_type.type_\n\n if self.source_config.includeViews:\n for view_name in self.inspector.get_view_names(schema_name):\n view_name = self.standardize_table_name(schema_name, view_name)\n view_fqn = fqn.build(\n self.metadata,\n entity_type=Table,\n service_name=self.context.database_service.name.__root__,\n database_name=self.context.database.name.__root__,\n schema_name=self.context.database_schema.name.__root__,\n table_name=view_name,\n )\n\n if filter_by_table(\n self.source_config.tableFilterPattern,\n view_fqn\n if self.source_config.useFqnForFiltering\n else view_name,\n ):\n self.status.filter(\n view_fqn,\n \"Table Filtered Out\",\n )\n continue\n yield view_name, TableType.View\n except Exception as err:\n logger.warning(\n f\"Fetching tables names failed for schema {schema_name} due to - {err}\"\n )\n logger.debug(traceback.format_exc())", "def create_meta_loan_table(self):\n table_exists = self.check_if_table_exists(\"meta_loan_tables\")\n\n if not table_exists:\n self.read_sql_from_file('create_meta_loan_tables.sql')\n return", "def create_table(self):\n pass", "def meta(self):\n raise NotImplementedError", "def _create_TableDescriptor(self):\n\n self.conn.cursor.execute(\"PRAGMA table_info(\" + self.table_name + \")\")\n descriptions = self.conn.cursor.fetchall()\n column_map = {}\n for description in descriptions:\n column_map[description[1]] = description[2]\n td = TD(self.table_name, column_map) \n\n# self.conn.cursor.execute(\"SELECT sql FROM sqlite_master WHERE name='{tb}'\"\\\n# .format(tb=self.table_name))\n# aa = str(self.conn.cursor.fetchone()[0])\n# sindx = aa.find(\"(\")\n# eindx 
= aa.find(\")\")\n# aa = aa[sindx+1:eindx]\n# aa = aa.split(\",\")\n# column_map = {kyval.split()[0]:kyval.split()[1] for kyval in aa}\n# td = TD(self.table_name, column_map) \n\n return td", "def iteritems(self):\r\n for name in self.table.sequence:\r\n if name not in self.table.exclude:\r\n yield (name, self.columns[name])", "def create_translations_model(model, related_name, meta, **fields):\n if not meta:\n meta = {}\n unique = [('language_code', 'master')]\n meta['unique_together'] = list(meta.get('unique_together', [])) + unique\n # Create inner Meta class \n Meta = type('Meta', (object,), meta)\n name = '%sTranslation' % model.__name__\n attrs = {}\n attrs.update(fields)\n attrs['Meta'] = Meta\n attrs['__module__'] = model.__module__\n attrs['language_code'] = models.CharField(max_length=15, db_index=True)\n # null=True is so we can prevent cascade deletion\n attrs['master'] = models.ForeignKey(model, related_name=related_name, editable=False, null=True)\n # Create and return the new model\n return ModelBase(name, (BaseTranslationModel,), attrs)", "def create_sqlalchemy_mapperproperties_from_dbfields(cls,modeltable):\n allprops = {}\n #\n for field in cls.fieldlist:\n props = field.create_sqlalchemy_mapperproperties(cls,modeltable)\n if (props!=None):\n allprops.update(props)\n return allprops", "def _get_related_models(self, parent_model):\r\n related_models = set()\r\n rev_reversemapping = dict([(v,k) for k,v in self._reversemapping.iteritems()])\r\n if rev_reversemapping:\r\n for attname, related in self._get_reverse_relations(parent_model):\r\n related_models.add((rev_reversemapping[attname], related.model))\r\n\r\n for field in parent_model._meta.fields:\r\n if field.rel and field.rel.to._meta.db_table in self.query.tables and field.rel.to != parent_model:\r\n related_models.add((field.attname, field.rel.to))\r\n \r\n for attname, model_class in related_models:\r\n yield attname, model_class\r\n if attname.endswith(\"_id\"):\r\n attname = attname[:-3]\r\n for join_attname, model_klass in self._get_related_models(model_class):\r\n yield LOOKUP_SEP.join((attname,join_attname)), model_klass", "def createTables(self):\n metadata = Base.metadata\n metadata.create_all(self._engine)\n return" ]
[ "0.64038646", "0.6319936", "0.61308765", "0.58914566", "0.58779913", "0.5795165", "0.5719704", "0.5694968", "0.5619841", "0.56065655", "0.5591526", "0.55195546", "0.5389739", "0.5318234", "0.5304673", "0.5291211", "0.52884954", "0.5279363", "0.52785826", "0.52544487", "0.52321386", "0.5231754", "0.51996213", "0.5188176", "0.5153182", "0.5127161", "0.51235783", "0.5121615", "0.51210475", "0.51104903", "0.5098141", "0.5093024", "0.50921124", "0.50882906", "0.50874853", "0.5072644", "0.50653034", "0.5060398", "0.50589347", "0.50584775", "0.50556046", "0.504214", "0.5039316", "0.5033422", "0.503156", "0.50310296", "0.5019509", "0.5010189", "0.50024277", "0.4996406", "0.4995856", "0.498689", "0.49776587", "0.49748346", "0.49667856", "0.49625766", "0.49619922", "0.49606666", "0.4935995", "0.49358135", "0.49252865", "0.49195606", "0.49020815", "0.48896965", "0.48801184", "0.48798797", "0.4871115", "0.4865422", "0.486283", "0.48554373", "0.48424825", "0.4835274", "0.48310938", "0.48258653", "0.48199356", "0.4811632", "0.4810126", "0.48001617", "0.4788581", "0.47878593", "0.47852796", "0.47764406", "0.4767456", "0.47581908", "0.4755629", "0.4755629", "0.4755629", "0.4755629", "0.4755629", "0.4753293", "0.47497863", "0.47462967", "0.47417814", "0.4739125", "0.47350904", "0.47333884", "0.47295207", "0.4728654", "0.4722144", "0.4716221" ]
0.58967185
3
returns a tuple (Model, geometry_field, geometry_field_type) for a given table in a given schema
def get_layer(schema, table_name):
    fn = '{}.{}'.format(schema, table_name)
    if fn not in LAYER_MODELS:
        LAYER_MODELS[fn] = inspect_table(schema, table_name)
    return LAYER_MODELS.get(fn)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_geometry_type(self, table_name, description):\n with self.connection.cursor() as cursor:\n cursor.execute(\n \"\"\"\n SELECT t.coord_dimension, t.srid, t.type FROM (\n SELECT * FROM geometry_columns\n UNION ALL\n SELECT * FROM geography_columns\n ) AS t WHERE t.f_table_name = %s AND t.f_geometry_column = %s\n \"\"\",\n (table_name, description.name),\n )\n row = cursor.fetchone()\n if not row:\n raise Exception(\n 'Could not find a geometry or geography column for \"%s\".\"%s\"'\n % (table_name, description.name)\n )\n dim, srid, field_type = row\n # OGRGeomType does not require GDAL and makes it easy to convert\n # from OGC geom type name to Django field.\n field_type = OGRGeomType(field_type).django\n # Getting any GeometryField keyword arguments that are not the default.\n field_params = {}\n if self.postgis_oid_lookup.get(description.type_code) == \"geography\":\n field_params[\"geography\"] = True\n if srid != 4326:\n field_params[\"srid\"] = srid\n if dim != 2:\n field_params[\"dim\"] = dim\n return field_type, field_params", "def get_field_type(connection, table_name, row):\n field_params = OrderedDict()\n field_notes = []\n is_geometry = False\n try:\n field_type = connection.introspection.get_field_type(row[1], row)\n except KeyError:\n field_type = 'TextField'\n field_notes.append('This field type is a guess.')\n\n # This is a hook for data_types_reverse to return a tuple of\n # (field_type, field_params_dict).\n if type(field_type) is tuple:\n field_type, new_params = field_type\n field_params.update(new_params)\n\n # Add max_length for all CharFields.\n if field_type == 'CharField' and row[3]:\n field_params['max_length'] = int(row[3])\n\n if field_type == 'DecimalField':\n if row[4] is None or row[5] is None:\n field_notes.append(\n 'max_digits and decimal_places have been guessed, as this '\n 'database handles decimal fields as float')\n field_params['max_digits'] = row[4] if row[4] is not None else 10\n field_params['decimal_places'] = row[\n 5] if row[5] is not None else 5\n else:\n field_params['max_digits'] = row[4]\n field_params['decimal_places'] = row[5]\n\n if field_type == 'GeometryField':\n geo_col = row[0]\n # Getting a more specific field type and any additional parameters\n # from the `get_geometry_type` routine for the spatial backend.\n field_type, geo_params = connection.introspection.get_geometry_type(\n table_name, geo_col)\n field_params.update(geo_params)\n is_geometry = True\n\n return field_type, field_params, is_geometry\n # return getattr(models.fields, field_type), field_params", "def get_table_info(self):\n epsg = None\n meta = MetaData()\n table_obj = Table(self.table, meta,\n autoload=True, autoload_with=self.engine)\n if not self.columns:\n self.columns = table_obj.columns.keys()\n geo_cols = [(col.name, col.type) for col in table_obj.columns\n if hasattr(col.type, 'srid')]\n if geo_cols:\n geo_col = geo_cols[0]\n self.geom_column = geo_col[0]\n geo_obj = geo_col[1]\n if self.geom_column not in self.columns:\n self.columns.append(self.geom_column)\n if hasattr(geo_obj, 'srid'):\n epsg = geo_obj.srid\n if epsg == -1:\n epsg = 4326\n if hasattr(geo_obj, 'geometry_type'):\n self.geometry_type = geo_obj.geometry_type\n\n self.epsg = epsg\n self.table_obj = table_obj\n self.meta = meta", "def get_table_info(self):\n epsg = None\n meta = MetaData()\n table_obj = Table(self._table, meta,\n autoload=True, autoload_with=self._engine)\n if not self._columns:\n self._columns = table_obj.columns.keys()\n geo_cols = [(col.name, col.type) for col in 
table_obj.columns\n if hasattr(col.type, 'srid')]\n if geo_cols:\n geo_col = geo_cols[0]\n self._geom_column = geo_col[0]\n geo_obj = geo_col[1]\n if self._geom_column not in self._columns:\n self._columns.append(self._geom_column)\n if hasattr(geo_obj, 'srid'):\n epsg = geo_obj.srid\n if epsg == -1:\n epsg = 4326\n if hasattr(geo_obj, 'geometry_type'):\n self._geometry_type = geo_obj.geometry_type\n\n self._epsg = epsg\n self._table_obj = table_obj\n self._meta = meta", "def getGeometryColumnDef(self, schema, table, column):\r\n defs = self.fetchSqlRecords(\r\n \"select type, srid from geometry_columns where f_table_schema='{}' and f_table_name='{}' and f_geometry_column='{}'\".format(schema, table, column))\r\n if not len(defs) == 1:\r\n return None\r\n\r\n return 'geometry({},{})'.format(defs[0][0], defs[0][1])", "def get_geom_type(carto_sql_client, tablename):\n geomtypes = {'ST_Point': 'point',\n 'ST_MultiPoint': 'point',\n 'ST_LineString': 'line',\n 'ST_MultiLineString': 'line',\n 'ST_Polygon': 'polygon',\n 'ST_MultiPolygon': 'polygon'}\n\n # NOTE: assumes one geometry type per table\n result = carto_sql_client.send('''\n SELECT ST_GeometryType(the_geom) As geomtype\n FROM \"{tablename}\"\n WHERE the_geom IS NOT NULL\n LIMIT 1'''.format(tablename=tablename))\n try:\n return geomtypes[result['rows'][0]['geomtype']]\n except (KeyError, IndexError):\n print((\"Cannot create a map from `{tablename}` because this table \"\n \"does not have geometries ({geomreported})\").format(\n tablename=tablename,\n geomreported=None))\n return None\n except Exception as err:\n print(\"ERROR: {}\".format(err))\n return None", "def get_schema(schema): # noqa: E501\n return 'do some magic!'", "def _get_table_reflection(self, schema: str, table: str) -> Table:\n return self.sql_metadata.tables.get(f\"{schema}.{table}\",\n Table(table, self.sql_metadata, schema=schema, autoload=True))", "def schema(self):\n return self.table_info.schema", "def get_schema(self) -> dict:", "def _get_fields(self, table):\n fields = list()\n for column in table.columns:\n fields.append({'id': column.name, 'type': str(column.type)})\n return fields", "def getTableSchema(self,tableName):\n\tif not self.schemaDict.has_key(tableName):\n\t if self.dbType==\"sqlite\":\n\t query = \"SELECT * FROM sqlite_master WHERE name='%s'\"%tableName\n\t tup = self.fetchOne(query)\n\t schema= tup[4]\n\t else: # MySQL \n\t query = \"DESCRIBE %s\"%tableName\n\t tup = self.fetchAll(query)\n\t schema= \"CREATE TABLE %s (\"%tableName\n\t for item in tup:\n\t name = item[0]\n\t\t type = item[1]\n\t\t priKey = item[3]\n\t\t autoInc = item[5] \n\t schema+=name+' '+type+' '+priKey+' '+autoInc\n\t\t if item!=tup[-1]:\n\t\t schema+=','\n\t schema+=\" )\"\n\t return schema\n\telse:\n\t return self.schemaDict[tableName]", "def get_gtfs_field_tuple_from_table(table_name, gtfs_spec=None):\n if not gtfs_spec:\n gtfs_spec = settings.GTFS_SPEC\n choice_tuple = choice_tuple = (('',''),)\n for t in gtfs_spec['resources']:\n if t['name'] == table_name:\n for f in t['schema']['fields']:\n choice_tuple = choice_tuple + ((f['name'], f['name']),)\n return choice_tuple\n raise ValueError(\"Table name not found in GTFS spec.\")", "def split_table_schema(table_name):\r\n\r\n split = table_name.split('.')\r\n if len(split) > 1:\r\n return (split[0], split[1])\r\n else:\r\n return (None, split[0])", "def get_schema(self, repo, table):\n return self.user_con.get_schema(repo=repo, table=table)", "def parse_table_schema(conn):\r\n cur = conn.cursor()\r\n\r\n cur.execute(\"PRAGMA 
table_info({})\".format(\"week5\"))\r\n print(cur.fetchall())", "def postgis_metadata(self, data_source_id, schema, table_name):\n metadata = {}\n\n try:\n engine = self.engine_for_data_source(data_source_id)\n if engine is None:\n return {\n 'error': \"FEHLER: DataSource nicht gefunden\"\n }\n\n # connect to data_source\n conn = engine.connect()\n\n # get primary key\n\n # build query SQL\n sql = sql_text(\"\"\"\n SELECT a.attname\n FROM pg_index i\n JOIN pg_attribute a ON a.attrelid = i.indrelid\n AND a.attnum = ANY(i.indkey)\n WHERE i.indrelid = '{schema}.{table}'::regclass\n AND i.indisprimary;\n \"\"\".format(schema=schema, table=table_name))\n\n # execute query\n primary_key = None\n result = conn.execute(sql)\n for row in result:\n primary_key = row['attname']\n\n # get geometry column and srid\n\n # build query SQL\n sql = sql_text(\"\"\"\n SELECT f_geometry_column, srid, type\n FROM geometry_columns\n WHERE f_table_schema = '{schema}' AND f_table_name = '{table}';\n \"\"\".format(schema=schema, table=table_name))\n\n # execute query\n geometry_columns = []\n result = conn.execute(sql)\n for row in result:\n geometry_columns.append({\n 'geometry_column': row['f_geometry_column'],\n 'geometry_type': row['type'],\n 'srid': row['srid']\n })\n\n # close database connection\n conn.close()\n\n metadata = {\n 'schema': schema,\n 'table': table_name,\n 'primary_key': primary_key,\n 'geometry_columns': geometry_columns\n }\n except OperationalError as e:\n self.logger.error(e.orig)\n return {\n 'error': \"OperationalError: %s\" % e.orig\n }\n except ProgrammingError as e:\n self.logger.error(e.orig)\n return {\n 'error': \"ProgrammingError: %s\" % e.orig\n }\n\n return metadata", "def get_schema(self):\r\n return self.__schema", "def get_schema(self):\n return ', '.join('%s:%s' % (col, self.schema[col]) for col in self.schema)", "def _get_model_class_from_table(self, table):\r\n try:\r\n model_class = [m for m in get_models() if connection.introspection.table_name_converter(m._meta.db_table) in map(connection.introspection.table_name_converter,[table])][0] \r\n m2m = False \r\n except IndexError:\r\n try: \r\n # this is a many to many field \r\n model_class = [f.rel.to for m in get_models() for f in m._meta.local_many_to_many if f.m2m_db_table() == table][0] \r\n m2m = True \r\n except IndexError: \r\n # this is an inner join \r\n table = self.query.alias_map[table][0]\r\n return self._get_model_class_from_table(table)\r\n return model_class, m2m", "def fields_from_table(table):\r\n\r\n fields = []\r\n\r\n for column in table.columns:\r\n field = brewery.metadata.Field(name=column.name)\r\n field.concrete_storage_type = column.type\r\n\r\n for conv in _sql_to_brewery_types:\r\n if issubclass(column.type.__class__, conv[0]):\r\n field.storage_type = conv[1]\r\n field.analytical_type = conv[2]\r\n break\r\n\r\n if not field.storage_type:\r\n field.storaget_tpye = \"unknown\"\r\n\r\n if not field.analytical_type:\r\n field.analytical_type = \"unknown\"\r\n\r\n fields.append(field)\r\n\r\n return brewery.metadata.FieldList(fields)", "def get_table_definition(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n return getattr(schema_virtual_module, table_name).describe()", "def get_schema(cls):\n return cls.schema()", "def get_schema() -> dict:\n raise NotImplementedError()", "def _get_schema(self):\n self._pick()\n return Schema()", "def get_geometries ( self, 
object_class_table, spatial_column, select_column, select_id ) :\n stmt = 'select sdo_util.to_wktgeometry(' + str(spatial_column) + ') from ' + str(object_class_table) + ' where ' + str(select_column) + ' = ' + str(select_id)\n self.oracle_cursor.execute( stmt )\n resultset = self.oracle_cursor.fetchall()\n return resultset", "def get_table_info(line):\n\n COMMENT_EXPR = '-- Name: '\n TYPE_EXPR = '; Type: '\n SCHEMA_EXPR = '; Schema: '\n OWNER_EXPR = '; Owner: '\n TABLESPACE_EXPR = '; Tablespace: '\n\n temp = line.strip('\\n')\n type_start = get_all_occurrences(TYPE_EXPR, temp)\n schema_start = get_all_occurrences(SCHEMA_EXPR, temp)\n owner_start = get_all_occurrences(OWNER_EXPR, temp)\n tblspace_start = get_all_occurrences(TABLESPACE_EXPR, temp)\n if len(type_start) != 1 or len(schema_start) != 1 or len(owner_start) != 1:\n return (None, None, None, None)\n name = temp[len(COMMENT_EXPR) : type_start[0]]\n type = temp[type_start[0] + len(TYPE_EXPR) : schema_start[0]]\n schema = temp[schema_start[0] + len(SCHEMA_EXPR) : owner_start[0]]\n if not tblspace_start:\n tblspace_start.append(None)\n owner = temp[owner_start[0] + len(OWNER_EXPR) : tblspace_start[0]]\n return (name, type, schema, owner)", "def read_schema_from_db(cur, table):\n num_rows = cur.execute(\"\"\"DESCRIBE {}\"\"\".format(table))\n tbl_schema = []\n for i in range(num_rows):\n row = cur.fetchone()\n tbl_schema.append([row[0], row[1]])\n return tbl_schema", "def schema_ref(schema, table):\n return schema + '.' + table", "async def _get_schema_info(self, app_id, namespace, gae_index_name):\n collection = get_collection_name(app_id, namespace, gae_index_name)\n solr_schema_info = await self.solr.get_schema_info(collection)\n fields_info = solr_schema_info['fields']\n id_field = SolrSchemaFieldInfo(\n solr_name='id', gae_name='doc_id', type=Field.Type.ATOM,\n language=None, docs_number=fields_info.get('id', {}).get('docs', 0)\n )\n rank_field = SolrSchemaFieldInfo(\n solr_name='rank', gae_name='rank', type=Field.Type.NUMBER,\n language=None, docs_number=fields_info.get('rank', {}).get('docs', 0)\n )\n fields = [id_field, rank_field]\n grouped_fields = {\n 'doc_id': [id_field],\n 'rank': [rank_field]\n }\n facets = []\n grouped_facet_indexes = {}\n\n for solr_field_name, info in fields_info.items():\n try:\n gae_name, type_, language = parse_solr_field_name(solr_field_name)\n except ValueError:\n continue\n schema_field = SolrSchemaFieldInfo(\n solr_field_name, gae_name, type_, language, info.get('docs', 0)\n )\n if SolrSchemaFieldInfo.Type.is_facet_index(type_):\n add_value(grouped_facet_indexes, gae_name, schema_field)\n if SolrSchemaFieldInfo.Type.is_facet(type_):\n facets.append(schema_field)\n else:\n fields.append(schema_field)\n add_value(grouped_fields, gae_name, schema_field)\n\n for fields_group in grouped_fields.values():\n if len(fields_group) > 1:\n # Sadly app uses the same name for fields with different types [*1].\n # Let's sort them from high popularity to low.\n fields_group.sort(key=lambda solr_field: -solr_field.docs_number)\n\n for facets_group in grouped_facet_indexes.values():\n if len(facets_group) > 1:\n # Sadly app uses the same name for facets with different types [*1].\n # Let's sort them from high popularity to low.\n facets_group.sort(key=lambda solr_field: -solr_field.docs_number)\n\n index_info = solr_schema_info['index']\n return SolrIndexSchemaInfo(\n app_id=app_id,\n namespace=namespace,\n gae_index_name=gae_index_name,\n collection=collection,\n docs_number=index_info['numDocs'],\n 
heap_usage=index_info['indexHeapUsageBytes'],\n size_in_bytes=index_info['segmentsFileSizeInBytes'],\n fields=fields,\n facets=facets,\n grouped_fields=grouped_fields,\n grouped_facet_indexes=grouped_facet_indexes\n )", "def get_schema(db, sourcename):\n try:\n schema = db[\"tables\"][sourcename]\n schema[\"type\"] = constants.TABLE\n except KeyError:\n try:\n schema = db[\"views\"][sourcename]\n schema[\"type\"] = constants.VIEW\n except KeyError:\n raise ValueError(\"no such table/view\")\n return schema", "def get_field_type(field, table):\n for i in settings.GTFS_SPEC['resources']:\n print(i['name'])\n if i['name'] == table:\n for j in i['schema']['fields']:\n print(j['name'])\n if j['name'] == field:\n return j['gtfs_type']\n raise ValueError(\"Field not found in GTFS spec.\")", "def get_schema(self):\n return ', '.join(\n '%s:%s' % (col, self.schema[col]) for col in self.schema)", "def get_schema(self) -> dict:\n return schemas.get_object_schema(self.schema)", "def Result(row, schema):\r\n return dict(zip(schema.fields(), row))", "def model_table():\r\n class OccupationTable(tables.Table):\r\n class Meta:\r\n model = Occupation\r\n assert [\"id\", \"name\", \"region\"] == list(OccupationTable.base_columns.keys())\r\n\r\n class OccupationTable2(tables.Table):\r\n extra = tables.Column()\r\n\r\n class Meta:\r\n model = Occupation\r\n assert [\"id\", \"name\", \"region\", \"extra\"] == list(OccupationTable2.base_columns.keys())\r\n\r\n # be aware here, we already have *models* variable, but we're importing\r\n # over the top\r\n from django.db import models\r\n\r\n class ComplexModel(models.Model):\r\n char = models.CharField(max_length=200)\r\n fk = models.ForeignKey(\"self\")\r\n m2m = models.ManyToManyField(\"self\")\r\n\r\n class ComplexTable(tables.Table):\r\n class Meta:\r\n model = ComplexModel\r\n assert [\"id\", \"char\", \"fk\"] == list(ComplexTable.base_columns.keys())", "def as_tuple(self) -> tuple[typing.Any, ...]:\n return tuple(getattr(self, field.name) for field in Schema)", "def schema(self):", "def fetch_table_schema(self, table_name):\n ddl = self.query(sql.show_create_table(table_name))\n if ddl:\n try:\n return parse_create(ddl[0][\"Create Table\"])\n except ParseError as e:\n raise OSCError(\n \"TABLE_PARSING_ERROR\",\n {\"db\": self._current_db, \"table\": self.table_name, \"msg\": str(e)},\n )", "def info_from_geo_field(geo_field):\n\n geo_field_name = geo_field.name\n\n get_geom = lambda obj: getattr(obj, geo_field_name)\n\n if hasattr(geo_field, 'geom_type'):\n geom_type = geo_field.geom_type\n else:\n geom_type = geo_field._geom\n\n if hasattr(geo_field, 'srid'):\n srid = geo_field.srid\n else:\n srid = geo_field._srid\n\n return get_geom, geom_type, srid", "def schema_to_features(schema: TableSchema) -> List[LiteFeature]:\n features = []\n for col_name, column_schema in schema.columns.items():\n assert isinstance(column_schema, ColumnSchema)\n\n logical_type = column_schema.logical_type\n assert logical_type\n assert issubclass(type(logical_type), LogicalType)\n\n tags = column_schema.semantic_tags\n assert isinstance(tags, set)\n\n features.append(\n LiteFeature(\n name=col_name,\n logical_type=type(logical_type),\n tags=tags,\n ),\n )\n\n return features", "def retrieve_shapely(osm_path,geoType,keyCol,**valConstraint):\n driver=ogr.GetDriverByName('OSM')\n data = driver.Open(osm_path)\n query = query_b(geoType,keyCol,**valConstraint)\n sql_lyr = data.ExecuteSQL(query)\n features =[]\n # cl = columns \n cl = ['osm_id'] \n for a in keyCol: cl.append(a)\n if 
data is not None:\n print('query is finished, lets start the loop')\n for feature in tqdm(sql_lyr):\n try:\n if feature.GetField(keyCol[0]) is not None:\n geom = loads(feature.geometry().ExportToWkb()) \n if geom is None:\n continue\n # field will become a row in the dataframe.\n field = []\n for i in cl: field.append(feature.GetField(i))\n field.append(geom) \n features.append(field)\n except:\n print(\"WARNING: skipped OSM feature\") \n else:\n print(\"ERROR: Nonetype error when requesting SQL. Check required.\") \n cl.append('geometry') \n if len(features) > 0:\n return geopandas.GeoDataFrame(features,columns=cl,crs={'init': 'epsg:4326'})\n else:\n print(\"WARNING: No features or No Memory. returning empty GeoDataFrame\") \n return geopandas.GeoDataFrame(columns=['osm_id','geometry'],crs={'init': 'epsg:4326'})", "def schema(cls, only_self: bool=False):\n try:\n md_tbls = cls.metadata.tables\n insp = reflection.Inspector.from_engine(cls.s.bind.engine)\n tbls = dict()\n for tbl in insp.get_table_names():\n if not only_self or (only_self and tbl == cls.__tablename__):\n cols = dict()\n for col in insp.get_columns(tbl):\n info = dict(col)\n col_info = md_tbls[tbl].c[col['name']]\n info['type'] = {\n 'compiled': col['type'].compile(),\n 'native': col['type'].python_type.__name__\n }\n info['type']['length'] = col['type'].length if hasattr(col['type'], 'length') else None\n if info['autoincrement']:\n info['default'] = 'autoincrement'\n info.update(col_info.info)\n info['placeholder'] = '%s_%s' % (tbl, col['name'])\n cols[col['name']] = info\n tbls[tbl] = cols\n\n return tbls\n except SQLAlchemyError:\n cls.s.rollback()\n raise", "def get_schema(self, get_stats=False):\n query = \"schema {}\"\n\n results = self.run_dgraph_query_raw(query)\n\n schema = {}\n\n for row in results[\"schema\"]:\n table_name = row[\"predicate\"]\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n return list(schema.values())", "def build_schema(self):\n field_defs = []\n fields = []\n point_rows = []\n line_rows = []\n polygon_rows = []\n for i in self.json_in['features']: # first iterate through it all and get all the fields\n props = i.get('properties')\n\n for k, v in props.items():\n if k not in fields:\n fields.append(k)\n\n for i in self.json_in['features']: # now fill in any props that any features are missing, and sort them all\n geom = i['geometry']\n props = i['properties']\n for f in fields:\n if f not in props.keys():\n props[f] = ''\n props = OrderedDict(sorted(props.items()))\n\n for k, v in props.items():\n schema_row = [k, \"TEXT\", k.replace('_', ' '), 256]\n if schema_row not in field_defs:\n field_defs.append(schema_row)\n row = [str(v) for k, v in sorted(props.items())] # coerce everything to str cause this stuff is a mess\n parsed_geom = GeoJSONUtils.parse_geometry(geom)\n geotype = parsed_geom['type']\n egeom = parsed_geom['esri_geom']\n\n if geotype == \"POINT\":\n row.insert(0, egeom)\n print(row)\n point_rows.append(row)\n elif geotype == \"POLYLINE\":\n row.insert(0, egeom)\n print(row)\n line_rows.append(row)\n else:\n row.insert(0, egeom)\n print(row)\n polygon_rows.append(row)\n\n return {\n \"fields\": fields,\n \"field_defs\": field_defs,\n \"rows\": [point_rows, line_rows, polygon_rows]\n }", "def _get_table(self, cursor):\n raise NotImplementedError", "def transform_schema(pgschema):\n datatypes = {}\n for field in pgschema:\n if 'cartodb_id' in field:\n continue\n datatypes[field] = map_dtypes(pgschema[field]['type'])\n return 
datatypes", "def info_table(table):\n print \"\\nSCHEMA de la taula \",table, \"es: \"\n con=lite.connect('parking.db')\n cur=con.cursor()\n cur.execute(\"PRAGMA table_info({});\".format(table))\n data = cur.fetchall()\n for d in data:\n print \"\\t\",d[0], d[1], d[2]\n con.close()", "def fetch_tuples(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n\n # Get the table object refernece\n table = getattr(schema_virtual_module, table_name)\n\n # Fetch tuples without blobs as dict to be used to create a\n # list of tuples for returning\n non_blobs_rows = table.fetch(*table.heading.non_blobs, as_dict=True,\n limit=DEFAULT_FETCH_LIMIT)\n\n # Buffer list to be return\n rows = []\n\n # Looped through each tuple and deal with TEMPORAL types and replacing\n # blobs with ==BLOB== for json encoding\n for non_blobs_row in non_blobs_rows:\n # Buffer object to store the attributes\n row = []\n # Loop through each attributes, append to the tuple_to_return with specific\n # modification based on data type\n for attribute_name, attribute_info in table.heading.attributes.items():\n if not attribute_info.is_blob:\n if non_blobs_row[attribute_name] is None:\n # If it is none then just append None\n row.append(None)\n elif attribute_info.type == 'date':\n # Date attribute type covert to epoch time\n row.append((non_blobs_row[attribute_name] -\n datetime.date(1970, 1, 1)).days * DAY)\n elif attribute_info.type == 'time':\n # Time attirbute, return total seconds\n row.append(non_blobs_row[attribute_name].total_seconds())\n elif attribute_info.type in ('datetime', 'timestamp'):\n # Datetime or timestamp, use timestamp to covert to epoch time\n row.append(non_blobs_row[attribute_name].timestamp())\n elif attribute_info.type[0:7] == 'decimal':\n # Covert decimal to string\n row.append(str(non_blobs_row[attribute_name]))\n else:\n # Normal attribute, just return value with .item to deal with numpy\n # types\n if isinstance(non_blobs_row[attribute_name], np.generic):\n row.append(np.asscalar(non_blobs_row[attribute_name]))\n else:\n row.append(non_blobs_row[attribute_name])\n else:\n # Attribute is blob type thus fill it in string instead\n row.append('=BLOB=')\n\n # Add the row list to tuples\n rows.append(row)\n return rows", "def schema(self) -> 'outputs.TableSchemaResponse':\n return pulumi.get(self, \"schema\")", "def get_tables_name_and_type(self) -> Optional[Iterable[Tuple[str, str]]]:\n try:\n schema_name = self.context.database_schema.name.__root__\n if self.source_config.includeTables:\n for table_and_type in self.query_table_names_and_types(schema_name):\n table_name = self.standardize_table_name(\n schema_name, table_and_type.name\n )\n table_fqn = fqn.build(\n self.metadata,\n entity_type=Table,\n service_name=self.context.database_service.name.__root__,\n database_name=self.context.database.name.__root__,\n schema_name=self.context.database_schema.name.__root__,\n table_name=table_name,\n skip_es_search=True,\n )\n if filter_by_table(\n self.source_config.tableFilterPattern,\n table_fqn\n if self.source_config.useFqnForFiltering\n else table_name,\n ):\n self.status.filter(\n table_fqn,\n \"Table Filtered Out\",\n )\n continue\n yield table_name, table_and_type.type_\n\n if self.source_config.includeViews:\n for view_name in self.inspector.get_view_names(schema_name):\n view_name = self.standardize_table_name(schema_name, view_name)\n view_fqn = fqn.build(\n 
self.metadata,\n entity_type=Table,\n service_name=self.context.database_service.name.__root__,\n database_name=self.context.database.name.__root__,\n schema_name=self.context.database_schema.name.__root__,\n table_name=view_name,\n )\n\n if filter_by_table(\n self.source_config.tableFilterPattern,\n view_fqn\n if self.source_config.useFqnForFiltering\n else view_name,\n ):\n self.status.filter(\n view_fqn,\n \"Table Filtered Out\",\n )\n continue\n yield view_name, TableType.View\n except Exception as err:\n logger.warning(\n f\"Fetching tables names failed for schema {schema_name} due to - {err}\"\n )\n logger.debug(traceback.format_exc())", "def _get_tabletype(cls) -> str:\n raise NotImplementedError", "def set_schema():\n schema = StructType([\n StructField(\"cicid\",DoubleType(),True),\n StructField(\"arrdate\",DoubleType(),True),\n StructField(\"i94cit\",DoubleType(),True),\n StructField(\"i94res\",DoubleType(),True),\n StructField(\"i94port\",StringType(),True),\n StructField(\"i94mode\",DoubleType(),True),\n StructField(\"i94addr\",StringType(),True),\n StructField(\"depdate\",DoubleType(),True), \n StructField(\"i94bir\",DoubleType(),True),\n StructField(\"i94visa\",DoubleType(),True),\n StructField(\"gender\",StringType(),True),\n StructField(\"airline\",StringType(),True),\n StructField(\"visatype\",StringType(),True)])\n return schema", "def get_schemas(self, conn):\n return conn.get_schemas()['table_schema']", "def _get_model_from_table_name(table_name: str) -> Optional[Type[RDSModel]]:\n table_model = None\n try:\n if hasattr(Base, '_decl_class_registry'):\n models = Base._decl_class_registry.values() # sqlalchemy < 1.4\n else:\n models = Base.registry._class_registry.values()\n\n for model in models:\n if hasattr(model, '__tablename__') and model.__tablename__ == table_name:\n table_model = model\n except Exception as e:\n LOGGER.exception(f'Failed to get model for the table: {table_name} from rds model base')\n raise e\n\n return table_model", "def guess_schema_from_model(model, tensor_type=None, schema=None):\n if schema is not None:\n try:\n guessed = guess_schema_from_model(model)\n except NotImplementedError: # pragma: no cover\n return _replace_tensor_type(schema, tensor_type)\n if len(guessed) != len(schema):\n raise RuntimeError( # pragma: no cover\n \"Given schema and guessed schema are not the same:\\nGOT: {}\\n-----\\nGOT:\\n{}\".format(\n schema, guessed))\n return _replace_tensor_type(schema, tensor_type)\n\n if hasattr(model, 'coef_'):\n # linear model\n init = [('X', FloatTensorType([None, model.coef_.shape[1]]))]\n return _replace_tensor_type(init, tensor_type)\n elif hasattr(model, 'dump_model'):\n dumped = model.dump_model()\n if isinstance(dumped, dict) and 'feature_names' in dumped:\n names = dumped['feature_names']\n init = [(name, FloatTensorType([None, 1])) for name in names]\n return _replace_tensor_type(init, tensor_type)\n\n data = pprint.pformat(model.__dict__)\n dirs = pprint.pformat(dir(model))\n if hasattr(model, 'dump_model'): # pragma: no cover\n dumped = model.dump_model()\n keys = list(sorted(dumped))\n last = pprint.pformat([keys, dumped])\n if len(last) >= 200000:\n last = last[:200000] + \"\\n...\"\n else:\n last = \"\"\n raise NotImplementedError( # pragma: no cover\n \"Unable to guess schema for model {}\\n{}\\n----\\n{}\\n------\\n{}\".format(\n model.__class__, data, dirs, last))", "def __init__(self, schema_row):\n self.schema = []\n for field in schema_row['fields']:\n self.schema.append(field['type'])", "def table(self, table: 
Union[str, sa.Table]) -> B[B, E]:", "def get_table_schema(dataset_id, table_id):\n logging.info('getting table schema')\n bigquery_client = bigquery.Client()\n dataset_ref = bigquery_client.dataset(dataset_id)\n bg_tableref = bigquery.table.TableReference(dataset_ref, table_id)\n bg_table = bigquery_client.get_table(bg_tableref)\n return bg_table.schema", "def schema(self):\n raise NotImplementedError", "def build_song_schema():\n schema = StructType(\n [\n StructField('artist_id', StringType(), True),\n StructField('artist_latitude', DecimalType(), True),\n StructField('artist_longitude', DecimalType(), True),\n StructField('artist_location', StringType(), True),\n StructField('artist_name', StringType(), True),\n StructField('duration', DecimalType(), True),\n StructField('num_songs', IntegerType(), True),\n StructField('song_id', StringType(), True),\n StructField('title', StringType(), True),\n StructField('year', IntegerType(), True)\n ]\n )\n return schema", "def _get_table(self):\n\t\treturn self._table", "def getTableColumnDefs(self, schema, table):\r\n src_columns = self.fetchSqlRecords(\r\n \"select c.column_name, data_type, character_maximum_length, numeric_precision, numeric_scale from information_schema.columns c where c.table_schema = '{}' and c.table_name='{}'\".format(schema, table))\r\n return [dict(zip(('name', 'type', 'max_length', 'precision', 'scale'), c)) for c in src_columns]", "def schema() -> None:\n pass", "def get_schema_cls() -> t.Any:\n return None", "def _get_table_schema(self):\n\n return {\n 'AttributeDefinitions': [\n {\n 'AttributeName': self._key_field.name,\n 'AttributeType': self._key_field.data_type\n }\n ],\n 'TableName': self.table_name,\n 'KeySchema': [\n {\n 'AttributeName': self._key_field.name,\n 'KeyType': 'HASH'\n }\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': self.read_capacity_units,\n 'WriteCapacityUnits': self.write_capacity_units\n }\n }", "def get_field_type(self, table_name, field_name):\n \n dtype = self.field_types[(self.field_types.TABNAME == table_name) & (self.field_types.FIELDNAME == field_name)]['DATATYPE'].values[0] \n return dtype", "def columns_type(self,table):\n with self.conn.cursor() as cur:\n #_logger.debug('Columns Query. sql: %r', self.table_columns_query)\n cur.execute(self.columns_info_query % (self.dbname,table))\n for row in cur:\n yield row", "def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n )", "def get_table_definition(db_name, schema_name, table_name, server_name, data_partition_column_name='', excluded_columns=()):\n server_name = '' if server_name == '127.0.0.1' or server_name == 'localhost' else server_name\n server_name = f'[{server_name}].' if server_name else ''\n\n sql = (\"SELECT T.name AS TABLE_NAME, C.name AS COLUMN_NAME, P.name AS DATA_TYPE, \"\n \"P.max_length AS SIZE, CAST(P.precision AS VARCHAR) + '/' + CAST(P.scale AS VARCHAR) AS PRECISION_SCALE, \"\n \"c.* FROM {0}[{1}].sys.objects AS T JOIN {0}[{1}].sys.columns AS C ON T.object_id = C.object_id \"\n \"JOIN {0}[{1}].sys.types AS P ON C.system_type_id = P.system_type_id \"\n \"JOIN sys.schemas ss ON (T.schema_id = ss.schema_id) \"\n \" WHERE T.type_desc = 'USER_TABLE' and ss.name = ? \"\n \"and T.name = ? 
and P.name != 'timestamp' and P.name != 'sysname' order by column_id asc\").format(server_name, db_name)\n\n columns = fetch_rows(sql, [schema_name, table_name])\n\n target_table_column_prefix = get_config()['TARGET_TABLE_COLUMN_PREFIX']\n out_columns = {}\n\n for column in columns:\n column['original_data_type'] = column['data_type']\n\n if column['column_name'].upper() in default_columns:\n column['target_table_column_name'] = target_table_column_prefix + column['column_name']\n else:\n column['target_table_column_name'] = column['column_name']\n\n # Update the data type for the data partition column\n if data_partition_column_name != '' and column['column_name'].upper() == data_partition_column_name.upper():\n column['data_type'] = 'datetime'\n\n out_columns[column['column_name'].upper()] = column\n\n if len(excluded_columns) > 0:\n for excluded_column in excluded_columns:\n out_columns.pop(excluded_column)\n\n return out_columns", "def get_table_by_person_id(table, person_id, schema=staging_schema, fields=\"default\"):\n if fields == \"default\":\n columns = \" * \"\n elif fields == \"Edit\":\n columns = \" [PersonID], [FirstName], [LastName], [SSN], [DOB], [Phone], [Email], [PersonType], [DataEntryLocationID], [AssignedInfectionStaff], [Note], [PartOfStudyYN], [StudyDetails], [PtHospitalizedYN], [PtCurrentLocationID], [PtHighComplicationRiskYN], [PtContactedYN], [PtDateLastContacted], [PtVABostonPCPYN], [PtPCPName], [PtPCPPhone], [EmplSupervisorName], [EmplSupervisorPhone], [EmplSupervisorNotifiedYN], [EmplIDNotifiedYN], [EmplEmployeeRole], [EmplCampus], [EmplCampusesOther], [EmplShiftSchedule], [EmplWorkLocation], [EmplPatientInteractionYN], [EmplSXSExposureWorkStartDate], [EmplSXSExposureWorkEndDate], [EmplWorkClearedYN1], [EmplReturnToWorkDate1] ,TestDate1, TestLocation1, TestResult1, TestDriveThruNeededYN1, CovidStatus1, AssignedProvider1, NumOfConsecutiveNegTestSinceLastPos1, TestDate2, TestLocation2, TestResult2, TestDriveThruNeededYN2, CovidStatus2, AssignedProvider2, NumOfConsecutiveNegTestSinceLastPos2, TestDate3, TestLocation3, TestResult3, TestDriveThruNeededYN3, CovidStatus3, AssignedProvider3, NumOfConsecutiveNegTestSinceLastPos3, TestDate4, TestLocation4, TestResult4, TestDriveThruNeededYN4, CovidStatus4, AssignedProvider4, NumOfConsecutiveNegTestSinceLastPos4, TestDate5, TestLocation5, TestResult5, TestDriveThruNeededYN5, CovidStatus5, AssignedProvider5, NumOfConsecutiveNegTestSinceLastPos5, TestDate6, TestLocation6, TestResult6, TestDriveThruNeededYN6, CovidStatus6, AssignedProvider6, NumOfConsecutiveNegTestSinceLastPos6, TestDate7, TestLocation7, TestResult7, TestDriveThruNeededYN7, CovidStatus7, AssignedProvider7, NumOfConsecutiveNegTestSinceLastPos7, TestDate8, TestLocation8, TestResult8, TestDriveThruNeededYN8, CovidStatus8, AssignedProvider8, NumOfConsecutiveNegTestSinceLastPos8, TestDate9, TestLocation9, TestResult9, TestDriveThruNeededYN9, CovidStatus9, AssignedProvider9, NumOfConsecutiveNegTestSinceLastPos9, TestDate10, TestLocation10, TestResult10, TestDriveThruNeededYN10, CovidStatus10, AssignedProvider10, NumOfConsecutiveNegTestSinceLastPos10, [QuarantinedYN1], [QuarantineStartDate1], [QuarantineEndDate1], [QuarantineSymptomTrackerYN1], [QuarantinedYN2], [QuarantineStartDate2], [QuarantineEndDate2], [QuarantineSymptomTrackerYN2], [QuarantinedYN3], [QuarantineStartDate3], [QuarantineEndDate3], [QuarantineSymptomTrackerYN3], [QuarantinedYN4], [QuarantineStartDate4], [QuarantineEndDate4], [QuarantineSymptomTrackerYN4], [QuarantinedYN5], [QuarantineStartDate5], 
[QuarantineEndDate5], [QuarantineSymptomTrackerYN5], [LocationChangeDateTime1], [LocationChangeNewLocationID1], [LocationChangeDateTime2], [LocationChangeNewLocationID2], [LocationChangeDateTime3], [LocationChangeNewLocationID3], [LocationChangeDateTime4], [LocationChangeNewLocationID4], [LocationChangeDateTime5], [LocationChangeNewLocationID5], [ExposureDate1], [ExposureType1], [ExposureLocation1], [ExposureDetails1], [ExposureDate2], [ExposureType2], [ExposureLocation2], [ExposureDetails2], [ExposureDate3], [ExposureType3], [ExposureLocation3], [ExposureDetails3], [ExposureDate4], [ExposureType4], [ExposureLocation4], [ExposureDetails4], [ExposureDate5], [ExposureType5], [ExposureLocation5], [ExposureDetails5], [SymptomType1], [SymptomOnsetDate1], [SymptomOnsetLocationID1], [SymptomResolutionDate1], [SymptomResolutionLocationID1], [SymptomNote1], [SymptomType2], [SymptomOnsetDate2], [SymptomOnsetLocationID2], [SymptomResolutionDate2], [SymptomResolutionLocationID2], [SymptomNote2], [SymptomType3], [SymptomOnsetDate3], [SymptomOnsetLocationID3], [SymptomResolutionDate3], [SymptomResolutionLocationID3], [SymptomNote3], [SymptomType4], [SymptomOnsetDate4], [SymptomOnsetLocationID4], [SymptomResolutionDate4], [SymptomResolutionLocationID4], [SymptomNote4], [SymptomType5], [SymptomOnsetDate5], [SymptomOnsetLocationID5], [SymptomResolutionDate5], [SymptomResolutionLocationID5], [SymptomNote5], [SymptomType6], [SymptomOnsetDate6], [SymptomOnsetLocationID6], [SymptomResolutionDate6], [SymptomResolutionLocationID6], [SymptomNote6], [SymptomType7], [SymptomOnsetDate7], [SymptomOnsetLocationID7], [SymptomResolutionDate7], [SymptomResolutionLocationID7], [SymptomNote7], [SymptomType8], [SymptomOnsetDate8], [SymptomOnsetLocationID8], [SymptomResolutionDate8], [SymptomResolutionLocationID8], [SymptomNote8], [SymptomType9], [SymptomOnsetDate9], [SymptomOnsetLocationID9], [SymptomResolutionDate9], [SymptomResolutionLocationID9], [SymptomNote9], [SymptomType10], [SymptomOnsetDate10], [SymptomOnsetLocationID10], [SymptomResolutionDate10], [SymptomResolutionLocationID10], [SymptomNote10], [Sta3n], [DOD], [EmplWorkClearedYN2], [EmplReturnToWorkDate2], [EmplWorkClearedYN3], [EmplReturnToWorkDate3], [EmplWorkClearedYN4], [EmplReturnToWorkDate4], [EmplWorkClearedYN5], [EmplReturnToWorkDate5], [StreetAddress], [City], [County], [State], [Zip] \"\n elif fields == \"Detail\":\n columns = \" SSN4, LastName, FirstName, DOB, DOD, Phone, Email, PersonType, AdmitStatus, RoomBed, WardLocationName, InstitutionName, PACT_Provider, PACT_Team, PACT_Phone, CDWUpdatedDateTime \"\n\n q = \"select {columns} from {schema}.{table} where PersonID = ?\"\n q = q.format(columns=columns, schema=schema, table=table)\n tb = pd.read_sql(q, engine, params=[person_id])\n return tb", "def _get_geometry(self):\r\n if self._geometry_column_name not in self.columns:\r\n raise AttributeError(\"Geometry Column Not Present: %s\" % self._geometry_column_name)\r\n return self[self._geometry_column_name]", "async def get_schema_info(self, collection):\n await self.ensure_collection(collection)\n try:\n # Luke handler is not supported in API v2 yet.\n # /v2/collections/<COLLECTION>/schema/fields doesn't show dynamically\n # created fields.\n # So using old API (/solr/...).\n response = await self.get(\n '/solr/{}/admin/luke?numTerms=0'.format(collection)\n )\n return json.loads(response.body.decode('utf-8'))\n except SolrError:\n logger.warning('Failed to fetch fields list for collection {}'\n .format(collection))\n raise", "def schema(self):\n 
return self._schema", "def getSchema(cls):\n pass", "def _get_schema(self):\n self.to_dask()\n return Schema(dtype=self._df.dtypes,\n shape=(None, len(self._df.columns)),\n npartitions=self._df.npartitions,\n metadata=self.metadata)", "def _columns(cls, schema: dsl.Source.Schema) -> typing.Sequence[str]:\n return tuple(f.name for f in schema)", "def ArrowSchema(self) -> pa.Schema:", "def _get_schema_using_query(self, query: str) -> sch.Schema:\n return sch.Schema.from_tuples(self._metadata(query))", "def _get_table_obj(self, mode):\n return self.client[f\"bigquery_{mode}\"].get_table(self.table_full_name[mode])", "def tableExists(self, schema, table):\r\n r = self.fetchSqlRecords(\r\n \"SELECT to_regclass('{}.{}')\".format(schema, table))\r\n return r[0][0]", "def _get_stored_schema(self, table: str) -> Optional[TableSchema]:\n try:\n with open(self.schemas / (table + '.json'), 'r') as f:\n return json.load(f)\n except FileNotFoundError:\n return None", "def table(entity) -> sa.Table:\n return entity.__table__", "def geo_field_from_model(model, default_geo_field_name=None):\n try:\n # If the class defines a geomfield property, use it !\n return model.geomfield\n except AttributeError:\n pass\n\n fields = model._meta.fields\n geo_fields = [f for f in fields if isinstance(f, GeometryField)]\n\n # Used for error case\n geo_fields_names = lambda: ', '.join([f.name for f in geo_fields])\n\n if len(geo_fields) > 1:\n if not default_geo_field_name:\n raise ValueError(\"More than one geodjango geometry field found, please specify which to use by name using the 'geo_field' keyword. Available fields are: '%s'\" % geo_fields_names())\n else:\n geo_field_by_name = [fld for fld in geo_fields if fld.name == default_geo_field_name]\n if not geo_field_by_name:\n raise ValueError(\"Geodjango geometry field not found with the name '%s', fields available are: '%s'\" % (default_geo_field_name, geo_fields_names()))\n else:\n geo_field = geo_field_by_name[0]\n elif geo_fields:\n geo_field = geo_fields[0]\n else:\n raise ValueError('No geodjango geometry fields found in this model')\n\n return geo_field", "def get_table_attributes(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n table_attributes = dict(primary_attributes=[], secondary_attributes=[])\n for attribute_name, attribute_info in getattr(schema_virtual_module,\n table_name).heading.attributes.items():\n if attribute_info.in_key:\n table_attributes['primary_attributes'].append((\n attribute_name,\n attribute_info.type,\n attribute_info.nullable,\n attribute_info.default,\n attribute_info.autoincrement\n ))\n else:\n table_attributes['secondary_attributes'].append((\n attribute_name,\n attribute_info.type,\n attribute_info.nullable,\n attribute_info.default,\n attribute_info.autoincrement\n ))\n\n return table_attributes", "def guess_schema_from_data(X, tensor_type=None, schema=None):\n init = guess_initial_types(X, schema)\n if tensor_type is not None:\n init = _replace_tensor_type(init, tensor_type)\n # Grouping column\n unique = set()\n for _, col in init:\n if len(col.shape) != 2:\n return init # pragma: no cover\n if col.shape[0] is not None:\n return init # pragma: no cover\n if len(unique) > 0 and col.__class__ not in unique:\n return init # pragma: no cover\n unique.add(col.__class__)\n unique = list(unique)\n return [('X', unique[0]([None, sum(_[1].shape[1] for _ in init)]))]", "def modelfields(entity) -> 
Dict[str, Field]:\n return entity.__modelfields__", "def get_schema(self) -> ArchiveSchema:\n return self.schema", "def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})", "def schema(self):\n pass", "def resolve_schema_instance(schema: Union[BaseModel, str]) -> BaseModel:\n from smpa.schemas.core import CoreGetSchema, CoreListSchema\n from smpa.schemas.auth import LoginSchema\n\n if schema == 'CoreListSchema':\n return CoreListSchema\n elif schema == 'CoreGetSchema':\n return CoreGetSchema\n elif schema == 'LoginSchema':\n return LoginSchema\n\n if isinstance(schema, type) and issubclass(schema, BaseModel):\n return schema()\n if isinstance(schema, BaseModel):\n return schema\n try:\n return model_registry.get_class(schema)()\n except RegistryError:\n raise ValueError(\n \"{!r} is not a BaseModel subclass or instance and has not\"\n \" been registered in the model registry.\".format(schema)\n )", "def get_model_schema(self, app, model):\n model_schema = {\n 'app': {\n 'name': model._meta.app_label,\n 'label': model._meta.app_config.verbose_name,\n },\n 'name': model._meta.model_name,\n 'label': model._meta.verbose_name,\n 'components': []\n }\n\n for field in model._meta.get_fields():\n # 排除不需要在前端构建form的field: id、反向关联field\n # print('field', type(field), field)\n if field.name == 'id':\n continue\n if isinstance(field, ForeignObjectRel):\n # logger.info(\"ForeignObjectRel\", field)\n continue\n\n component = self.get_field_data(model, field)\n # logger.info('component', component)\n model_schema['components'].append(component)\n # print('get_model_schema', model_schema)\n return model_schema\n # return JsonResponse(model_schema)", "def create_staging_schema(cursor,table_schema):\n create_schema = \"CREATE SCHEMA IF NOT EXISTS \" + table_schema + \";\"\n cursor.execute(create_schema)", "def instance_schema(self):\n raise NotImplementedError", "def query_table_names_and_types(\n self, schema_name: str\n ) -> Iterable[TableNameAndType]:\n\n return [\n TableNameAndType(name=table_name)\n for table_name in self.inspector.get_table_names(schema_name) or []\n ]", "def _table_from_ft(ft_schema: dict) -> bigquery.Table:\n # A \"TableSchema\" is just a sequence of SchemaFields https://googleapis.dev/python/bigquery/latest/generated/google.cloud.bigquery.table.Table.html\n schema = list(map(_create_field_schema, ft_schema['columns']))\n table = bigquery.Table(\n bigquery.TableReference(ds, to_safe_name(ft_schema['name'])),\n schema\n )\n table.description = ft_schema.get('description', '')\n return table", "async def schema_state(appname, schema, table):\n base = sprout.cfg.db_str(appname)\n con = await asyncpg.connect(base)\n req = ', '.join([\n 'table_catalog',\n 'table_schema',\n 'table_name',\n 'column_name',\n 'data_type'\n ])\n q = f\"\"\"select {req} from information_schema.columns\nwhere table_schema='{schema}' and table_name='{table}';\"\"\"\n try:\n ret = await con.fetch(q)\n df = pd.DataFrame.from_records(ret, columns=ret[0].keys())\n return df\n except Exception as e:\n sprout.cfg.log.error(f\"fetching current table state failed: {e}\")\n finally:\n await con.close()", "def get_schema() -> Dict[str, type]:\n schema: Dict[str, type] = {}\n\n # Add all columns from pipeline configs\n for pipeline in get_pipelines():\n schema.update(pipeline.schema)\n\n # Add new columns from adapter\n for col_old, col_new in OUTPUT_COLUMN_ADAPTER.items():\n if col_old in schema and col_new is not None:\n schema[col_new] = 
schema[col_old]\n\n return schema", "def build_song_schema():\n schema = T.StructType(\n [\n T.StructField('artist_id', T.StringType(), True),\n T.StructField('artist_latitude', T.DecimalType(), True),\n T.StructField('artist_longitude', T.DecimalType(), True),\n T.StructField('artist_location', T.StringType(), True),\n T.StructField('artist_name', T.StringType(), True),\n T.StructField('duration', T.DecimalType(), True),\n T.StructField('num_songs', T.IntegerType(), True),\n T.StructField('song_id', T.StringType(), True),\n T.StructField('title', T.StringType(), True),\n T.StructField('year', T.IntegerType(), True)\n ]\n )\n return schema", "def _split_table_name(table_name):\n table_name_items = table_name.split(\".\")\n if len(table_name_items) == 1:\n schema_name = None\n elif len(table_name_items) == 2:\n schema_name, table_name = table_name_items\n else:\n raise ValueError(\"Cannot determine schema/table name from input {}\".format(table_name))\n return schema_name, table_name" ]
[ "0.6816969", "0.65374726", "0.62112284", "0.6156063", "0.5837291", "0.569644", "0.56083405", "0.55587506", "0.55330145", "0.547764", "0.5454867", "0.5441994", "0.54250515", "0.54212207", "0.5413206", "0.5390883", "0.5313759", "0.5281348", "0.52764404", "0.52503705", "0.5214422", "0.5203239", "0.51881", "0.5138209", "0.5130797", "0.512235", "0.5115746", "0.5110222", "0.510605", "0.5105856", "0.5098519", "0.50984675", "0.5094267", "0.5077075", "0.50769687", "0.50539136", "0.50362164", "0.501837", "0.500501", "0.49993223", "0.49651104", "0.4963914", "0.49623787", "0.49561557", "0.49455547", "0.49386814", "0.49379078", "0.49297807", "0.49228823", "0.49178168", "0.4916689", "0.49141946", "0.49099532", "0.4905032", "0.48901516", "0.48875237", "0.4886075", "0.48776555", "0.48736033", "0.4872982", "0.48723057", "0.4871822", "0.4866216", "0.48590702", "0.4856022", "0.4841703", "0.48386475", "0.4838168", "0.4823492", "0.4816273", "0.48114282", "0.48113325", "0.48100972", "0.48063752", "0.48032632", "0.47972527", "0.4784114", "0.47767892", "0.4775406", "0.47683606", "0.47543156", "0.47416347", "0.47334343", "0.4732999", "0.47305095", "0.47226188", "0.471607", "0.47125912", "0.4711352", "0.47081202", "0.46973783", "0.4689936", "0.4689526", "0.46844015", "0.467885", "0.46688756", "0.4661925", "0.46602398", "0.46568164", "0.46560115" ]
0.54125166
15
Runs a single user-provided line as a REPL input.
def handle_line(line: str, stmt_index: int): fn_name = f'repl_{stmt_index}' module_text = f""" import std fn {fn_name}() -> () {{ {line} }} """ # For error reporting we use a helper that puts this into a fake filesystem # location. def make_fakefs_open(): fs = fake_filesystem.FakeFilesystem() fs.CreateFile(FILENAME, module_text) return fake_filesystem.FakeFileOpen(fs) importer = import_helpers.Importer() while True: try: fake_module = parser.Parser( scanner.Scanner(FILENAME, module_text), fn_name).parse_module() except span.PositionalError as e: parser_helpers.pprint_positional_error(e, fs_open=make_fakefs_open()) return # First attempt at type checking, we expect this may fail the first time # around and we'll substitute the real return type we observe. try: type_info = cpp_typecheck.check_module(fake_module, importer.cache, importer.additional_search_paths) except XlsTypeError as e: # We use nil as a placeholder, and swap it with the type that was expected # and retry once we determine what that should be. if e.rhs_type == concrete_type_mod.ConcreteType.NIL: module_text = module_text.replace(' -> ()', ' -> ' + str(e.lhs_type)) continue # Any other errors are likely real type errors in the code and we should # report them. parser_helpers.pprint_positional_error(e, fs_open=make_fakefs_open()) return # It type checked ok, and we can proceed. break # Interpret the line and print the result. # TODO(leary): 2020-06-20 No let bindings for the moment, just useful for # evaluating expressions -- could put them into the module scope as consts. interpreter = interpreter_mod.Interpreter( fake_module, type_info, importer.typecheck, import_cache=importer.cache, additional_search_paths=(), trace_all=False) result = interpreter.run_function(fn_name, args=(), symbolic_bindings=None) print(result) type_info.clear_type_info_refs_for_gc() return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handle_stdin(self, line):\r\n return input(line.replace(STDIN_PROMPT, \"\"))", "def read_user_input(self):\n\n self.commandline = raw_input(\"Enter the string you want to parse\\n\")", "def do_prompt(self, line):\n self.prompt = line + ': '", "def on_user_input(self, line: str) -> str:\n return self.infer_batch(lines=[line])[0]", "def do_shell(self, line):\n eval(line)", "def parseInputLine(self, line):\r\n if line is not None and line is not '':\r\n func = getattr(self, 'cmd_' + line.split()[0].upper(), None)\r\n if func is not None:\r\n func(line.split()[1:])\r\n else:\r\n self.terminal.write('No such command')\r\n self.showPrompt()", "def do_prompt(self, line):\n if line:\n self.prompt = \"(%s) \" %line\n\n else:\n print 'Please specify a prompt text'", "def process_input_line(self, line, store_history=True):\r\n #print \"input='%s'\"%self.input\r\n stdout = sys.stdout\r\n splitter = self.IP.input_splitter\r\n try:\r\n sys.stdout = self.cout\r\n splitter.push(line)\r\n more = splitter.push_accepts_more()\r\n if not more:\r\n source_raw = splitter.source_raw_reset()[1]\r\n self.IP.run_cell(source_raw, store_history=store_history)\r\n finally:\r\n sys.stdout = stdout", "def do(self, line): \n self.interface.onecmd(line)", "def do_input(self, line):\n cmd_args = io.parse_cmd_args(line, io.input_cmd_pattern)\n if cmd_args:\n success = self.manager.input(\n cmd_args.get('target'), \n cmd_args.get('cslist'), \n mode=cmd_args.get('mode')\n )\n if success:\n self.console_print(\"Yippee! input successfull!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! You can try again.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)", "def _input(str=''):\n print(str, end='', flush=True)\n return stdin.readline().rstrip('\\n')", "def do_run(self, line: str):\n if self._real_module is None:\n print(\"'run' command depends on using a module. 
See 'use' for help.\")\n return\n\n self._real_module.run()", "def raw_next_line() -> str:\n return input()", "def input(self, prompt):\r\n return console_input(prompt)", "def default(self, line):\n self.history.append(line)\n if line[:1] == '!':\n line = line[1:]\n locals = self.curframe_locals\n ns = self.curframe.f_globals.copy()\n ns.update(locals)\n try:\n code = compile(line + '\\n', '<stdin>', 'single')\n save_stdout = sys.stdout\n save_stdin = sys.stdin\n save_displayhook = sys.displayhook\n try:\n sys.stdin = self.stdin\n sys.stdout = self.stdout\n sys.displayhook = self.displayhook\n exec(code, ns, locals)\n finally:\n sys.stdout = save_stdout\n sys.stdin = save_stdin\n sys.displayhook = save_displayhook\n except:\n exc_info = sys.exc_info()[:2]\n self.error(traceback.format_exception_only(*exc_info)[-1].strip())", "def ilaw(self, line):\n line = line.strip()\n if not line:\n logger.error(r\"the command passed to %ilaw must not be empty\")\n return\n\n argv = shlex.split(line)\n prog = argv.pop(0)\n\n # prog must be a valid law cli prog\n if prog not in law.cli.cli.progs:\n raise ValueError(\"'{}' is not a valid law cli program\".format(prog))\n\n # forward to the actual prog, run special case\n try:\n # call the line_fn when set\n if callable(line_fn):\n logger.info(\"calling line function '{}'\".format(line_fn.__name__))\n line_fn(line)\n\n if prog == \"run\":\n # perform the run call interactively\n return law_run(argv)\n else:\n # forward all other progs to the cli interface\n return law.cli.cli.run([prog] + argv)\n except SystemExit as e:\n # reraise when the exit code is non-zero\n if e.code:\n raise", "def get_user_input(self):\n return stdin.readline().strip()", "def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())", "def xi(self, line=''):\r\n #line = self.xxFixLine(line)\r\n return Easy.SubInteract( ['/bin/bash', '-i', '-c', line, ] ) #shell=True\r", "def read_line():\n # try reading a line, removing any extra whitespace\n try:\n line = sys.stdin.readline().strip()\n # i3status sends EOF, or an empty line\n if not line:\n sys.exit(3)\n return line\n # exit on ctrl-c\n except KeyboardInterrupt:\n sys.exit()", "def read_line():\n # try reading a line, removing any extra whitespace\n try:\n line = sys.stdin.readline().strip()\n # i3status sends EOF, or an empty line\n if not line:\n sys.exit(3)\n return line\n # exit on ctrl-c\n except KeyboardInterrupt:\n sys.exit()", "def rlinput(prompt, prefill=''):\n if \"readline\" not in sys.modules:\n # For example on Windows\n return input(prompt)\n else:\n readline.set_startup_hook(lambda: readline.insert_text(prefill))\n try:\n return input(prompt)\n finally:\n readline.set_startup_hook()", "def xx(self, line=''):\r\n ## line in this context is one ipython line which may have line breaks in it\r\n line = self.xxFixLine(line)\r\n return self.shell.getoutput(line)", "def _get_input(question: str) -> str:\n print(question)\n sys.stdout.flush()\n user_input = sys.stdin.readline()\n user_input = user_input.strip()\n return user_input", "def lineEdit(args: list) -> QLineEdit:\n lineEdit = QLineEdit()\n lineEdit.setText(args[0])\n return lineEdit", "def _interact_with_user(code: str, increase: bool):\n pass", "def text_input():\n return 
input(\">>>\")", "def next_line() -> str:\n return input().strip()", "def precmd(self, line):\n return line", "def eval(self, line):\n self.eval(line)", "def do_shell(self, line):\n os.system(line)", "def readline(self) -> str | None:", "def get_user_text_input(self):\n\t\tuser_input = raw_input('You: ')\n\t\treturn user_input", "def _interpreter(self, inp):\n inp = inp.strip()\n elem = inp.split(\" \")\n return self.controller.execute(elem[0], elem[1:])", "def _input(msg):\n if sys.version_info.major >= 3:\n ans = input(msg)\n elif sys.version_info.major == 2:\n ans = raw_input(msg)\n else:\n raise Exception(\"Unsupported python version. Please upgrade to python 2 or higher.\")\n\n return ans", "def read(self):\n return raw_input('> ')", "def onecmd(self, line):\n cmd, arg, line = self.parseline(line)\n if not line:\n return self.emptyline()\n if cmd is None:\n return self.default(line)\n self.lastcmd = line\n if line == 'EOF' : #end the loop\n self.lastcmd = ''\n return True\n if cmd == '':\n return self.default(line)\n else:\n func = None\n for context in reversed(self.__class__.context_stack):\n func = context.resolve_cmd(cmd)\n if not func:\n break\n if not func:\n func = self.__class__.top_context.resolve_cmd(cmd)\n if not func:\n return self.default(line)\n args = self.cmdline_parse(arg)\n return func(args)", "def testInterpretingSection(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('4')\n self.assertEqual(4, pl.stdin)\n\n repl.runCommandLine('_')\n self.assertEqual(4, pl.stdin)\n\n repl.runCommandLine('[3, 6, 9]')\n self.assertEqual([3, 6, 9], pl.stdin)\n\n repl.runCommandLine(\"print('hello')\")\n self.assertEqual('hello', pl.stdin)\n\n repl.runCommandLine('echo hello too')\n self.assertEqual(['hello too'], pl.stdin)\n\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)", "def pseudo_raw_input(self, prompt):\n\n if self.use_rawinput:\n try:\n line = sm.input(prompt)\n except EOFError:\n line = 'EOF'\n else:\n self.stdout.write(prompt)\n self.stdout.flush()\n line = self.stdin.readline()\n if not len(line):\n line = 'EOF'\n else:\n if line[-1] == '\\n': # this was always true in Cmd\n line = line[:-1]\n return line", "def interpret_line(self, line, source=None, lineno=None):\n\n pline = self.parser.parse_line(line, source=source, lineno=lineno)\n return self.execute(pline)", "def cmd_user(args):", "def readline(self) -> Optional[str]:", "def do_Intermediate (self, line):\r\n GenIntermediate(self.stdin, self.tracking).do_cmdloop()", "def requestInput(st):\n return input(st+\": \")", "def cli():\n\n while True:\n try:\n i = input()\n except EOFError:\n continue\n \n if i.startswith(\"r \"): # Reload channel\n reload_channel(i[2:])\n else:\n print(\"Invalid command\\n\")", "def console():\r\n while True:\r\n interpret_command(input(\"POM> \"))", "def run_cmd(server, client):\n msg = [client.get_command()]\n client.input_list += msg\n server.logger.info(\"RECEIVED INPUT {} : {}\".format(client.ip, msg[0]))\n if not client.username or not client.password:\n server.login_screen(client, msg)\n return\n loop_cmds(server, client, msg[0].split(';'))\n server.return_prompt(client)", "def test_perform_get_input_raw_input(monkeypatch):\n monkeypatch.setattr(\"builtins.input\", lambda p: \"my name\" if p == \"> \" else \"boo\")\n assert sync_perform(stdio_dispatcher, Effect(Prompt(\"> \"))) == \"my name\"", "def get_input(user_input):\n return input(user_input)", "def processline(userdata, line):\n\t\n\tdef processcmd(command, args, expected_nargs, 
delegate):\n\t\t\"\"\"Validate the number of arguments and call a delegate handler.\"\"\"\n\t\t\n\t\tif len(args) != expected_nargs:\n\t\t\traise FormatError(\"Wrong number of arguments for '\" + command + \"', expected \" +\n\t\t\t str(expected_nargs) + \" but got \" + str(len(args)))\n\t\t\t\n\t\tdelegate(userdata, *args)\n\t\n\t# commands should use double quotes if an argument has spaces\n\t# in it and escape internal double quotes as necessary\n\ttokens = shlex.split(line)\n\tif len(tokens) < 1:\n\t\treturn\n\t\n\tcmd = tokens[0]\n\targs = tokens[1:]\n\t\n\t# Specialized handlers; they bridge the user facing interface and the internal implementations;\n\t# see the command dictionary below for more information on each operation\n\t\n\tdef timed_handler(userdata, *args):\n\t\t\"\"\"Accomodate the input for a non-recurrent event.\"\"\"\n\t\tfuzzy = False\n\t\t\n\t\tif args[3] == \"exact\":\n\t\t\tfuzzy = False\n\t\telif args[3] == \"fuzzy\":\n\t\t\tfuzzy = True\n\t\telse:\n\t\t\traise FormatError(\"Expected 'fuzzy' or 'exact' but found '\" + args[3] + \"' instead.\")\n\t\t\n\t\tevent = Event.create_once(args[4], fuzzy, args[2])\n\t\t\n\t\tif args[0] == \"add\":\n\t\t\tdevice.schedule(userdata, args[1], event)\n\t\telif args[0] == \"del\":\n\t\t\tdevice.unschedule(userdata, args[1], event)\n\t\telse:\n\t\t\traise FormatError(\"Expected 'add' or 'del' but found '\" + args[0] + \"' instead.\")\n\t\n\tdef recurrent_handler(userdata, *args):\n\t\t\"\"\"Accomodate the input for a recurrent event.\"\"\"\n\t\tfuzzy = False\n\t\t\n\t\tif args[5] == \"exact\":\n\t\t\tfuzzy = False\n\t\telif args[5] == \"fuzzy\":\n\t\t\tfuzzy = True\n\t\telse:\n\t\t\traise FormatError(\"Expected 'fuzzy' or 'exact' but found '\" + args[5] + \"' instead.\")\n\t\t\n\t\tevent = Event.create_recurrent(args[6], fuzzy, args[2], args[3], args[4])\n\t\t\n\t\tif args[0] == \"add\":\n\t\t\tdevice.schedule(userdata, args[1], event)\n\t\telif args[0] == \"del\":\n\t\t\tdevice.unschedule(userdata, args[1], event)\n\t\telse:\n\t\t\traise FormatError(\"Expected 'add' or 'del' but found '\" + args[0] + \"' instead.\")\n\t\n\tdef devlist_handler(userdata, *args):\n\t\t\"\"\"Transform the raw devlist into a human readable list.\"\"\"\n\t\tfor (dev, connected) in database.devlist(userdata[\"cursor\"]):\n\t\t\tif dev == \"devmaster\":\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif connected:\n\t\t\t\tprint(shlex.quote(\"+\" + dev), end=\" \")\n\t\t\telse:\n\t\t\t\tprint(shlex.quote(\"-\" + dev), end=\" \")\n\t\t\n\t\tprint()\n\t\n\tdef guestlist_handler(userdata, *args):\n\t\t\"\"\"Transform the raw guestlist into a human readable list.\"\"\"\n\t\tfor guest in userdata[\"guestlist\"]:\n\t\t\tprint(shlex.quote(guest), end=\" \")\n\t\t\n\t\tprint()\n\t\n\tdef info_handler(userdata, *args):\n\t\t\"\"\"Transform the raw info list into a human readable list.\"\"\"\n\t\tinfo = database.devinfo(userdata[\"cursor\"], args[0])\n\t\t\n\t\tif info is None:\n\t\t\tprint(\"can't find user \" + args[0])\n\t\t\treturn\n\t\t\n\t\tstype, connected, status = info\n\t\t\n\t\tprint(shlex.quote((\"+\" if connected else \"-\") + stype), end=\" \")\n\t\tprint(shlex.quote(status))\n\t\n\tdef schedule_handler(userdata, *args):\n\t\t\"\"\"Transform the raw schedule list into a human readable list.\"\"\"\n\t\tfor event in database.devschedule(userdata[\"cursor\"], args[0]):\n\t\t\tprint(str(event))\n\t\t\n\t\tprint(\"\")\n\t\n\t# Command dictionary\n\t\n\tcommands = {\n\t\t# \"command name\": (expected_nargs, delegate)\n\t\t\"add\": (4, device.add),\n\t\t\t# add 
<guestname> <displayname> <type> <status>\n\t\t\t# add a device to the network\n\t\t\"rename\": (2, device.rename),\n\t\t\t# rename <displayname> <newdisplayname>\n\t\t\t# change the public display name of a device in the network\n\t\t\"del\": (1, device.delete),\n\t\t\t# del <displayname>\n\t\t\t# delete a device from the network\n\t\t\"sync\": (1, device.sync),\n\t\t\t# sync <displayname>\n\t\t\t# send a time synchronization message to the specified device\n\t\t\"ping\": (1, device.ping),\n\t\t\t# ping <displayname>\n\t\t\t# check a device is still responsive by sending a ping message\n\t\t\"askstatus\": (1, device.askstatus),\n\t\t\t# askstatus <displayname>\n\t\t\t# ask the device for its current status\n\t\t\"cmd\": (2, device.execute),\n\t\t\t# cmd <displayname> <operation>\n\t\t\t# send immediate command to device;\n\t\t\t# arguments to the operation should be within the operation argument, e.g. 'dimmer 126'\n\t\t\t# the operation must be valid (and have valid arguments) for the device type;\n\t\t\t# otherwise, the command will fail silently\n\t\t\"timed\": (5, timed_handler),\n\t\t\t# timed (add|del) <displayname> <date> (exact|fuzzy) <operation>\n\t\t\t# schedule a command for the future;\n\t\t\t# 'add' indicates to add the operation to the schedule, 'del' indicates to remove it from it;\n\t\t\t# <date> must be formatted as a unix integer timestamp in the future,\n\t\t\t# otherwise the command is a no-op;\n\t\t\t# 'exact' sets the timer for the specific timestamp; 'fuzzy' adds a small amount of time noise;\n\t\t\t# <operation> and <args> follow the same rules as the 'cmd' message;\n\t\t\"recurrent\": (7, recurrent_handler),\n\t\t\t# recurrent (add|del) <displayname> <weekday> <hours> <minutes> (exact|fuzzy) <operation>\n\t\t\t# schedule a recurrent command for the future;\n\t\t\t# 'add' indicates to add the operation to the schedule, 'del' indicates to remove it from it;\n\t\t\t# <weekday> must be a number between 0 and 9. 
Passing 0 signals the operation should execute\n\t\t\t# every day; 1-7 signal it should be executed on Mon-Sun respectively; 8 signals Mon-Fri; and\n\t\t\t# 9 signals Sat-Sun;\n\t\t\t# 'exact' sets the timer for the specific timestamp; 'fuzzy' adds a small amount of time noise;\n\t\t\t# <operation> and <args> follow the same rules as the 'cmd' message\n\t\t\"clear\": (1, device.clearschedule),\n\t\t\t# clear <displayname>\n\t\t\t# clear the schedule for a given device\n\t\t\"devlist\": (0, devlist_handler),\n\t\t\t# devlist\n\t\t\t# retrieve verified device list\n\t\t\t# respond with a list of devices, using the format: ('(-|+)<displayname>' )*\n\t\t\t# where every name is prepended with a positive sign '+' if the device is connected\n\t\t\t# and a negative sign '-' if it is not\n\t\t\"guestlist\": (0, guestlist_handler),\n\t\t\t# guestlist\n\t\t\t# retrieve the guestlist\n\t\t\t# respond with a list of unverified devices, using the format ('<displayname>' )*\n\t\t\t# note that every guest is connected (otherwise it would just be removed from the list)\n\t\t\"info\": (1, info_handler),\n\t\t\t# info <displayname>\n\t\t\t# retrieve device profile\n\t\t\t# respond with the list of device properties in the profile using the format\n\t\t\t# <connected><type> <status>\n\t\t\t# where connected is formatted as + if the device is connected as - if it is not, e.g.\n\t\t\t# '-sonoff on' indicates a disconnected sonoff device with a status of 'on'\n\t\t\"schedule\": (1, schedule_handler)\n\t\t\t# schedule <displayname>\n\t\t\t# retrieve device schedule\n\t\t\t# respond with a list of scheduled commands for the given device using the formats\n\t\t\t# timed <date> (exact|fuzzy) <operation> <args>\n\t\t\t# recurrent <weekday> <hours> <minutes> (exact|fuzzy) <operation> [<args>]\n\t\t\t# for timed and recurrent operations respectively;\n\t\t\t# the specifics of each formats are the same as for their schedule counterparts\n\t}\n\t\n\ttry:\n\t\t(expected_nargs, delegate) = commands[cmd]\n\texcept KeyError:\n\t\tprint(\"Unrecognized command, skipping\", file=sys.stderr)\n\t\treturn\n\t\n\tprocesscmd(cmd, args, expected_nargs, delegate)", "def get_input(prompt):\n return input(prompt)", "def get_input(prompt):\n return input(prompt)", "def inp(text):\r\n input(text)", "def evaluate(self, line):\n locals = self.curframe().f_locals\n globals = self.curframe().f_globals\n try:\n code = compile(line + '\\n', '<stdin>', 'single')\n exec(code, globals, locals)\n except Exception:\n import sys\n t, v = sys.exc_info()[:2]\n if isinstance(t, type('')):\n exc_type_name = t\n else:\n exc_type_name = t.__name__\n print('*** {}: {}'.format(exc_type_name, v))", "def ask_user_input(self, sentence):\n user_input = raw_input(sentence + \" : \")\n return user_input", "def do_say(self, line):\n if line != '':\n print(line)", "def __edit_line(self, line, code, code_obj): # pylint: disable=R0201\r\n try:\r\n result = eval(code_obj, globals(), locals())\r\n except TypeError as ex:\r\n message = \"failed to execute {}: {}\".format(code, ex)\r\n logger.warning(message)\r\n raise EditorError(message)\r\n if result is None:\r\n raise EditorError(\"cannot process line '{}' with {}\".format(\r\n line, code))\r\n elif isinstance(result, list) or isinstance(result, tuple):\r\n line = ' '.join([str(res_element) for res_element in result])\r\n else:\r\n line = str(result)\r\n return line", "def get_input(prompt):\n # type: (str) -> str\n return raw_input(prompt)", "def main():\n dt = DropToken()\n play = True\n while play:\n try:\n line = 
sys.stdin.readline()\n except KeyboardInterrupt:\n break\n if not line:\n break\n play = dt.inputProcess(line)", "def ask_user_input(prompt: str) -> str:\n return input(prompt)", "def interact(self, prompt='debug> '):\r\n msg = 'Entering Octave Debug Prompt...\\n%s' % prompt\r\n self.stdout.write(msg)\r\n while 1:\r\n inp_func = input if not PY2 else raw_input\r\n try:\r\n inp = inp_func() + '\\n'\r\n except EOFError:\r\n return\r\n if inp in ['exit\\n', 'quit\\n', 'dbcont\\n', 'dbquit\\n']:\r\n inp = 'return\\n'\r\n self.write('disp(char(3));' + inp)\r\n if inp == 'return\\n':\r\n self.write('return\\n')\r\n self.write('clear _\\n')\r\n self.readline()\r\n self.readline()\r\n if not pty is None:\r\n self.readline()\r\n self.write('disp(char(3))\\n')\r\n return\r\n self.expect('\\x03')\r\n self.stdout.write(self.expect(prompt))", "def input_helper(prompt):\n if version_info[0] == 2:\n # python2 input is scary - we want raw_input\n return raw_input(prompt)\n else:\n return input(prompt)", "def cmd_note(cls): \n print(\"Enter/Paste your release note. To save use Ctrl-Z (windows) or Ctrl-D in a new line and press Enter.\")\n inputNote = sys.stdin.readlines()\n note = ''\n for line in inputNote:\n note += line\n return note", "def cmd_note(cls): \n print(\"Enter/Paste your release note. To save use Ctrl-Z (windows) or Ctrl-D in a new line and press Enter.\")\n inputNote = sys.stdin.readlines()\n note = ''\n for line in inputNote:\n note += line\n return note", "def Prompt(self):\n self.cli.context_was_set = not self.cli.config.context\n doc = self.cli.run()\n return doc.text if doc else None", "def precmd(self, line):\n return line.strip()", "def getInput(prompt):\n if platform.python_version().startswith('3'):\n userInput = input('%s ' % prompt).strip()\n if platform.python_version().startswith('2'):\n userInput = raw_input('%s ' % prompt).strip()\n return userInput", "def run(self, cmdline):\n self.send(cmdline+\"\\n\")\n rdata = '\\n'.join(self.recv_to_prompt())\n return rdata", "def test_single_dialog_prompt_extra_line(monkeypatch, capsys):\n monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"value\" + \"\\n\"))\n _dialog_prompt(\n parameter=DialogParameter(\"Title\", comment=\"Comment\"),\n )\n captured = capsys.readouterr()\n assert captured.out.count(\"\\n\") == 2", "def handle_input(self):\n\n\t\tline = sys.stdin.readline().strip()\n\n\t\tif line == '':\n\t\t\t# print('')\n\t\t\tself.print_prompt()\n\t\t\treturn\n\n\t\tcommand_name, *parts = line.split()\n\n\t\tif command_name in self.commands:\n\t\t\t# Call given command and unpack parts into args\n\t\t\tself.commands[command_name]['callback'](*parts)\n\t\telse:\n\t\t\tprint(command_name + ' : command not found')\n\t\t\tself.print_available_commands()\n\n\n\t\tself.print_prompt()", "def get_input(*args, **kw):\n if sys.version[0] == \"2\":\n return raw_input(*args, **kw)\n else:\n return input(*args, **kw)", "def start_interaction():\r\n\r\n # Loop infinitely\r\n while True:\r\n # Prints 'Say something: ' and then waits for user input\r\n # Note: line gets a string value\r\n line = input('Say something: ')\r\n\r\n # Right now, not very interesting...?\r\n if line == EASTER_EGG:\r\n print(EASTER_EGG_RESPONSE)\r\n else:\r\n print(repeat(line))", "def inputProcess(self, line):\n fields = line.split()\n # check if input argument size is not 1 or 2\n if len(fields) < 1 or len(fields) > 2:\n print 'Invalid input size!'\n return True\n # call corresponding functions based on input argument(s)\n if fields[0] == 'GET':\n res = self.get()\n if 
res != '':\n print res,\n elif fields[0] == 'EXIT':\n return False\n elif fields[0] == 'BOARD':\n print self.displayBoard()\n elif fields[0] == 'PUT':\n if len(fields) != 2:\n print 'PUT command needs one argument!'\n return True\n try:\n column = int(fields[1])\n if column < 1 or column > 4:\n print 'Column number for PUT command needs to be from 1 to 4'\n else:\n print self.put(column)\n except ValueError:\n print 'Invalid input, for column number please enter an integer from 1 to 4'\n else:\n print 'Invalid input, valid commands consists of GET BOARD EXIT PUT <column> only'\n return True", "def lineReceived(self, line):\n rbuffer = StringIO()\n po=sys.stdout\n sys.stdout = rbuffer\n err=False\n if not hasattr(self,\"dc\"):\n self.dc={\"self\":self.server}\n print \"dc:\", self.dc\n try: exec(line,self.dc)\n except Exception as e: err=e\n except KeyboardInterrupt : pass\n # remove backeffect on dictionary\n if self.dc.has_key('__builtins__'): \n del self.dc['__builtins__']\n # update data context\n # remember to restore the original stdout!\n sys.stdout = po\n print '>u> '+line\n if err: out = self.pre_e+str(e)+self.post_e\n else: out = rbuffer.getvalue()\n if out!=\"\": print '>s> ' + out", "def user_input(self, options, prompt):\n for o in options:\n line = self.selector_line(o)\n o[\"line\"] = line\n self.output(line)\n self.output(prompt, end=\" \")\n while True:\n if self.test_input:\n inp = self.test_input.pop(0)\n self.output(f\"Using '{inp}' test input\")\n else:\n try:\n inp = raw_input()\n except (IOError, KeyboardInterrupt):\n self.game.print_state()\n raise\n if inp:\n matching = []\n for o in options:\n if o[\"selector\"] == inp:\n return o\n if inp.lower() in o[\"line\"].lower() and o[\"selector\"] != \"-\":\n matching.append(o)\n if len(matching) == 1:\n return matching[0]\n self.output(f\"Invalid Option ({inp})\")", "def handle_line(self, line):\n LOG.debug(\"Received line of input from client %s: %s\", self.addr, line)", "def GetLine(line):\r\n pass", "def console():\n repl(click.get_current_context())", "def REPL(\n parse_tree_fn: Callable,\n output_style_fn: Callable,\n session: bool,\n mode: str,\n show_tree_fn=None,\n debug=False,\n) -> None:\n print(\n \"Enter a Mathematica expression. 
Enter either an empty line, Ctrl-C, or Ctrl-D to exit.\"\n )\n in_count = 1\n while True:\n try:\n user_in = input(f\"In[{in_count}]:= \")\n except (KeyboardInterrupt, EOFError):\n break\n else:\n in_count += 1\n if user_in == \"\":\n break\n\n eval_one(\n in_str=user_in,\n parse_tree_fn=parse_tree_fn,\n output_style_fn=output_style_fn,\n mode=mode,\n session=session,\n show_tree_fn=show_tree_fn,\n debug=debug,\n )\n pass\n return", "def handle_line(line):\n print line,\n\n line_parts = log_re.match(line).groupdict()\n action = line_parts['action']\n attacker = line_parts['attacker']\n\n (cmd, status, output) = interact(line_parts, method=opts.method)\n\n if status > 0:\n warnings.warn('IP %s (%s)' % (attacker, output), InteractionWarning)\n print '\\t%s FAILURE' % attacker\n else:\n print '\\t%s SUCCESS' % attacker\n \n print", "def get_value(Runner, input_str):\n Runner.stdin.write(input_str)\n output = Runner.stdout.readline() \n return output", "def user_prompt(prompt, default=None):\n prompt = f\"\\n {prompt} [{default}] runs or type an amount: \"\n response = input(prompt)\n if not response and default:\n return default\n else:\n return response", "def lineReceived(self, line):\n self.sendLine('reply '+line)", "def main(self):\n cmd = \"self.%s(sys.stdin)\" % sys.argv[1]\n exec(cmd)", "def gip(line):\n import os\n import shlex\n import textwrap\n args = shlex.split(line)\n if len(args) == 0:\n path = '~/.ipyscratch'\n else:\n try:\n path = eval(line)\n except:\n path = args[0]\n with open(os.path.expanduser(path)) as f:\n cmd = textwrap.dedent(f.read())\n get_ipython().run_line_magic('pycat', path)\n # update history\n In[-1] = cmd\n get_ipython().run_code(cmd)", "def interactive(self, *args, **kwargs):\n if args == (['python'],):\n return\n raise ValueError(self, args, kwargs)", "def UserInput(self, username, userinput):\n pass", "def run_interactive(cls, shell, command, input):\n feed = '\\\\n'.join(input)\n icommand = \"echo -e '{}' | {}\".format(feed, command)\n _stdout, _stderr = cls.run(shell, icommand)\n return _stdout", "def main():\n line_count = int(input().strip())\n lines = sys.stdin.readlines()[:line_count]\n for line in lines:\n print('YES' if contains_hackerrank(line) else 'NO')", "def run(self, name, rawtext, text, lineno, inliner, options=None,\n content=None):\n raise NotImplementedError", "def safe_input(display_string):\n\n try:\n x = raw_input(display_string)\n except NameError:\n x = input(display_string)\n\n return x", "def main():\n user_interaction()", "def default(self, line):\n print \"Command not found\\n\"", "def user_line(self, frame):\n pass", "def default(self, line):\n line = line.split(' ')[0]\n self.PRINT.warning(line + \": command not found\")\n self.PRINT.newline()", "def onecmd(self, line):\n statement = self.parsed(line)\n self.lastcmd = statement.parsed.raw\n funcname = self.func_named(statement.parsed.command)\n if not funcname:\n return self._default(statement)\n try:\n func = getattr(self, funcname)\n except AttributeError:\n return self._default(statement)\n stop = func(statement)\n return stop", "def getInput(self):\n self.userInput = self.entry.get()", "def user_input(self, msg, default=''):\n msg = '%s %s ' % (self.prefix, msg)\n\n if default != '':\n msg += '[%s] ' % default\n\n try:\n vim.command('echohl Debug')\n input_str = vim.eval('input(\"%s> \")' % msg)\n vim.command('echohl none')\n except KeyboardInterrupt:\n input_str = ''\n\n return input_str or default", "def rawInput(string):\n if os.name == \"posix\":\n tcflush(sys.stdin, 
TCIFLUSH)\n return input(string)", "def do_action_for_input(self, user_input):\n if user_input == CommandLineProgram.ACTION.HELP:\n self.print_help()\n elif user_input == CommandLineProgram.ACTION.ADD_USER:\n self.input_and_create_user()\n elif user_input == CommandLineProgram.ACTION.LIST_USERS:\n self.print_users()\n elif user_input == CommandLineProgram.ACTION.ADD_TRANSACTION:\n self.select_user_and_add_transaction()\n elif user_input == CommandLineProgram.ACTION.GENERATE_REPORT:\n self.select_user_and_print_report()", "def process_input(line: str) -> None:\n\n # Do not lines if empty or commands\n # Built in commands take precedence to other processing\n if line_valid(line) and not check_commands(line):\n\n # Split the line into its constituent words\n words = line.split(' ')\n\n while unknown_words(words) is not None:\n # As long as any words in the last line are unknown, attempt to learn them\n learn_new_word(unknown_words(words))\n\n if len(words) == 1:\n # Attempt to learn the definition of single-word input\n learn_new_defn(words.pop())\n\n wait()" ]
[ "0.695336", "0.66525394", "0.65766966", "0.6399153", "0.63881487", "0.63797593", "0.63621664", "0.6285082", "0.62843174", "0.6240477", "0.6193314", "0.6086187", "0.60828507", "0.6036487", "0.5988181", "0.59823424", "0.5970285", "0.59633726", "0.59236956", "0.5900732", "0.5900732", "0.58744293", "0.58585066", "0.5855987", "0.5830384", "0.5821434", "0.573722", "0.57318777", "0.5709697", "0.5680791", "0.5657915", "0.5655651", "0.5649584", "0.56370205", "0.56324923", "0.56302655", "0.56287134", "0.5620079", "0.5617121", "0.561243", "0.56116605", "0.56086504", "0.5578386", "0.5573463", "0.5544984", "0.55378884", "0.5527542", "0.5521601", "0.55150205", "0.5510962", "0.55082816", "0.55082816", "0.550794", "0.54938686", "0.54850066", "0.5476465", "0.54734993", "0.54711634", "0.5467746", "0.54617625", "0.5460441", "0.54434174", "0.543709", "0.543709", "0.5425275", "0.5421998", "0.5421081", "0.54173934", "0.54122514", "0.5399537", "0.53938824", "0.53904015", "0.53824246", "0.5369121", "0.53654414", "0.5363004", "0.53598166", "0.5358896", "0.53544044", "0.53405094", "0.53338534", "0.5319089", "0.53030705", "0.52922326", "0.5290135", "0.52890736", "0.5284703", "0.5282021", "0.52807033", "0.52779156", "0.52747416", "0.5262039", "0.52620256", "0.5257792", "0.523936", "0.52380896", "0.5236481", "0.5234185", "0.5234075", "0.52331597", "0.5228141" ]
0.0
-1
Encodes user verification request using user profile ID as pub key.
def generate_authentication_code(user): salt = 'd9!1l@39#c3' expire_timestamp = time.time() + EXPIRE_TIME_LIMIT # Make a string which depends on restaurant id # Same encoding mechanism will be used in seerpod hardware composite_string = "%s%s%s" % (user.id, user.password, salt) str_hex = hashlib.md5(composite_string).hexdigest() decoded_str = str(user.owner_email_id) + str(user.id) + "_" + str(expire_timestamp) + "_" + str_hex # Encoded string will be a multiple line string, if it is greater # than maximum bin size of 76. Browser strips the newline character # in the url. encoded = base64.encodestring(decoded_str).strip().replace('\n', '') return encoded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_verification_code(self, user_id, verify_type, secret):\n user = self.get(user_id, raise_error=True)\n code_hash = hmac.new(secret)\n code_hash.update(str(user_id))\n code_hash.update(str(user.user_name))\n code_hash.update(str(verify_type))\n return code_hash.hexdigest()", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token.decode()", "def recipient_public_key(self):", "def code_challenge(verifier):\n digest = hashlib.sha256(verifier).digest()\n return base64.urlsafe_b64encode(digest).rstrip(b'=')", "def get_verification_code(request):\n\n if request.user.get_profile().is_verified:\n messages.info(request, 'Olet jo vahvistanut osoitteesi')\n else:\n verification_code = request.user.get_profile().gen_verification_code()\n extractx = {\n 'code': verification_code,\n }\n subject = _('Verification code')\n email_helpers.send_user_email(request.user, subject, 'send_verification_code.txt', extractx)\n\n messages.info(request, 'Vahvistuskoodi on lähetetty sähköpostiisi')\n\n return HttpResponseRedirect(reverse('user', args=(request.user.username,)))", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days= 3)\n payload = {\n 'user': user.username,\n 'exp': int (exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm= 'HS256')\n return token", "def gen_verification_token(self, user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token", "def encode_auth_token(user_id, email):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=100, seconds=5),\n 'iat': datetime.datetime.utcnow(),\n 'sub': email + ' ' + str(user_id)\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n # Generacion del token\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token", "def get_recovery_code(self, key, user_id):\n user = self.get(user_id, raise_error=True)\n h = hmac.new(key)\n h.update('%s%s%s%s' % (user_id, user.user_name, user.email, user.password))\n return h.hexdigest()", "def get(self, request):\n token = request.GET.get('token')\n payload = jwt.decode(token, settings.SECRET_KEY, algorithms='HS256')\n print(f'payload {payload}')\n user = User.objects.get(id = payload['user_id'])\n \n if not user.email_verified:\n user.email_verified = True\n user.save()\n return response.Response({'email': \"successful email verification\"}, status = status.HTTP_200_OK)\n return response.Response({'error': \"unsuccessful email verification\"}, status = status.HTTP_400_BAD_REQUEST)", "def verify_email(self, request, *args, **kwargs):\n verified_key_text = getattr(settings, \"VERIFIED_KEY_TEXT\", None)\n\n if not verified_key_text:\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n redirect_url = request.query_params.get(\"redirect_url\")\n verification_key = request.query_params.get(\"verification_key\")\n 
response_message = _(\"Missing or invalid verification key\")\n if verification_key:\n registration_profile = None\n try:\n registration_profile = RegistrationProfile.objects.select_related(\n \"user\", \"user__profile\"\n ).get(activation_key=verification_key)\n except RegistrationProfile.DoesNotExist:\n with use_master:\n try:\n registration_profile = (\n RegistrationProfile.objects.select_related(\n \"user\", \"user__profile\"\n ).get(activation_key=verification_key)\n )\n except RegistrationProfile.DoesNotExist:\n pass\n\n if registration_profile:\n registration_profile.activation_key = verified_key_text\n registration_profile.save()\n\n username = registration_profile.user.username\n set_is_email_verified(registration_profile.user.profile, True)\n # Clear profiles cache\n safe_delete(f\"{USER_PROFILE_PREFIX}{username}\")\n\n response_data = {\"username\": username, \"is_email_verified\": True}\n\n if redirect_url:\n query_params_string = urlencode(response_data)\n redirect_url = f\"{redirect_url}?{query_params_string}\"\n\n return HttpResponseRedirect(redirect_url)\n\n return Response(response_data)\n\n return HttpResponseBadRequest(response_message)", "def mailru_sig(data):\n param_list = sorted(list(item + '=' + data[item] for item in data))\n return md5(''.join(param_list) +\n settings.MAILRU_OAUTH2_CLIENT_SECRET).hexdigest()", "def _gen_activation_hash():\r\n # for now just cheat and generate an api key, that'll work for now\r\n return User.gen_api_key()", "def request_verification(data):\n if 'email' in data:\n if user_exists(data['email']):\n return get_user_id(data['email'])\n else:\n return 401\n else:\n return 400", "def encode_auth_token(self, user_id):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=0),\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n app.config.get('SECRET_KEY'),\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def encode_email(email, key):\n return", "def verify_user(self, tokendict):\n return self.post('verify', tokendict)", "async def _encrypt_external_sub_id(sefl, external_user: ExternalUser) -> str:\n\t\tsalt = external_user.email.lower()\n\t\tsalt = salt.replace(\" \", \"\")\n\t\t# Hash the salt so that the email is not plain text visible in the database\n\t\tsalt = hashlib.sha256(salt.encode()).hexdigest()\n\t\t# bcrypt requires a 22 char salt\n\t\tif len(salt) > 21:\n\t\t\tsalt = salt[:21]\n\n\t\t# As per passlib the last character of the salt should always be one of [.Oeu]\n\t\tsalt = salt + \"O\"\n\n\t\tencrypted_external_sub_id = bcrypt.using(salt=salt).hash(external_user.external_sub_id)\n\t\treturn encrypted_external_sub_id", "def key_request(self, user):\n\t\tclient_log.debug(f'Запрос публичного ключа для {user}')\n\t\treq = {\n\t\t\tACTION: PUBLIC_KEY_REQUEST,\n\t\t\tTIME: time.time(),\n\t\t\tACCOUNT_NAME: user\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tif RESPONSE in ans and ans[RESPONSE] == 511:\n\t\t\treturn ans[DATA]\n\t\telse:\n\t\t\tclient_log.error(f'Не удалось получить ключ собеседника{user}.')", "def get_activation_key(self, user):\n return signing.dumps(obj=user.get_username(), salt=REGISTRATION_SALT)", "def public_key(self):", "def base64_pub_encode(self, key):\n (y, g, p, q) = (str(key.y), str(key.g), str(key.p), str(key.q))\n return base64.b64encode((y + \",\" + g + \",\" + p + \",\" + q).encode('utf-8')).decode('utf-8')", "def encode_u_id(u_id):\n return 
jwt.encode({\n \"u_id\": u_id,\n \"datetime\": json_time_translator.datetime_to_json(datetime.utcnow())\n }, '1$Arh\"1bWa/7+OS', algorithm='HS256').decode('utf-8')", "def get_activation_key(self, user):\n\t\treturn signing.dumps(\n\t\t\tobj=getattr(user, user.USERNAME_FIELD),\n\t\t\tsalt=REGISTRATION_SALT\n\t\t)", "def encode_auth_token(self,user_id): \n try: \n exp = datetime.utcnow() + timedelta(days=1)\n \n payload = {\n 'exp': exp, \n 'iat': datetime.utcnow(), \n 'sub': user_id\n }\n \n encoded_auth_token = jwt.encode(\n payload, \n getattr(settings, \"SECRET_KEY\",\"\"),\n algorithm='HS256'\n )\n return encoded_auth_token\n except Exception as e: \n print_exception(e)\n return e", "def new_profile(email):\n key = challenge_12.deterministic_random_key()\n profile = bytes(profile_for(email.decode()), 'ascii')\n\n return challenge_11.AES_ECB(key).encrypt(profile)", "def test_verified_user(self):\n self.nodes[0].overlay.trustchain.get_github_profile = lambda username: {\n \"username\": username,\n \"bio\": self.nodes[0].overlay.my_peer.mid.encode('hex'),\n \"followers\": 1337\n }\n yield self.nodes[0].overlay.trustchain.import_github_profile(\"test\")\n yield self.deliver_messages()\n\n self.assertTrue(self.nodes[1].overlay.trustchain.persistence.is_verified_user(self.nodes[0].overlay.my_peer.public_key.key_to_bin()))", "def encode_auth_token(secret_key, user_id):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n secret_key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def encode_auth_token(user_id):\n rfexp = datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5)\n exp = int(time.time()+600)\n try:\n payload = {\n 'exp': exp,\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n RFpayload = {\n 'exp': rfexp,\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n ), jwt.encode(\n RFpayload,\n key,\n algorithm='HS512'\n )\n except Exception as e:\n return e", "def encode_auth_token(userdata):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=10),\n 'iat': datetime.datetime.utcnow(),\n 'uid': userdata['uid'],\n 'pwd':userdata['pwd'],\n 'role': userdata['role']\n }\n return jwt.encode(\n payload,\n Config.SECRET_KEY,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def _encrypted_user_photo_key_str(self):\r\n face_aes_key_str = settings.VERIFY_STUDENT[\"SOFTWARE_SECURE\"][\"FACE_IMAGE_AES_KEY\"]\r\n face_aes_key = face_aes_key_str.decode(\"hex\")\r\n rsa_key_str = settings.VERIFY_STUDENT[\"SOFTWARE_SECURE\"][\"RSA_PUBLIC_KEY\"]\r\n rsa_encrypted_face_aes_key = rsa_encrypt(face_aes_key, rsa_key_str)\r\n\r\n return rsa_encrypted_face_aes_key.encode(\"base64\")", "def email_key(self):\r\n url = '{0}/emailKey/generate'.format(self.get_url())\r\n request = http.Request('POST', url)\r\n return request, parsers.parse_json", "def __verify(self):\r\n code = self.request.get('code')\r\n email = None\r\n error = False\r\n # resend if code is not given or in case of some error\r\n if code is not None and code != '':\r\n email = User.verify(code, self.request.remote_addr)\r\n if email is None:\r\n error = True\r\n\r\n if email is None:\r\n template_values = {\r\n 'user_email': self.user_email,\r\n 'error': error\r\n }\r\n template = self.jinja2_env.get_template('verification.html')\r\n 
self.response.out.write(template.render(template_values))\r\n\r\n # message\r\n template_values = {\r\n 'user_email': self.user_email,\r\n 'message': self.gettext('THANK_YOU')\r\n }\r\n template = self.jinja2_env.get_template('staticmessage.html')\r\n self.response.out.write(template.render(template_values))", "def save(self):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()", "def __send_verification(self, email):\r\n user = User.getUser(email.lower())\r\n if user is None or user.verified:\r\n self.set_error(constants.STATUS_BAD_REQUEST, message=None, url=\"/\")\r\n return\r\n user.verificationCode = b64encode(CryptoUtil.get_verify_code(), \"*$\")\r\n template_values = {\r\n 'user_email': self.user_email,\r\n 'code': user.verificationCode,\r\n 'url': constants.VERIFICATION_URL\r\n }\r\n template = self.jinja2_env.get_template('verificationemail.jinja')\r\n message = mail.EmailMessage()\r\n message.sender = constants.SENDER_ADDRESS\r\n message.to = user.email\r\n message.subject = 'Please verify your address'\r\n message.body = template.render(template_values)\r\n message.send()\r\n user.put()", "def main():\n key, plain = get_key_plain()\n encode(key, plain)", "def _get_private_key(self, user_obj):\n return user_obj.private_key.encode('utf-8')", "def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)", "def encoded_validation_cert_pub_key(self) -> str:\n return pulumi.get(self, \"encoded_validation_cert_pub_key\")", "def encoded_validation_cert_pub_key(self) -> str:\n return pulumi.get(self, \"encoded_validation_cert_pub_key\")", "def send_verification_email(self, request, *args, **kwargs):\n verified_key_text = getattr(settings, \"VERIFIED_KEY_TEXT\", None)\n if not verified_key_text:\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n username = request.data.get(\"username\")\n redirect_url = request.data.get(\"redirect_url\")\n response_message = _(\"Verification email has NOT been sent\")\n\n if username:\n try:\n registration_profile = RegistrationProfile.objects.get(\n user__username=username\n )\n except RegistrationProfile.DoesNotExist:\n pass\n else:\n user = registration_profile.user\n set_is_email_verified(user.profile, False)\n\n verification_key = registration_profile.activation_key\n if verification_key == verified_key_text:\n verification_key = (\n user.registrationprofile.create_new_activation_key()\n )\n\n verification_url = get_verification_url(\n redirect_url, request, verification_key\n )\n\n email_data = get_verification_email_data(\n user.email,\n user.username,\n verification_url,\n request,\n )\n\n send_verification_email.delay(**email_data)\n response_message = _(\"Verification email has been sent\")\n\n return 
Response(response_message)\n\n return HttpResponseBadRequest(response_message)", "def send_verification(self):\n pass", "def public_key(ctx):\n if not ctx.data:\n raise RefError(\n \"Ref error: eval_func: public key cannot be derived; try \"\n \"something like '|reveal:path/to/encrypted_private_key|publickey'\"\n )\n\n data_dec = ctx.data\n if ctx.ref_encoding == \"base64\":\n data_dec = base64.b64decode(data_dec).decode()\n\n private_key = serialization.load_pem_private_key(\n data_dec.encode(), password=None, backend=default_backend()\n )\n public_key = private_key.public_key()\n\n ctx.data = str(\n public_key.public_bytes(\n encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo\n ),\n \"UTF-8\",\n )", "def encode_auth_token(userdata):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=10),\n 'iat': datetime.datetime.utcnow(),\n 'username': userdata['username'],\n 'password':userdata['password']\n }\n return jwt.encode(\n payload,\n Config.SECRET_KEY,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def _make_hash_value(self, user, timestamp):\n return (\n six.text_type(user.pk) + six.text_type(timestamp) +\n six.text_type(user.profile.signup_confirmation)\n )", "def sendPublicKey(g, p, s):\r\n status = \"120 PubKey \" + str(computePublicKey(g, p, s))\r\n return status", "def _get_user_id(user_data: dict):\n data = json.dumps(user_data).encode('utf-8')\n hashed_data = hashlib.sha256()\n hashed_data.update(data)\n return hashed_data.hexdigest()", "def verifyemail(request,id=None,key=None):\n logging.debug('')\n if settings.EMAIL_VALIDATION == True:\n user = User.objects.get(id=id)\n if user:\n if user.email_key == key:\n user.email_isvalid = True\n clear_email_validation_message(user)\n user.save()\n data = {'action_type': 'validation_complete'}\n return render_to_response(\n 'authenticator/changeemail.html',\n RequestContext(request, data)\n )\n else:\n logging.error('hmm, no user found for email validation message - foul play?')\n raise Http404", "def verify_code(email, val):\r\n # TODO: is this the right string?\r\n verification_string = email.lower() + '|' + val\r\n return hashlib.md5(verification_string).hexdigest()", "def set_user_verified(self, authenticator_id, uv):\n pass", "def create_s256_code_challenge(code_verifier: str) -> str:\n code_verifier_bytes = code_verifier.encode(\"utf-8\")\n data = hashlib.sha256(code_verifier_bytes).digest()\n return base64.urlsafe_b64encode(data).rstrip(b\"=\").decode()", "def raw(self) -> bytes:\n return bytes(self._verify_key)", "def get_api_key_params(user):\n if user and user.is_authenticated():\n api_key, _ = APIKey.objects.get_or_create(user=user)\n return urlencode({'user': user.pk, 'key': api_key.key})\n return ''", "def verify_key(self, providerkey = None):\n h = Https(API_DOMAIN)\n\n data = {'apikey' : self.apikey}\n\n if providerkey is not None:\n data['providerkey'] = providerkey\n\n h.request( \"GET\",\n \"/publicapi/verify\"+ urlencode(data),\n headers=self.headers)\n\n request_status = h.getresponse().status\n\n if request_status != 200:\n raise Exception(\"Invalid API Key %s\" % self.apikey)", "def send_verification(self):\n secret_key = app.config['CONTACT_VERIFY_SECRET']\n base_url = app.config['URLS']['BASE_URL']\n redditor = praw.models.Redditor(self.client, name=self.identifier)\n verify_url = contact_verify_url(self.contact.id, base_url, secret_key)\n redditor.message(\"Verify your username!\", verify_url)", "def stubbed_receiver() -> 
PublicKey:\n return PublicKey(\"J3dxNj7nDRRqRRXuEMynDG57DkZK4jYRuv3Garmb1i99\")", "def alt_stubbed_receiver() -> PublicKey:\n return PublicKey(\"J3dxNj7nDRRqRRXuEMynDG57DkZK4jYRuv3Garmb1i98\")", "def from_key(self, public_id, key):\n otp = self.get_otp(key)\n from_key = modhex_encode(public_id.encode('hex')) + modhex_encode(otp.encode('hex'))\n return from_key", "def generate_invitation_token(user_email, expiration=7*3600):\n ser = Serializer(current_app.config['SECRET_KEY'], expiration)\n return ser.dumps(\n {'user_email': user_email}).decode('utf-8')", "def profileForEncrypt(self, emailAddress):\n kvstring = profileFor(emailAddress)\n # Turn the string into bytes\n kvbytes = kvstring.encode()\n # Encrypt it.\n data = padPkcs7(kvbytes, 16)\n return self.__aes.ecbEncrypt(data)", "def encode_auth_token(user_id: int, user_name:str, user_login:str, perfil_nome:str) -> bytes:\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5),\n 'iat': datetime.datetime.utcnow(),\n 'uid': user_id,\n 'name': user_name,\n 'login': user_login,\n 'perfil': perfil_nome,\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def verify(self, signature, body, external_aad, public_key):", "def request_idkey(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/idkey\", {}, \"idkey\")\r\n else:\r\n self.send_signed_call(\"private/idkey\", {}, \"idkey\")", "def create_pubkey_message(sender_nickname, sender_pubkey):\n tag = (VERSION<<16) | publicKeyOnly\n \n out = struct.pack(\"<LL\", tag, len(sender_nickname)) + sender_nickname\n out += struct.pack(\"<L\", len(sender_pubkey)) + sender_pubkey\n \n return out", "def get(self, request, token):\n try:\n print(token)\n encoded = token\n key = \"secret\"\n usr_details = jwt.decode(encoded, key=key, algorithms=\"HS256\")\n usr_name = usr_details.get(\"username\")\n if User.objects.filter(username=usr_name):\n return Response({\"message\": \"VERIFIED\"}, status=200)\n return Response({\"message\": \"SOME THING WENT WRONG\"}, status=400)\n except Exception as e:\n return Response({\"message\": \"SOMETHING WENT WRONG\",\n \"detail\": e.args}, status=400)", "def results_callback(request):\r\n body = request.body\r\n\r\n try:\r\n body_dict = json.loads(body)\r\n except ValueError:\r\n log.exception(\"Invalid JSON received from Software Secure:\\n\\n{}\\n\".format(body))\r\n return HttpResponseBadRequest(\"Invalid JSON. Received:\\n\\n{}\".format(body))\r\n\r\n if not isinstance(body_dict, dict):\r\n log.error(\"Reply from Software Secure is not a dict:\\n\\n{}\\n\".format(body))\r\n return HttpResponseBadRequest(\"JSON should be dict. 
Received:\\n\\n{}\".format(body))\r\n\r\n headers = {\r\n \"Authorization\": request.META.get(\"HTTP_AUTHORIZATION\", \"\"),\r\n \"Date\": request.META.get(\"HTTP_DATE\", \"\")\r\n }\r\n\r\n sig_valid = ssencrypt.has_valid_signature(\r\n \"POST\",\r\n headers,\r\n body_dict,\r\n settings.VERIFY_STUDENT[\"SOFTWARE_SECURE\"][\"API_ACCESS_KEY\"],\r\n settings.VERIFY_STUDENT[\"SOFTWARE_SECURE\"][\"API_SECRET_KEY\"]\r\n )\r\n\r\n _response, access_key_and_sig = headers[\"Authorization\"].split(\" \")\r\n access_key = access_key_and_sig.split(\":\")[0]\r\n\r\n # This is what we should be doing...\r\n #if not sig_valid:\r\n # return HttpResponseBadRequest(\"Signature is invalid\")\r\n\r\n # This is what we're doing until we can figure out why we disagree on sigs\r\n if access_key != settings.VERIFY_STUDENT[\"SOFTWARE_SECURE\"][\"API_ACCESS_KEY\"]:\r\n return HttpResponseBadRequest(\"Access key invalid\")\r\n\r\n receipt_id = body_dict.get(\"EdX-ID\")\r\n result = body_dict.get(\"Result\")\r\n reason = body_dict.get(\"Reason\", \"\")\r\n error_code = body_dict.get(\"MessageType\", \"\")\r\n\r\n try:\r\n attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=receipt_id)\r\n except SoftwareSecurePhotoVerification.DoesNotExist:\r\n log.error(\"Software Secure posted back for receipt_id {}, but not found\".format(receipt_id))\r\n return HttpResponseBadRequest(\"edX ID {} not found\".format(receipt_id))\r\n\r\n if result == \"PASS\":\r\n log.debug(\"Approving verification for {}\".format(receipt_id))\r\n attempt.approve()\r\n elif result == \"FAIL\":\r\n log.debug(\"Denying verification for {}\".format(receipt_id))\r\n attempt.deny(json.dumps(reason), error_code=error_code)\r\n elif result == \"SYSTEM FAIL\":\r\n log.debug(\"System failure for {} -- resetting to must_retry\".format(receipt_id))\r\n attempt.system_error(json.dumps(reason), error_code=error_code)\r\n log.error(\"Software Secure callback attempt for %s failed: %s\", receipt_id, reason)\r\n else:\r\n log.error(\"Software Secure returned unknown result {}\".format(result))\r\n return HttpResponseBadRequest(\r\n \"Result {} not understood. 
Known results: PASS, FAIL, SYSTEM FAIL\".format(result)\r\n )\r\n\r\n # If this is a reverification, log an event\r\n if attempt.window:\r\n course_id = attempt.window.course_id\r\n course = course_from_id(course_id)\r\n course_enrollment = CourseEnrollment.get_or_create_enrollment(attempt.user, course_id)\r\n course_enrollment.emit_event(EVENT_NAME_USER_REVERIFICATION_REVIEWED_BY_SOFTWARESECURE)\r\n\r\n return HttpResponse(\"OK!\")", "def private_key():\n return \"Toholampi summer festival 2017 has the most harcore rock bands\"", "def encode_auth_token(user_data, config):\n ttl_days = config.get('JWT_TTL_DAYS', 0)\n ttl_seconds = config.get('JWT_TTL_SECONDS', 0)\n secret_key = config['JWT_SECRET_KEY']\n\n now = dt.datetime.utcnow()\n try:\n payload = {\n 'exp': now + dt.timedelta(days=ttl_days, seconds=ttl_seconds),\n 'iat': now,\n 'sub': user_data\n }\n return jwt.encode(\n payload,\n secret_key,\n algorithm='HS256'\n )\n except Exception:\n raise", "def encode(self):\n if not self.verify():\n return None\n\n try:\n s = json.dumps(self.auth_dict)\n return encode(APP.config['SECRET_KEY'], s)\n except Exception as err:\n LOGGER.error('Error encoding auth: %s' % str(err))\n raise err", "def send_email_key(request):\n if settings.EMAIL_VALIDATION == True:\n if request.user.email_isvalid:\n data = {\n 'email': request.user.email, \n 'action_type': 'key_not_sent', \n 'change_link': reverse('user_changeemail')\n }\n return render_to_response(\n 'authenticator/changeemail.html',\n RequestContext(request, data)\n )\n else:\n send_new_email_key(request.user)\n return validation_email_sent(request)\n else:\n raise Http404", "def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')", "def provision(event,context):\n body = json.loads(event['body'])\n try: \n assert 'serial_number' in body\n assert 'device_public_key' in body\n except AssertionError:\n return response(400, \"Missing required parameters.\")\n try:\n pub_key = base64.b64decode(body['device_public_key'])\n assert len(pub_key) == 128\n device_pub_key_bytes = bytearray.fromhex(pub_key.decode('ascii'))\n serial_number = base64.b64decode(body['serial_number'])\n assert len(serial_number) == 18\n assert len(body['device_label']) == 5\n except:\n return response(400, \"Parameters are in the incorrect format.\")\n\n requester_data = event[\"requestContext\"]\n if requester_data[\"authorizer\"][\"claims\"][\"email_verified\"]:\n identity_data = event[\"requestContext\"][\"identity\"]\n print(identity_data)\n ip_address = identity_data[\"sourceIp\"]\n email = requester_data[\"authorizer\"][\"claims\"][\"email\"].lower()\n else:\n return response(400, \"Email not verified.\")\n \n #generate server ECC key pair\n server_private_key = ec.generate_private_key(ec.SECP256R1(), default_backend())\n server_pem_key = server_private_key.private_bytes(\n encoding = serialization.Encoding.PEM,\n format = serialization.PrivateFormat.PKCS8,\n encryption_algorithm = serialization.NoEncryption())\n print(server_pem_key.decode('utf-8'))\n\n server_public_key = server_private_key.public_key()\n server_public_key_bytes = server_public_key.public_bytes(\n encoding = serialization.Encoding.X962,\n format = serialization.PublicFormat.UncompressedPoint)[1:]\n server_public_key_text = server_public_key_bytes.hex().upper()\n print('server_public_key:')\n print(server_public_key_text)\n \n #Hash device public key and server public key\n device_public_key_hash = hashlib.sha256(device_pub_key_bytes).digest()\n 
server_public_key_hash = hashlib.sha256(server_public_key_bytes).digest()\n\n # Generate a data key associated with the CMK\n # The data key is used to encrypt the file. Each file can use its own\n # data key or data keys can be shared among files.\n # Specify either the CMK ID or ARN\n data_key_encrypted, data_key_plaintext = create_data_key(cmk_id)\n if data_key_encrypted is None:\n return False\n print('Created new AWS KMS data key')\n\n \n # Encrypt the file\n f = Fernet(data_key_plaintext)\n server_pem_key_encrypted = f.encrypt(server_pem_key)\n\n #Create a random 16 bytes\n choices = string.ascii_letters + string.digits\n rand_pass = b''\n for i in range(16):\n \trand_pass += bytes(random.choice(choices),'ascii')\n\n #Load Device Public Key and derive shared secret\n device_bytes = b'\\x04' + device_pub_key_bytes\n print('device_bytes:')\n print(device_bytes)\n try:\n device_pub_key = ec.EllipticCurvePublicKey.from_encoded_point(ec.SECP256R1(),device_bytes)\n except ValueError:\n return response(400, \"Device Public Key is malformed\")\n shared_secret = server_private_key.exchange(ec.ECDH(),device_pub_key)\n\n #use the first 16 bytes (128 bits) of the shared secret to encrypt the random password\n cipher = Cipher(algorithms.AES(shared_secret[:16]), \n modes.ECB(), \n backend=default_backend())\n encryptor = cipher.encryptor()\n encrypted_rand_pass = encryptor.update(rand_pass) + encryptor.finalize()\n\n #Serialize server private key with password from rand_pass\n server_pem_key_pass = server_private_key.private_bytes(\n encoding = serialization.Encoding.PEM,\n format = serialization.PrivateFormat.PKCS8,\n encryption_algorithm = serialization.BestAvailableEncryption(rand_pass))\n\n\n can_logger_dict = {\n 'id': serial_number.decode(\"utf-8\"), #72 bit unique id from the ATECC608.\n 'device_label': body['device_label'],\n 'device_public_key': body['device_public_key'],\n 'device_public_key_prov_hash':device_public_key_hash.hex().upper()[:10],\n 'server_public_key_prov_hash':server_public_key_hash.hex().upper()[:10],\n 'email': email,\n 'sourceIp':ip_address,\n 'encrypted_data_key': base64.b64encode(data_key_encrypted).decode('utf-8'),\n 'encrypted_server_pem_key': base64.b64encode(server_pem_key_encrypted).decode('utf-8'),\n 'provision_time': datetime.datetime.now().isoformat().split('.')[0]\n #'password_for_testing': rand_pass.decode('ascii') #Will delete after testing\n\n }\n\n #Load the server_public_key, the server_pem_key_pass, and the encrypted_rand_pass\n data_dict = {\n \t'server_public_key': base64.b64encode(server_public_key_bytes).decode('ascii'),\n \t'server_pem_key_pass':base64.b64encode(server_pem_key_pass).decode('ascii'),\n \t'encrypted_rand_pass':base64.b64encode(encrypted_rand_pass).decode('ascii')\n }\n\n dbClient = boto3.resource('dynamodb', region_name='us-east-2')\n table = dbClient.Table(\"CANLoggers\")\n try:\n ret_dict = table.put_item(\n Item = can_logger_dict,\n ConditionExpression = 'attribute_not_exists(id)'\n )\n except:\n return response(400, \"serial number already exists\")\n return response(200, data_dict)", "def send_confirm_email(request,uid):\n user=models.UserProfile.objects.get(id=uid)\n current_site=get_current_site(request)\n email_subject='Activate Your Account'\n message=render_to_string('activate_account.html',{\n 'user':user,\n 'domain':current_site.domain,\n 'uid':urlsafe_base64_encode(force_bytes(uid)),\n 'token':account_activation_token.make_token(user),\n })\n to_email= user.email\n email= 
EmailMessage(email_subject,message,to=[to_email])\n email.send()\n return JsonResponse(\n {\n \"status\":\"The confirmation email has been sent.\",\n }\n )", "def _create_invitation_code(\n invitation_data: InvitationInputs, secret_key: bytes\n) -> bytes:\n\n # builds content\n content = InvitationContent(\n **invitation_data.dict(),\n created=datetime.utcnow(),\n )\n\n content_jsonstr: str = _ContentWithShortNames.serialize(content)\n assert \"\\n\" not in content_jsonstr # nosec\n\n # encrypts contents\n return _fernet_encrypt_as_urlsafe_code(\n data=content_jsonstr.encode(),\n secret_key=secret_key,\n )", "def get(self, request, token):\n try:\n encoded = token\n key = \"secret\"\n usr_details = jwt.decode(encoded, key=key, algorithms=\"HS256\")\n usr_name = usr_details.get(\"username\")\n check = CustomUser.objects.get(username=usr_name)\n if check:\n check.verified = True\n check.save()\n return Response({\"message\": \"VERIFIED\"}, status=200)\n return Response({\"message\": \"SOME THING WENT WRONG\"}, status=400)\n except Exception as e:\n user_log.exception(\"generic exception occurred\")\n return Response({\"message\": \"SOMETHING WENT WRONG\",\n \"detail\": e.args}, status=400)", "def set_confirmation_code(cls, user_obj: User) -> str:\n confirmation_code = cls.generate_new_code()\n if user_obj.phone == '+996553000117' or user_obj.phone == '+996999111222':\n confirmation_code = '123456'\n user_obj.confirmation_code = confirmation_code\n user_obj.confirmation_date = timezone.now()\n user_obj.save(\n update_fields=['confirmation_code', 'confirmation_date'])\n return confirmation_code", "def send_verification_email(self):\n url = (\"https://api.imgur.com/3/account/{0}\"\n \"/verifyemail\".format(self.name))\n self._imgur._send_request(url, needs_auth=True, method='POST')", "def register(request, key):\n profile = cpm.UserProfile.objects.filter(\n activation_key=key)\n\n if not profile.exists() or profile[0].user.is_active:\n hero_title = 'Hmm... that registration key is invalid.'\n return render_err_msg(request, hero_title)\n\n user = profile[0].user\n\n if request.POST:\n reg_form = RegForm(request.POST)\n if reg_form.is_valid():\n user.is_active = True\n user.first_name = reg_form.cleaned_data['first_name']\n user.last_name = reg_form.cleaned_data['last_name']\n user.set_password(reg_form.cleaned_data['password'])\n\n pic_url = put_profile_pic(\n reg_form.cleaned_data['pic_url'], user.profile)\n if pic_url:\n user.profile.pic_url = pic_url\n\n user.profile.class_year = reg_form.cleaned_data['class_year']\n\n alt_emails = request.POST.getlist('alt_email')\n for alt_email in alt_emails:\n if alt_email:\n user.profile.add_email(alt_email)\n\n user.save()\n user.profile.save()\n\n user = auth.authenticate(username=user.username,\n password=reg_form.cleaned_data['password'])\n if user is not None:\n if user.is_active:\n auth.login(request, user)\n # Redirect to a success page.\n return redirect('/')\n\n else:\n reg_form = RegForm()\n\n template_values = {\n 'page_title': 'register',\n 'form': reg_form,\n 'user': user,\n }\n\n return render_to_response('register.html',\n template_values, request)", "def public_key():\n if not Authorizer.__public_key:\n Authorizer.__public_key = download_public_key()\n return Authorizer.__public_key", "def send_activation_key_via_email(user, signup_key):\n subject = '[%s]Verify your email.' 
% (settings.ORGANIZATION_NAME)\n from_email = settings.DEFAULT_FROM_EMAIL\n to = [user.email, ]\n activation_link = '%s%s' % (settings.HOSTNAME_URL,\n reverse('activation_verify',\n args=(signup_key,)))\n\n html_content = \"\"\"\n <p>\n Hello %s. Please click the link to activate your account.<br>\n <a href=%s a> %s</a><br>\n\n Thank you,<br>\n\n The Team\n </p>\n \"\"\" % (user.first_name, activation_link, activation_link)\n\n text_content = \"\"\"\n Hello %s. Please click the link to activate your account.\n\n %s\n\n Thank you,\n\n The Team\n\n \"\"\" % (user.first_name, activation_link)\n\n msg = EmailMultiAlternatives(subject=subject, body=text_content,\n to=to, from_email=from_email)\n msg.attach_alternative(html_content, 'text/html')\n msg.send()", "def test_signup_verification(self):\n resp = self.client.post(self.signup_url, self.test_credential)\n\n self.assertEqual(len(mail.outbox), 1)\n email_message = str(mail.outbox[0].message())\n # Verification model instance should be created.\n sv = SignupVerification.objects.get(user__username=self.test_credential['username'])\n self.assertTrue(sv.key)\n self.assertIn(sv.key, email_message)", "def encoded_jwt(private_key, user):\n kid = JWT_KEYPAIR_FILES.keys()[0]\n scopes = ['openid']\n return generate_signed_access_token(\n kid, private_key, user, 3600, scopes, forced_exp_time=None)", "def publickey_unsafe(sk: bytes) -> bytes:\n h = H(sk)\n a = decodecoord(h)\n A = scalarmult_B(a)\n return encodepoint(A)", "def validate_signature_using_user_id(message, signature=None):\n if signature is None:\n signature = message.pop('signature')\n\n signature = (int(base64.b64decode(signature).decode()),)\n\n user_id = message['user_id']\n\n message = json.dumps(message)\n public_key_path = os.path.join('public_keys', f'public.{user_id}.key')\n with open(public_key_path, 'rb') as file:\n public_key = RSA.importKey(file.read())\n\n h = SHA.new(message.encode()).digest()\n\n return public_key.verify(h, signature)", "def save(self):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()", "def save(self):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()", "def send_verification_code(request) -> HttpResponse:\n request_data = get_request_data(request.body)\n if request_data is None:\n return error_response()\n\n phone_number = get_e164_phone_number(request_data.phone_number, request_data.region)\n if phone_number is None:\n return error_response(\"Invalid phone_number\")\n\n current_time = get_current_utc_time()\n verification_code_expiry_time = current_time + timedelta(\n seconds=VERIFICATION_CODE_LIFETIME_SECONDS\n )\n\n provider = get_communication_provider()\n message_status_callback = settings.TWILIO_MESSAGE_STATUS_CALLBACK\n success, verification_code = send_verification_code_sms(\n provider=provider,\n phone_number=phone_number,\n verification_code_population=VERIFICATION_CODE_ALPHABET,\n verification_code_length=VERIFICATION_CODE_LENGTH,\n verification_code_expiry_time=verification_code_expiry_time,\n callback=message_status_callback,\n )\n if success is False:\n invalidate_verification_code(verification_code.code)\n return error_response(\"Failed to send verification SMS\", status=503)\n\n return success_response({\"phone_number\": phone_number})", "def encode(msg: Message) -> bytes:\n msg = cast(SigningMessage, msg)\n signing_msg = signing_pb2.SigningMessage()\n signing_msg.message_id = 
msg.message_id\n dialogue_reference = msg.dialogue_reference\n signing_msg.dialogue_starter_reference = dialogue_reference[0]\n signing_msg.dialogue_responder_reference = dialogue_reference[1]\n signing_msg.target = msg.target\n\n performative_id = msg.performative\n if performative_id == SigningMessage.Performative.SIGN_TRANSACTION:\n performative = signing_pb2.SigningMessage.Sign_Transaction_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n terms = msg.terms\n Terms.encode(performative.terms, terms)\n raw_transaction = msg.raw_transaction\n RawTransaction.encode(performative.raw_transaction, raw_transaction)\n signing_msg.sign_transaction.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.SIGN_MESSAGE:\n performative = signing_pb2.SigningMessage.Sign_Message_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n terms = msg.terms\n Terms.encode(performative.terms, terms)\n raw_message = msg.raw_message\n RawMessage.encode(performative.raw_message, raw_message)\n signing_msg.sign_message.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.SIGNED_TRANSACTION:\n performative = signing_pb2.SigningMessage.Signed_Transaction_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n signed_transaction = msg.signed_transaction\n SignedTransaction.encode(\n performative.signed_transaction, signed_transaction\n )\n signing_msg.signed_transaction.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.SIGNED_MESSAGE:\n performative = signing_pb2.SigningMessage.Signed_Message_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n signed_message = msg.signed_message\n SignedMessage.encode(performative.signed_message, signed_message)\n signing_msg.signed_message.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.ERROR:\n performative = signing_pb2.SigningMessage.Error_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n error_code = msg.error_code\n ErrorCode.encode(performative.error_code, error_code)\n signing_msg.error.CopyFrom(performative)\n else:\n raise ValueError(\"Performative not valid: {}\".format(performative_id))\n\n signing_bytes = signing_msg.SerializeToString()\n return signing_bytes", "def token_generate(self, user_id):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=200),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n encoded_token = jwt.encode(\n payload, current_app.config['SECRET_KEY'], algorithm='HS256'\n )\n return encoded_token\n\n except Exception:\n return str(Exception)", "def sign(self, body, 
external_aad, private_key):", "def user_confirm_email(request):\n # values from URL/matchdict\n conf_code = request.matchdict['code']\n user_name = request.matchdict['user_name']\n user_email = request.matchdict['user_email']\n\n #get matching user from db\n user = User.get_by_username(user_name)\n\n # check if the information in the matchdict makes sense\n # - user\n if isinstance(user, NoneType):\n #print \"user is of type NoneType\"\n return {\n 'result_msg':\n \"Something didn't work. \"\n \"Please check whether you tried the right URL.\"\n }\n # - email\n if (user.email == user_email):\n #print \"this one matched! \" + str(user_email)\n\n if (user.email_is_confirmed):\n #print \"confirmed already\"\n return {'result_msg': \"Your email address was confirmed already.\"}\n # - confirm code\n #print \"checking confirmation code...\"\n if (user.email_confirm_code == conf_code):\n #print \"conf code \" + str(conf_code)\n #print \"user.conf code \" + str(user.email_confirm_code)\n\n #print \" -- found the right confirmation code in db\"\n #print \" -- set this email address as confirmed.\"\n user.email_is_confirmed = True\n return {'result_msg':\n \"Thanks! Your email address has been confirmed.\"}\n # else\n return {'result_msg': \"Verification has failed. Bummer!\"}", "def encode(data):\n return jwt.encode(data, app.config[\"JWT_SECRET\"], algorithm=\"HS256\")", "def sign_and_verify(self, msg):\n ciphertext, tag = self.signer.encrypt_and_digest(msg.encode('utf-8'))\n plaintext = self.verifier.decrypt(ciphertext)\n try:\n self.verifier.verify(tag)\n print(\"The message is authentic: \", plaintext)\n except ValueError:\n print(\"Key incorrect or message corrupted\")", "def create(self, data):\n # Make User\n code = (random.randint(1000, 9999))\n user = User.objects.get(pk=self.context['user'].pk)\n new = str(code).strip()\n hs = hashlib.sha1(new.encode()).hexdigest()\n user.password = hs\n user.save()\n send_verification_email.delay(email=data['email'], code=code)\n return user", "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def encode_token(userId):\n token = jwt.encode({'userId': userId, 'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=20)},\n secret_key).decode('utf-8')\n return token", "def signup(self, code):\n log.info(\"Confirming user with username : \" + self.__username)\n path = 'user'\n signup_info = {\n 'user_name': self.__username,\n \"verification_code\": code\n }\n signup_url = serverconfig.HOST + path\n\n try:\n log.debug(\"Confirm user request url : \" + signup_url)\n response = requests.post(url=signup_url,\n data=json.dumps(signup_info),\n headers=self.__request_header,\n verify=configmanager.CERT_FILE)\n log.debug(\"Confirm user response : \" + response.text)\n response.raise_for_status()\n except requests.exceptions.SSLError:\n raise SSLError\n except requests.exceptions.ConnectionError:\n raise NetworkError\n except Exception:\n raise Exception(response.text)\n log.info(\"Signup successful.\")\n return True", "def calculate_auth_code(self, data) -> str:\n return (\n hmac.new(\n bytes(self.config.get(VENE_PAYMENTS_BAMBORA_API_SECRET), \"latin-1\"),\n msg=bytes(data, \"latin-1\"),\n digestmod=hashlib.sha256,\n )\n .hexdigest()\n .upper()\n )", "def private_key(self):" ]
[ "0.6182733", "0.5893101", "0.58795995", "0.5852252", "0.5845634", "0.5823406", "0.575619", "0.564391", "0.56402856", "0.5625334", "0.55854565", "0.55798405", "0.55779624", "0.55727375", "0.5564628", "0.5561092", "0.5531494", "0.55198497", "0.550899", "0.5470735", "0.5465826", "0.53901035", "0.5372451", "0.5334406", "0.53318757", "0.5321015", "0.53041154", "0.52728695", "0.5259347", "0.52554816", "0.52545804", "0.5240918", "0.5234621", "0.52277756", "0.5223482", "0.52146053", "0.5201796", "0.5199386", "0.51833147", "0.5182266", "0.5182266", "0.51745135", "0.51699793", "0.51667964", "0.514326", "0.5105554", "0.5100056", "0.5096213", "0.5085339", "0.50851274", "0.50835603", "0.50741196", "0.50730115", "0.50516796", "0.50513023", "0.50381553", "0.50344557", "0.50336", "0.5023588", "0.50215554", "0.50118977", "0.5011838", "0.5010816", "0.500852", "0.49956542", "0.4993554", "0.4990521", "0.49891302", "0.4972241", "0.49676454", "0.49635", "0.49586046", "0.4950535", "0.49472773", "0.49392805", "0.49385396", "0.49338427", "0.49300268", "0.49203822", "0.48972133", "0.4895198", "0.48951176", "0.48936117", "0.48929736", "0.48880255", "0.48792964", "0.48792964", "0.4875914", "0.48700887", "0.48689416", "0.48653075", "0.4857416", "0.48510388", "0.48492053", "0.48479307", "0.48352596", "0.4832819", "0.4829647", "0.48271713", "0.48258832" ]
0.5185567
38
Authenticate user based on code.
def authenticate_user(authentication_code): for suffix in ('', '=', '=='): attempt = authentication_code + suffix decoded = base64.decodestring(attempt) fields = decoded.split('_') email, user_id, time_stamp, str_hex = fields if time_stamp < time.time(): # Authentication Code Expired raise seerpod_exceptions.AuthenticationCodeExpired('Authentication code expired', response_data=authentication_code) user = None #business_contact_api.BusinessContacts().get_user_detail_from_email(email) if not user: continue if attempt == generate_authentication_code( user.id, time_stamp, user.owner_email_id, user.password): return user # Invalid authentication code raise seerpod_exceptions.InvalidAuthenticationCode('Invalid Authentication code', response_data=authentication_code)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authenticate(self, request, **kwargs):\n\n self.request = request\n if not self.request:\n return None\n\n state = self.request.GET.get('state')\n code = self.request.GET.get('code')\n nonce = kwargs.pop('nonce', None)\n\n if not code or not state:\n return None\n\n reverse_url = import_from_settings('OIDC_AUTHENTICATION_CALLBACK_URL',\n 'oidc_authentication_callback')\n\n token_payload = {\n 'client_id': self.OIDC_RP_CLIENT_ID,\n 'client_secret': self.OIDC_RP_CLIENT_SECRET,\n 'grant_type': 'authorization_code',\n 'code': code,\n 'redirect_uri': absolutify(\n self.request,\n reverse(reverse_url)\n ),\n }\n\n # Get the token\n token_info = self.get_token(token_payload)\n id_token = token_info.get('id_token')\n access_token = token_info.get('access_token')\n refresh_token = token_info.get('refresh_token')\n\n # Validate the token\n payload = self.verify_token(id_token, nonce=nonce)\n\n # Store users tokens\n usertokens, created = UserTokens.objects.update_or_create(\n user=payload['sub'],\n defaults={'access_token': access_token,\n 'refresh_token': refresh_token}\n )\n\n if payload:\n self.store_tokens(access_token, id_token)\n try:\n return self.get_or_create_user(access_token, id_token, payload)\n except SuspiciousOperation as exc:\n LOGGER.warning('failed to get or create user: %s', exc)\n return None\n\n return None", "def authentication_callback(request):\n code = request.GET.get('code')\n user = authenticate(token=code, request=request)\n if user:\n auth_login(request, user)\n set_session_from_user(request, user)\n region = request.user.endpoint\n region_name = dict(Login.get_region_choices()).get(region)\n request.session['region_endpoint'] = region\n request.session['region_name'] = region_name\n url = getattr(settings, \"LOGIN_REDIRECT_URL\", \"/\")\n resp = HttpResponseRedirect(url)\n\n return resp", "def authenticate(user, request):", "def authentication_hook(self):\n pass", "def authenticate_user(self, email, password):\n authentication = self.client.validate(email, password).decode(\"utf-8\")\n if authentication == \"valid\":\n self.current_email = email\n self.unlock_time = round(datetime.now().timestamp())\n if self.is_user and not self.is_return:\n self.display_successful_unlock_cust()\n elif self.is_user and self.is_return:\n self.return_car()\n else:\n self.display_successful_unlock_eng()\n elif authentication == \"invalid\":\n print(self.INVALID_USER)\n time.sleep(3)\n self.display_main()", "def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)", "def authenticate_user():\n\n error = request.args.get(\"error\")\n if error:\n logger.warning(\"Google sent us an error via OAuth2: %s\", error)\n\n return redirect(url_for(\"login\"))\n\n # Get OAuth2 authentication code\n code = request.args.get(\"code\")\n\n # 
Exchange code for fresh credentials\n credentials = flow.step2_exchange(code)\n\n # Extract email and email verification\n id_token = credentials.id_token\n email = id_token[\"email\"]\n verified_email = id_token[\"email_verified\"]\n\n if verified_email is True:\n # Find the user with the given email\n try:\n user = FlaskUser(User.objects.get(email = email))\n except User.DoesNotExist:\n user = None\n\n if not user:\n flash(\"A Galah account does not exist for this email.\", \"error\")\n\n logger.info(\n \"User %s has attempted to log in via OAuth2 but an account \"\n \"does not exist for them.\", email\n )\n else:\n login_user(user)\n\n logger.info(\n \"User %s has succesfully logged in via OAuth2.\", email\n )\n\n return redirect(url_for(\"home\"))\n\n else:\n flash(\"Sorry, we couldn't verify your email\", \"error\")\n\n logger.info(\"User %s failed to authenticate with OAuth2 because \"\n \"their email has not been verified with google.\", email)\n\n return redirect(url_for(\"login\"))", "def authenticate(self, msg=\"\"):\n if self.request.user:\n return True\n else:\n templating = self.server.templating # save current templating settings\n templating_path = self.server.templating_path\n self.server.set_templating(\"pystache\")\n self.server.set_templating_path(\".\")\n params = {'hidden_fields': self.request.params} # pass all parameters\n self.response.send_template(self.login_template, params)\n self.server.templating = templating # restore templating settings\n self.server.templating_path = templating_path\n raise AlreadyProcessed()", "def do_authenticate():\n #try:\n if 1:\n if 'referer' not in self.session:\n path = urlsplit(self.request.url)[2]\n self.session['referer'] = path\n self.session.put()\n #except:\n # pass\n aobj = self.config.auth_obj()\n self.get_controller()\n auth_res = aobj.auth(self.controller, *args, **kws)\n if auth_res:\n return func(*args, **kws)\n aobj.auth_redirect(self.controller, *args, **kws)\n # clear controller for development environment.", "def accesscode(request, code):\n employee = Employee.objects.get(access_code=code)\n user = employee.user\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n return HttpResponseRedirect('/')", "def authenticate(credentials):", "def authenticate_with_github(username=None, password=None, code=None):\n if username is not None and password is not None:\n print(' (auth given as {}:{})'.format(username, '*'*len(password)))\n\n def _2fa_func():\n return code\n\n if code:\n return login(username, password, two_factor_callback=_2fa_func)\n else:\n return GitHub(username, password)", "def auth(self, user):", "def handleAuth(self, opcode, data, client):\n \n # Get the data the client sent.\n clientUser = data.getString()\n clientPass = data.getString()\n \n # Flag to be send back after serverside auth\n flag = None\n userpass = False\n loginTries = 0 # Not thought out now, will return to it later...\n \n # Get the data from DB\n try:\n # Here we can add the player to the PLAYERS{} by using a player\n # ID or something\n details = []\n details = Database.getAccountData(clientUser, clientPass)\n \n except:\n print \"Can't connected to ACCOUNT DATABASE\"\n \n # Will make some other checks later... this is just good for now..\n if details == None:\n flag = 2\n print \"Player: \", clientUser, \" Doesn't exist! 
or Incorrect!\"\n loginTries += 1\n \n # Check if the password/username match\n elif clientPass == details[2] and clientUser == details[1]:\n print details\n userpass = True\n self.network.base.PLAYERS[details[0]] = Player(self, details[0], details[1])\n print \"Player: \", details[1], \" Logged in, ID: \", details[0]\n flag = 1\n \n else:\n userpass = False\n print \"Player: \", clientUser, \" login incorrect\"\n loginTries += 1\n flag = 2\n \n # Create buffer\n pkg = PyDatagram()\n \n # Add response\n pkg.addUint16(SMSG_AUTH_RESPONSE)\n \n # Add the flag\n pkg.addUint16(flag)\n \n # Send the packet\n self.network.tcpWriter.send(pkg, client)", "def auth():\n pass", "def auth():\n pass", "def do_login(self, backend, user):", "def authenticate():\n if request.environ['PATH_INFO'] == \"/notification\":\n user = getUser()\n \n if user is None:\n raise HTTPResponse(body=\"Forbidden\", status=403)\n \n try:\n if authz.login(user):\n logging.info('Login success: %s', user.username)\n return\n except IOError:\n raise HTTPResponse(body=\"Error reading user file\", status=400)\n except Exception as e:\n raise HTTPResponse(body=\"Unexpected error\", status=400)\n \n raise HTTPResponse(body=\"Invalid username or password\", status=401)", "def auth_user():\n global token\n app.logger.info(\"Microsoft Planner Service running on /auth port as expected\")\n try:\n request_count = 0\n if request_count == 0:\n token = get_tokens_as_app(client_id, user_code_info, tenant_id)\n request_count = 1 \n if 'access_token' in token:\n app.logger.info('Adding access token to cache...')\n add_token_to_cache(client_id, tenant_id, token)\n return_object = (f\"{token['refresh_token']}\")\n return render_template('token.html', return_object=return_object)\n else:\n return_error = (\"Token response did not result in a proper response. Athenticate again please.\")\n return render_template('token.html', return_error=return_error)\n except AttributeError or TypeError:\n return_error = ('Authentification failed. Please pull and restart your system and authenticate again.')\n return render_template('token.html', return_error=return_error)\n except adal.AdalError as err:\n return_error = (\"You're logged in with the wrong user. 
Please log out and authenticate again.\")\n return render_template('token.html', return_error=return_error)", "def doAuth(pamh):\n\tprint('called third eye')\n\t# Abort if third_eye is disabled\n\tif config.getboolean(\"core\", \"disabled\"):\n\t\tsys.exit(0)\n\n\tif \"SSH_CONNECTION\" in os.environ or \"SSH_CLIENT\" in os.environ or \"SSHD_OPTS\" in os.environ:\n\t\tsys.exit(0)\n\tpamh.conversation(pamh.Message(pamh.PAM_TEXT_INFO, \"Attempting a face detection\"))\n\n\t# Run compare as python3 subprocess to circumvent python version and import issues\n\tstatus = subprocess.call([\"/usr/bin/python3\", os.path.dirname(os.path.abspath(__file__)) + \"/new_compare.py\", pamh.get_user()])\n\n\t# Status 12 means we aborted\n\tif status == 12:\n\t\treturn pamh.PAM_AUTH_ERR\n\t# Status 0 is a successful exit\n\telif status == 0:\n\t\t# Show the success message if it isn't suppressed\n\t\tpamh.conversation(pamh.Message(pamh.PAM_TEXT_INFO, \"Identified face as \" + pamh.get_user()))\n\t\treturn pamh.PAM_SUCCESS\n\t#unknown err\n\treturn pamh.PAM_SYSTEM_ERR", "def test_auth_code_positive(self, api):\n self.builder.add_user(api.get_user())\n resp = api.login_user(api.get_user().username, api.get_user().password)\n self.builder.del_user(api.get_user())\n assert resp.status_code == 200", "def authenticate(self, username, password):\n user = self.db.get_user(username)\n print(user)\n\n if user is None:\n self.__deny_state()\n\n if not self.argon2.verify(user[1], password):\n self.__deny_state()\n\n self.__accept_state()", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")", "def api_auth():\n form = request.get_json(force=True)\n userdata = None\n if form['register']:\n userdata = userProvider.register_user(\n form['username'].encode('utf8'),\n form['password'].encode('utf8')\n )\n else:\n userdata = userProvider.load_authenticated_user(\n form['username'].encode('utf8'),\n form['password'].encode('utf8')\n )\n if userdata:\n user = userProvider.userdata_to_user(userdata)\n flask_login.login_user(user)\n return \"true\"\n raise Exception(\"No user loaded\")", "def _authenticate(self, mix, authenticator):\n if self._auth_attempt == 0:\n username, password = self._auth\n authenticator.setUser(username)\n authenticator.setPassword(password)\n self._auth_attempt += 1", "def check_auth():", "def authenticate(request):\n if not current_user.is_authenticated:\n raise NoAuthProvided()\n if current_user.is_locked or not current_user.active:\n raise UnauthorizedError(\n 'Authentication failed for <User '\n f'username=`{current_user.username}`>. 
'\n 'Wrong credentials or locked account')\n return current_user", "def authenticate(cls, handler):\n raise NotImplementedError(\"Missing implementation for authenticate\")", "def log_in(codecool):\n\n login = school_view.get_login()\n password = school_view.get_password()\n\n password = utilities.hash_password(password)\n\n users = codecool.managers_list + codecool.administrators_list + codecool.mentors_list + codecool.students_list\n for user in users:\n if user.login == login and user.password == password:\n return user", "async def async_step_auth(self, user_input=None):\n if user_input.get(const.CODE):\n self.data = user_input\n return self.async_external_step_done(next_step_id=\"finish\")\n\n profile = user_input.get(const.PROFILE)\n\n auth_client = self.get_auth_client(profile)\n\n url = auth_client.get_authorize_url()\n\n return self.async_external_step(step_id=\"auth\", url=url)", "def authenticate():\n try:\n return redirect(authorize_uri + '?client_id=' + client_id + \\\n '&response_type=code&redirect_uri=' + redirect_uri + '&scope=user-library-read user-modify-playback-state')\n except Exception as e:\n return ('authenticate() threw ' +str(e))", "def authenticate_userpass():\n return _userpwd_auth(current_app.config.get('VAULT_AUTH_PATH', 'userpass'))", "def auth():\n\tcode = request.query.code\n\tauth = 'https://foursquare.com/oauth2/access_token'\n\tparams = dict(\n\t\tclient_id=CLIENT_ID,\n\t\tclient_secret=CLIENT_SECRET,\n\t\tgrant_type='authorization_code',\n\t\tredirect_uri=REDIRECT_URI,\n\t\tcode=code\n\t)\n\tauth_says = fetch('%s?%s'%(auth, urlencode(params)))\n\tauth_response = json.loads(auth_says.content)\n\tif 'access_token' in auth_response:\n\t\toauth_token=auth_response['access_token']\n\t\tresponse.set_cookie('user', oauth_token, secret=CLIENT_SECRET)\n\t\tlogging.info('new oauth_token:%s'%oauth_token)\n\t\tredirect('/')\n\telse:\n\t\tlogging.error(auth_response)\n\t\tabort()", "def authenticate_user(data):\n \n try:\n auth_token = data[\"auth_token\"]\n user_token = Token.objects.get(username=data[\"username\"])\n if user_token.token == auth_token:\n return True\n except:\n return False\n return False", "def authenticate():\n return \"Please log in... 
Log in page\"", "def func_auth(self, data):\n check = bytes(data).decode().encode('ascii', 'ignore').decode().lower().rstrip()\n if check == 'auth login':\n auth_id = library.q_id_generate(size=12)\n message = '334 ' + auth_id\n self.func_sender(message)\n self.request.recv(self.std_recv_size)\n auth_id_two = library.q_id_generate(size=12)\n message_two = '334 ' + auth_id_two\n self.func_sender(message_two)\n self.request.recv(self.std_recv_size)\n message_three = self.conf_th_ic.get_item(q_key='std-messages').get(check)\n self.func_sender(message_three)\n return True", "def authenticate_user( db, request ):\n\n username = request.values.get( 'username' )\n password = request.values.get( 'password' )\n\n if ( not username or not password ):\n return False\n\n user = db.execute(\n text( 'select users_id, name, email from users where name = :name and password_hash = md5( :salt || :password ) and is_active' ),\n name = username, password = password, salt = app.config[ 'SECRET_KEY' ]\n ).fetchone()\n\n if ( user ):\n return user\n else:\n return False", "def authenticate_user(self, *, provider: str, user_name: str, password: str) -> UserId:", "def authenticate(self, registered_provider=None, template_path=None):\n if self.flow_type == \"web\" and registered_provider is not None:\n response = self.authenticate_from_server(registered_provider)\n if response is None or response.get(\"access_token\") is None:\n return (\n f\"Access Denied: Reason={request.args['error']} \"\n f\"error={request.args['error_description']} \"\n f\"response={response}\"\n )\n session[\"token\"] = response\n return redirect(url_for(\"home\", provider=self.name))\n elif self.flow_type == \"implicit\" and template_path is not None:\n return self.authenticate_implicit(template_path)\n elif self.flow_type == \"client\":\n return redirect(url_for(\"home\", provider=self.name))\n else:\n raise Exception(\n \"Invalid flow type, registered_provider is None, \"\n \"or template_path not specified\"\n )", "def check_user_and_login(self) -> Response:\n pass", "def verify_auth_code(self, code):\n raise NotImplementedError(\n \"\"\"\n verify_scope must be implemented by a child class\n \"\"\"\n )", "def authenticate_user(self):\n raise NotImplementedError(\n \"\"\"\n authenticate_user must be implemented by a child class\n \"\"\"\n )", "def _authenticate(self):\n auth = self.settings.get(\"auth\")\n if auth:\n if auth == Auth.PLAIN:\n self._authenticate_plain()\n elif auth == Auth.SHA256_MEMORY:\n self._authenticate_sha256_memory()\n elif auth == Auth.MYSQL41:\n self._authenticate_mysql41()\n elif self.stream.is_secure():\n # Use PLAIN if no auth provided and connection is secure\n self._authenticate_plain()\n else:\n # Use MYSQL41 if connection is not secure\n try:\n self._authenticate_mysql41()\n except InterfaceError:\n pass\n else:\n return\n # Try SHA256_MEMORY if MYSQL41 fails\n try:\n self._authenticate_sha256_memory()\n except InterfaceError as err:\n raise InterfaceError(\n \"Authentication failed using MYSQL41 and \"\n \"SHA256_MEMORY, check username and \"\n f\"password or try a secure connection err:{err}\"\n ) from err", "def authn_and_authz():\n authentication()\n authorization()", "def authenticate(cls, handler):\n return None", "def _oauth_callback(self):\n tokens = self.oauth.get_raw_access_token(data={\n 'code': flask.request.args.get('code', ''),\n 'redirect_uri': self.oauth_redirect_uri,\n 'grant_type': 'authorization_code'\n }).json()\n user = User(tokens=tokens, app=self)\n\n # Add subscriptions\n 
self.subscriptions.init_user(user)\n\n # Call endpoint for user login\n return self.subscriptions.call_endpoint(\"login\", user) or \"\"", "def login():\n if app.testing:\n callback_url = url_for('user.authorize', _external=True)\n else:\n callback_url = 'https://codegolf.uqcs.org.au/user/authorize'\n return git_auth.authorize(callback=callback_url)", "def authenticate():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n template = request.form['backto']\n car_id = request.form['car-id']\n if check_credentials(username, password):\n return after_auth_redirect(template, car_id, username)\n else:\n return render_template('login.html', error=\"Bad credentials!\")", "def login(self):\r\n user_account = db.find_one({\"cpr_number\": request.form.get(\"CPR\")})\r\n if user_account is not None:\r\n if self.verify_password(user_account[\"password\"], request.form.get(\"password\")):\r\n return self.start_session(user_account)\r\n return jsonify({\"error\": \"Invalid login credentials\"}), 401", "def _authenticate(self):\n\t\tfrom getpass import getpass\n\t\tpassword = getpass()\n\t\tself.msg('nickserv', 'identify %s' % password)", "def auth(self):\n ok = False\n if self.private_token:\n ok = self.token_auth()\n if not ok:\n self.credentials_auth()", "def authenticateBackend(self, tried_username=None, tried_password=None):\n\n # we keep these here in case frontend has authenticated and backend hasn't established the secure channel yet;\n # in that case, tried credentials are stored to be used whenever usearauth with backend can be performed\n if tried_username and tried_password:\n self.frontendTriedUsername = tried_username\n self.frontendTriedPassword = tried_password\n\n # do nothing if frontend is not authenticated, or backend has not established a secure channel\n if not self.factory.server.frontendAuthenticated or not self.canAuth:\n return\n\n # we authenticate with the backend using the credentials provided\n # TODO create the account in the backend before (contact the pool of VMs for example)\n # so these credentials from the config may not be needed after all\n username = CowrieConfig().get('proxy', 'backend_user').encode()\n password = CowrieConfig().get('proxy', 'backend_pass').encode()\n\n log.msg('Will auth with backend: {0}/{1}'.format(username, password))\n self.sendPacket(5, bin_string_to_hex(b'ssh-userauth'))\n payload = bin_string_to_hex(username) + \\\n string_to_hex('ssh-connection') + \\\n string_to_hex('password') + \\\n b'\\x00' + \\\n bin_string_to_hex(password)\n\n self.sendPacket(50, payload)\n self.factory.server.backendConnected = True\n\n # send packets from the frontend that were waiting to go to the backend\n for packet in self.factory.server.delayedPackets:\n self.factory.server.sshParse.parse_packet('[SERVER]', packet[0], packet[1])\n self.factory.server.delayedPackets = []\n\n # backend auth is done, attackers will now be connected to the backend\n self.authDone = True", "def authenticate():\n # Get JSON data from request\n json = request.get_json()\n\n if 'email' not in json or 'password' not in json:\n raise CustomError(400, message='Must include an email and a password')\n\n # Check email\n user = User.query.filter_by(email=json['email']).first()\n if user is None:\n raise CustomError(401, message='Email or password were not found.')\n\n # Check password\n if not check_password_hash(user.password, json['password']):\n raise CustomError(401, message='Email or password were not found.')\n\n return 
jsonify({'success': True, 'user': user.to_dict()}), 201", "def authenticate():\n return abort(401)", "def authenticate(self):\n self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])", "def authenticate(self, rfid):\n print(\"Auth id: [{}]\".format(rfid))\n\n values = {'id' : rfid}\n data = urllib.parse.urlencode(values)\n data = data.encode('utf-8')\n\n t1 = perf_counter()\n\n req = urllib.request.Request(self.auth_url, data)\n try:\n resp = urllib.request.urlopen(req, timeout=self.request_timeout)\n except URLError as err:\n print(\"URLError: auth_url:[{}]\".format(self.auth_url))\n print(\"URLError: {}\".format(err))\n print(\"Falling back to local cache\")\n cached = self.auth_from_cache(rfid)\n return cached\n except timeout as err:\n cached = self.auth_from_cache(rfid)\n return cached\n\n text = resp.read()\n\n t2 = perf_counter()\n print(\"Auth got [{}] in {} seconds\".format(text, t2-t1))\n\n if text == b'Granted':\n return True", "def _auth(self):\n\n def check_response(text):\n \"\"\"Check to see if authentication has failed.\n \"\"\"\n\n if 'incorrect' in response.text:\n msg = ('Username <%(username)s> and password <%(password)s> ' +\n 'do not match.') % {'username': self.username,\n 'password': self.password}\n\n raise exceptions.AuthError(msg)\n\n data = {'action': 'login',\n 'user': self.username,\n 'pwd': self.password}\n\n # step 1: submit login form\n response = self._do('POST', self.URLS['auth_step_one'],\n data=data, allow_redirects=True)\n check_response(response.text)\n\n # step 2: fake second form's submission\n # todo: sprinkle on some error checking,\n # even though this is only a redirect\n response = self._do('POST', self.URLS['auth_step_two'],\n data=data, allow_redirects=True)\n check_response(response.text)\n\n return True", "def __authenticate(self, data):\n if 'token' not in data:\n raise TokenError(\"Invalid Token\")\n if data['token'] != app.config['SLACK_TOKEN']:\n raise TokenError(\"Invalid Token\")", "def activate_user(username, code, new_pass):\r\n\r\n qry = Activation.query.\\\r\n filter(Activation.code == code).\\\r\n filter(User.username == username)\r\n\r\n res = qry.first()\r\n\r\n if UserMgr.acceptable_password(new_pass) and res is not None:\r\n user = res.user\r\n user.activated = True\r\n user.password = new_pass\r\n res.activate()\r\n\r\n LOG.debug(dict(user))\r\n\r\n return True\r\n else:\r\n return None", "def auth_code_handler(self, request, pk=None):\n try:\n # Get xero auth access information form xero connection\n stored_values = OAUTH_PERSISTENT_SERVER_STORAGE\n\n\n if len(stored_values) == 0:\n return Utils.dispatch_failure(request, 'NO_TOKEN_AUTHENTICATION')\n\n secret_keys = Utils.get_access_keys(pk)\n if AccountingConfiguration.PRIVATE == secret_keys.type:\n exists = AccountingOauth2.objects.filter(company=pk).first()\n if not exists:\n auth = AccountingOauth2(accessToken=stored_values['consumer_key'],\n accessSecretKey=stored_values['rsa_key'],\n company_id=pk)\n auth.save()\n else:\n exists.accessToken = stored_values['consumer_key']\n exists.accessSecretKey = stored_values['rsa_key']\n exists.save()\n else:\n auth_verifier_uri = settings.XERO_AUTH_VERIFIER_URI\n oauth_verifier = request.GET.get('oauth_verifier')\n credentials = Utils.get_xero_public_credentials(stored_values)\n\n if credentials.expired():\n return Utils.dispatch_failure(request, 'NO_TOKEN_AUTHENTICATION')\n\n # Verify the auth verifier for establish the connection\n\n credentials.verify(oauth_verifier)\n # Resave our verified credentials\n for 
key, value in credentials.state.items():\n OAUTH_PERSISTENT_SERVER_STORAGE.update({key: value})\n\n stored_values = OAUTH_PERSISTENT_SERVER_STORAGE\n exists = AccountingOauth2.objects.filter(company=pk).first()\n\n if exists:\n exists.accessToken = stored_values['oauth_token']\n exists.realmId = oauth_verifier\n exists.accessSecretKey = stored_values['oauth_token_secret']\n exists.tokenAcitvatedOn = stored_values['oauth_expires_at']\n exists.tokenExpiryON = stored_values['oauth_authorization_expires_at']\n exists.save()\n else:\n auth = AccountingOauth2(accessToken=stored_values['oauth_token'],\n refreshToken='',\n realmId=oauth_verifier,\n accessSecretKey=stored_values['oauth_token_secret'],\n tokenAcitvatedOn=stored_values['oauth_expires_at'],\n tokenExpiryON=stored_values['oauth_authorization_expires_at'],\n company_id=pk)\n auth.save()\n # auth_redirect_url = os.environ.get ('QBO_AUTH_REDIRECT_URL',\n # 'http://localhost:4200/coa-match/quickbooks')\n\n # auth_redirect_url = os.environ.get ('QBO_AUTH_REDIRECT_URL','http://ec2-52-207-28-114.compute-1.amazonaws.com/ix/coa-match/quickbooks')\n\n # return redirect(auth_redirect_url)\n\n except Exception as e:\n auth_cancel_url = settings.QBO_AUTH_CANCEL_URL\n Utils.send_company_misconfig(pk, e)\n return redirect(auth_cancel_url + '/error')\n #return Utils.dispatch_success(request, 'TOKEN_ALREADY_VALIDATED')\n\n auth_redirect_url = settings.XERO_AUTH_REDIRECT_URL\n return redirect(auth_redirect_url)\n # return Utils.dispatch_success(request, stored_values)", "def authenticate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n user = self.username.data\n\n cur = get_cursor()\n if email_exists(cur, user):\n user = get_username(cur, user)\n\n if username_exists(cur, user):\n pw_hash = get_pw_hash(cur, user)\n\n if check_password(self.password.data, pw_hash):\n self.username.data = user\n return True\n\n return False", "def authenticate(self, *args, **kwargs):\n # Validate backend and arguments. Require that the Social Auth\n # response be passed in as a keyword argument, to make sure we\n # don't match the username/password calling conventions of\n # authenticate.\n if not (self.name and kwargs.get(self.name) and 'response' in kwargs):\n return None\n\n response = kwargs.get('response')\n pipeline = PIPELINE\n kwargs = kwargs.copy()\n kwargs['backend'] = self\n\n if 'pipeline_index' in kwargs:\n pipeline = pipeline[kwargs['pipeline_index']:]\n else:\n kwargs['details'] = self.get_user_details(response)\n kwargs['uid'] = self.get_user_id(kwargs['request'])\n kwargs['is_new'] = False\n \n out = self.pipeline(pipeline, *args, **kwargs)\n if not isinstance(out, dict):\n return out\n\n social_user = out.get('social_user')\n if social_user:\n # define user.social_user attribute to track current social\n # account\n user = social_user.user\n user.social_user = social_user\n user.is_new = out.get('is_new')\n return user", "def login_menu(self):\n print(\"\\nPlease enter your email and password\")\n email = self.validate_email()\n password = self.validate_password()\n self.authenticate_user(email, password)", "def login():\n auth_state = str(uuid.uuid4())\n SESSION.auth_state = auth_state\n\n # For this sample, the user selects an account to authenticate. 
Change\n # this value to 'none' for \"silent SSO\" behavior, and if the user is\n # already authenticated they won't need to re-authenticate.\n prompt_behavior = 'select_account'\n\n params = urllib.parse.urlencode({'response_type': 'code',\n 'client_id': config.CLIENT_ID,\n 'redirect_uri': config.REDIRECT_URI,\n 'state': auth_state,\n 'resource': config.RESOURCE,\n 'prompt': prompt_behavior})\n\n return bottle.redirect(config.AUTHORITY_URL + '/oauth2/authorize?' + params)", "def _delegate_authentication(username, password):\n payload = json.dumps({\n 'type': 'normal',\n 'username': username,\n 'password': password\n })\n headers = {'Content-Type': 'application/json'}\n login_response = requests.post(API_URL + \"/auth\", data=payload, headers=headers)\n if login_response.status_code != 200:\n return False\n\n try:\n decoded_response = login_response.json()\n except ValueError as error:\n logger.error(f'Cannot decode Taiga auth response: {error}. Response was: {login_response}')\n return False\n return decoded_response", "def authenticate(self):\n # self.qobject.remove_authenticate_signal.emit()\n # self.qobject.authenticate_signal.emit( )\n #if self.app.sync_thread.status != const.STATUS_SYNC:\n # self.app.sync_thread.force_sync()\n change_auth_token( )\n self.data_changed()", "def authcheck():\n user = get_user()\n return jsonify({'current_identity': user.username})", "def authenticator():", "def authentication_view(request):\n \n data = request.data\n username = data.get('username', '')\n password = data.get('password', '')\n user = authenticate(request, username=username, password=password) \n\n if user is not None:\n login(request, user)\n return JsonResponse({\"status\": \"success\"})\n else:\n return JsonResponse({\"status\": \"fail\"})", "def login_user():\n pass", "def get(self):\n\n\t\trequest = user_auth_parser.parse_args(strict=True)\n\n\t\tresult = Authenticator.authenticate(\n\t\t\trequest[\"username\"],\n\t\t\trequest[\"password\"]\n\t\t)\n\n\t\treturn result", "def skyserv_authenticator(self):\n \n header = {\n 'Content-Type': accept, \n 'X-Auth-Token': self.casjobtoken,\n 'Accept': accept\n }\n # this format is disgusting but required....\n authdata = {\n 'auth' :{\n 'identity': {\n 'password': {\n 'user': {\n 'name': username,\n 'password': password\n }\n }\n }\n }\n }\n payload = json.dumps(authdata).encode(encoding='utf-8')\n try:\n post = requests.post(self.loginurl, data=payload, headers=header)\n\n if post.status_code == 200:\n response = json.loads(post.text)\n token = response[self.tokenkey]\n return token\n else:\n print('Username and/or password are invalid.')\n post.raise_for_status()\n except Exception as e:\n raise(str(e))", "def authenticate(self, func):\n self._authentication_callback = func\n return func", "def authenticate():\n\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n get_auth_headers())", "def authenticate(self, username, password):\n auth = (username, password)\n res = requests.get(\n self.normalize_admin_url(\"authenticate\"),\n headers={\"user-agent\": self.u_agent},\n auth=auth,\n verify=False,\n )\n if res.status_code == 200:\n # authentication ok, keep authentication info for future use\n self.auth = auth\n return Response(0, \"Successfully logged in\")\n elif res.status_code == 401:\n try:\n val = res.json()\n except ValueError:\n val = \"Login credentials not accepted\"\n return Response(401, val)\n else:\n return Response(res.status_code, res.content)", "def 
check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']", "def authenticate_credentials(self, payload):\n username = payload.get('username')\n email = payload.get('email')\n if not username and not email:\n msg = _('Invalid payload.')\n raise exceptions.AuthenticationFailed(msg)\n try:\n # Username query is case insensitive\n user_queryset = User.objects.filter(\n Q(username__iexact=username)|\n Q(email__iexact=email)\n ).distinct()\n if user_queryset.exists() and user_queryset.count() == 1:\n user = user_queryset.first()\n return user\n except User.DoesNotExist:\n return None", "def authenticate(self, cred): \n auth_helper = AuthHelper.AuthHelper(self.context)\n return auth_helper.auth(cred)", "def check_auth_interactive(self, username, submethods):\n return AUTH_FAILED", "def authentication(app, user_model):\n login_manager.login_message = \"Please login to access this page.\"\n login_manager.login_view = 'auth.login'\n login_manager.session_protection = 'strong'\n login_manager.login_message_category = 'danger'\n\n @login_manager.user_loader\n def load_user(user_id):\n return user_model.query.get(int(user_id))", "def check_auth(*args, **kwargs):\n r = cherrypy.request\n s = cherrypy.session\n\n username = s.get(USERNAME_SESSION_KEY, None)\n course = s.get(CUR_CRS_SESSION_KEY, None)\n # require a course to be selected\n if username and not course and r.path_info != '/auth/course':\n raise cherrypy.HTTPRedirect(\"/auth/course\")\n\n conditions = r.config.get('auth.restrict.require', None)\n if conditions is not None:\n if username:\n r.login = username\n for condition in conditions:\n # A condition is just a callable that returns true or false\n if not condition():\n raise cherrypy.HTTPRedirect(\"/auth/not-authorized\")\n else:\n s[FROMPATH_SESSION_KEY] = r.path_info\n raise cherrypy.HTTPRedirect(\"/auth/login\")", "def authenticate(self, username: str, password: str) -> Optional[str]:", "def authenticate(self, username: str, password: str) -> Optional[str]:", "def authorize():\n resp = git_auth.authorized_response()\n user_info = git_auth.get('user', token=(resp[\"access_token\"],)).data\n u = db_session.query(User).filter(User.email == user_info['email']).first()\n if not u:\n u = User(user_info['login'], user_info['email'])\n db_session.add(u)\n db_session.commit()\n login_user(u, remember=True)\n return redirect(url_for('index'))", "def authenticate(self):\n #it's weird i have to do this here, but the code makes this not simple\n auth_json={'email':self.user, 'password':self.password}\n #send a post with no auth. 
prevents an infinite loop\n auth_response = self.post('/auth', data = json.dumps(auth_json), auth =\n None)\n\n _token = auth_response.json['token']\n\n self._token = _token\n self._wrapped.auth = SpringAuth(_token)", "def authenticate(self):\n\n def decorate(func, *args, **kws):\n \"\"\"\n A function returned as a object in load time,\n which returns inner function do_decorate().\n \"\"\"\n def do_authenticate():\n \"\"\"\n A function to perform authentication\n every time decorated function is called.\n \"\"\"\n #try:\n if 1:\n if 'referer' not in self.session:\n path = urlsplit(self.request.url)[2]\n self.session['referer'] = path\n self.session.put()\n #except:\n # pass\n aobj = self.config.auth_obj()\n self.get_controller()\n auth_res = aobj.auth(self.controller, *args, **kws)\n if auth_res:\n return func(*args, **kws)\n aobj.auth_redirect(self.controller, *args, **kws)\n # clear controller for development environment.\n\n return do_authenticate\n\n return decorate", "def user_login():\n user = query_db('''select * from user where username = ?''', [request.authorization.username], one=True)\n if user is None:\n error = 'Invalid username'\n elif not check_password_hash(user['pw_hash'],request.authorization.password):\n error = 'Invalid password'\n else:\n flash('You were logged in')\n return jsonify({'user_id':user['user_id']}),200", "def login(self, user, password):\n \n def encode_base64(s, eol=None):\n return \"\".join(base64.encodestring(s).split(\"\\n\"))\n \n def encode_cram_md5(challenge, user, password):\n challenge = base64.decodestring(challenge)\n response = user + \" \" + hmac.HMAC(password, challenge).hexdigest()\n return base64_encode(response, eol=\"\")\n\n def encode_plain(user, password):\n return base64_encode(\"%s\\0%s\\0%s\" % (user, user, password), eol=\"\")\n\n\n AUTH_PLAIN = \"PLAIN\"\n AUTH_CRAM_MD5 = \"CRAM-MD5\"\n AUTH_LOGIN = \"LOGIN\"\n\n if self.helo_resp is None and self.ehlo_resp is None:\n if not (200 <= self.ehlo()[0] <= 299):\n (code, resp) = self.helo()\n if not (200 <= code <= 299):\n raise SMTPHeloError(code, resp)\n\n if not self.has_extn(\"auth\"):\n raise SMTPException(\"SMTP AUTH extension not supported by server.\")\n\n # Authentication methods the server supports:\n authlist = self.esmtp_features[\"auth\"]\n if authlist.startswith('='):\n authlist = authlist[1:]\n authlist = authlist.split()\n # List of authentication methods we support: from preferred to\n # less preferred methods. 
Except for the purpose of testing the weaker\n # ones, we prefer stronger methods like CRAM-MD5:\n \n preferred_auths = [AUTH_CRAM_MD5, AUTH_PLAIN, AUTH_LOGIN]\n if hmac is None:\n preferred_auths.remove(AUTH_CRAM_MD5)\n \n # Determine the authentication method we'll use\n authmethod = None\n for method in preferred_auths:\n if method in authlist:\n authmethod = method\n break\n\n if authmethod == AUTH_CRAM_MD5:\n (code, resp) = self.docmd(\"AUTH\", AUTH_CRAM_MD5)\n if code == 503:\n # 503 == 'Error: already authenticated'\n return (code, resp)\n (code, resp) = self.docmd(encode_cram_md5(resp, user, password))\n elif authmethod == AUTH_PLAIN:\n (code, resp) = self.docmd(\"AUTH\",\n AUTH_PLAIN + \" \" + encode_plain(user, password))\n elif authmethod == AUTH_LOGIN:\n (code, resp) = self.docmd(\"AUTH\",\n \"%s %s\" % (AUTH_LOGIN, encode_base64(user, eol=\"\")))\n if code != 334:\n raise SMTPException(\"Authorization failed.\")\n (code, resp) = self.docmd(encode_base64(password, eol=\"\"))\n elif authmethod == None:\n raise SMTPException(\"No suitable authentication method found.\")\n if code not in [235, 503]:\n # 235 == 'Authentication successful'\n # 503 == 'Error: already authenticated'\n raise SMTPException(\"Authorization failed.\")\n return (code, resp)", "def login_entrypoint(request, authentication_form=AuthenticationForm):\n if request.user.is_authenticated:\n return redirect_to_user_settings()\n\n if request.method == 'GET':\n return login_view(request)\n\n # authenticate the user\n form = authentication_form(request, data=request.POST)\n\n # store the user's id in the session so you can perform Instant2FA check\n if form.is_valid():\n user = form.get_user()\n request.session['distinct_id'] = get_user_distinct_id(user)\n return HttpResponseRedirect('/login/two-factor/')\n else:\n logging.debug(\"User did not provide valid authentication credentials.\")\n return login_view(request)", "def login_require(request):\n\n if request.method == \"GET\":\n data = request.GET\n else:\n data = request.POST\n user = authenticate(username=data[\"username\"], password=data[\"password\"])\n if user and user.is_active:\n ret = Response(SUCCESS, error_code[SUCCESS])\n else: \n ret = Response(AUTHENTICATION_FAIL, error_code[AUTHENTICATION_FAIL])\n return HttpResponse(ret.serialize(f))\n\n # Generate a token for authentication\n token = token_generator(30)\n try:\n user_token = Token.objects.get(username=data[\"username\"])\n user_token.token = token\n user_token.start_time = datetime.now()\n except: \n user_token = Token(token=token, username=data[\"username\"])\n user_token.save()\n ret.set_ret(\"auth_token\", token) \n user = User.objects.get(username=data[\"username\"])\n ret.set_ret(\"data\", UserSerializer(user.appuser).serialize())\n return HttpResponse(ret.serialize(f))", "def activateWebAppUser( self, username, activation_code ):\n try:\n con = self.getMetadataDatabaseConnection()\n user_data = con.cursor()\n\n con.cursor().callproc('verify_user_activation_code', [username, activation_code, user_data])\n row = user_data.fetchone()\n if row:\n con.cursor().callproc('activate_user_account', [username])\n return True\n else:\n return False\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def authenticate_user():\n if request.headers['content-type'] == 'application/json':\n print(request)\n data = request.get_json()\n if data:\n username = data['username']\n password = data['password']\n else:\n return Response(status=400) # no JSON to 
parse\n\n if username is None or password is None:\n return Response(status=400) # missing arguments\n\n if not verify_password(username, password):\n return Response(status=403) # User not authenticated\n\n return jsonify({'username': username, 'success': True}), 201\n else:\n print(\"invalid request type, no json\")\n return Response(status=400) # invalid request type", "def login():\n domain = parser[\"learningmachine\"][\"domain\"]\n secrets_file = \"{}/{}\".format(dir_path, \"client_secret.json\")\n scope = \"https://www.googleapis.com/auth/userinfo.email\"\n redirect_uri = \"http://{}/login\".format(domain)\n login_handler = LoginHandler(secrets_file, scope, redirect_uri)\n\n if \"code\" in request.args:\n login_handler.setup_user_info(request.args[\"code\"])\n session[\"email\"] = login_handler.email\n session[\"display_name\"] = login_handler.display_name\n\n if not fm.user_exists(login_handler.email):\n msg = \"Adding user: {} with ID of {} to the database.\"\\\n .format(login_handler.email, login_handler.display_name)\n fm.add_user(login_handler.email, login_handler.display_name)\n\n msg = \"Sending user: {} to main page\".format(login_handler.email)\n app.logger.info(msg)\n return redirect(\"/static/main.html\")\n\n else:\n msg = \"No login code yet. Letting Google handle the login process at: {}\"\\\n .format(login_handler.auth_url)\n app.logger.info(msg)\n return redirect(login_handler.auth_url)" ]
[ "0.66076124", "0.6604477", "0.6523267", "0.64544946", "0.6403558", "0.6359539", "0.6357345", "0.6290257", "0.6288636", "0.6280958", "0.6248023", "0.6243201", "0.6242323", "0.6237753", "0.6226705", "0.6226705", "0.62162906", "0.61871463", "0.6182399", "0.61742467", "0.6170313", "0.61575925", "0.6150889", "0.6150889", "0.6150889", "0.6150889", "0.6150889", "0.6150889", "0.6150889", "0.6150889", "0.6148997", "0.6137659", "0.61356163", "0.60954833", "0.60631275", "0.6033103", "0.60151505", "0.6004793", "0.6001416", "0.5990997", "0.59909195", "0.59785664", "0.59677255", "0.5965624", "0.59640485", "0.59554416", "0.59416056", "0.5927887", "0.59234565", "0.59191555", "0.59126776", "0.5911595", "0.5909318", "0.5904114", "0.5882838", "0.58727723", "0.5872426", "0.5863785", "0.58593476", "0.58451694", "0.581483", "0.58099437", "0.5801386", "0.5798975", "0.5796279", "0.5794899", "0.57869107", "0.5785157", "0.5785035", "0.5783977", "0.5783024", "0.5780957", "0.5774048", "0.576689", "0.5766561", "0.5765667", "0.576016", "0.5759286", "0.57584804", "0.5753387", "0.57532436", "0.57527345", "0.5747901", "0.5741838", "0.5738193", "0.57328737", "0.57296395", "0.57193863", "0.57167464", "0.57167464", "0.5711525", "0.5706996", "0.5706021", "0.5705268", "0.5705177", "0.57018644", "0.5700007", "0.5698187", "0.56884396", "0.5688313" ]
0.70372474
0
Creates a new service client
def __init__(self, config, **kwargs): validate_config(config, signer=kwargs.get('signer')) if 'signer' in kwargs: signer = kwargs['signer'] else: signer = Signer( tenancy=config["tenancy"], user=config["user"], fingerprint=config["fingerprint"], private_key_file_location=config.get("key_file"), pass_phrase=get_config_value_or_default(config, "pass_phrase"), private_key_content=config.get("key_content") ) base_client_init_kwargs = { 'regional_client': True, 'service_endpoint': kwargs.get('service_endpoint'), 'timeout': kwargs.get('timeout'), 'base_path': '/20160918', 'skip_deserialization': kwargs.get('skip_deserialization', False) } self.base_client = BaseClient("identity", config, signer, identity_type_mapping, **base_client_init_kwargs) self.retry_strategy = kwargs.get('retry_strategy')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_client(service_name: str, config_name: str = None, **client_args):\n session = get_session(config_name)\n return session.client(service_name, **client_args)", "def create_client(self) -> None:\n pass", "def create(ctx, name, company, mail, age):\n client = Client(name,company,mail,age)\n client_service = ClientService(ctx.obj['clients_table']) \n client_service.create_client(client)", "def make_service(self, endpoint_type, service_name, **client_kwargs):\n binding = self._make_binding(endpoint_type, service_name)\n service_cache_key = (binding, str(client_kwargs))\n\n if service_cache_key in self._service_cache:\n srvc = self._service_cache[service_cache_key]\n else:\n client = self._make_client(\n endpoint_type,\n service_name,\n **client_kwargs\n )\n srvc = client.create_service(binding, client.wsdl.location)\n self._service_cache[service_cache_key] = srvc\n return srvc", "def create_client(service, region, access_key_id, secret_access_key):\n client = boto3.client(service,\n region_name=region,\n aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key\n )\n return client", "def create_client(service, region, access_key_id, secret_access_key):\n client = boto3.client(service,\n region_name=region,\n aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key\n )\n return client", "def add_client(name):\n return create_client(name)", "def create_client(name):\n client = Client(name=name)\n print(client.client_secret)\n db.session.add(client)\n db.session.commit()\n return client", "def make_client(service_key, constructor=None, options=None, **kwargs):\n cloud = get_config(service_key=service_key, options=options, **kwargs)\n if not constructor:\n constructor = cloud_config._get_client(service_key)\n return cloud.get_legacy_client(service_key, constructor)", "def _create_service_client(self, srv_name):\n if self._srv:\n self._srv.close()\n\n if srv_name in rosservice.get_service_list():\n rospy.loginfo(\"Creating proxy for service '%s'\" % srv_name)\n self._srv = rospy.ServiceProxy(srv_name, rosservice.get_service_class_by_name(srv_name))", "def create_client(self) -> None:\n self._client = gapic.JobServiceClient(\n client_options=dict(api_endpoint=self._region + _UCAIP_ENDPOINT_SUFFIX))", "def create_client(\n body: ClientmodelClientCreateRequest,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n request = CreateClient.create(\n body=body,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def createService(data):\n return Service(data).create()", "def hello_svc_client():\n from clients.hello_svc import HelloServiceClient\n return HelloServiceClient()", "def make_rest_client(\n service_key, options=None,\n app_name=None, app_version=None, version=None,\n **kwargs):\n cloud = get_config(\n service_key=service_key, options=options,\n app_name=app_name, app_version=app_version,\n **kwargs)\n return cloud.get_session_client(service_key, version=version)", "def create_client(self) -> None:\n self._client = discovery.build('ml', 'v1')", "def create_test_service(context, **kw):\n service = get_test_service(context, **kw)\n service.create()\n return service", "def create_client(email, password):\n gd_client = gdata.contacts.service.ContactsService()\n gd_client.email = email\n gd_client.password = password\n gd_client.source = 'syncContacts'\n gd_client.ProgrammaticLogin()\n return gd_client", "def create_service():\n creds = None\n # The file token.pickle stores the user's access and 
refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=9797)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n return service", "def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )", "def create_service(self, service_name, *args, **kwargs):\n\n creator = self._service_creators.get(service_name, None)\n\n if creator is None:\n return None\n\n return creator(*args, **kwargs)", "def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)", "def create_client_by_namespace(\n body: ClientmodelClientCreateRequest,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = CreateClientByNamespace.create(\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def CreateClient():\n client = gdata.docs.client.DocsClient(source=SampleConfig.APP_NAME)\n client.http_client.debug = SampleConfig.DEBUG\n # Authenticate the user with CLientLogin, OAuth, or AuthSub.\n try:\n gdata.sample_util.authorize_client(\n client,\n service=client.auth_service,\n source=client.source,\n scopes=client.auth_scopes\n )\n except gdata.client.BadAuthentication:\n exit('Invalid user credentials given.')\n except gdata.client.Error:\n exit('Login Error')\n return client", "def create_service(self, service_id, service_ref):\n raise exception.NotImplemented() # pragma: no cover", "def create_service(service, version, creds=None):\n # Instantiate an Http instance\n http = httplib2.Http()\n\n if creds:\n # Authorize the Http instance with the passed credentials\n creds.authorize(http)\n\n return build(service, version, http=http)", "def test_create_client(self):\n pass", "def create_service(flags, client_id, client_secret):\n flow = OAuth2WebServerFlow(\n client_id=client_id,\n client_secret=client_secret,\n scope='https://www.googleapis.com/auth/drive.readonly',\n redirect_uri='http://localhost')\n storage = Storage('oauth_storage')\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(httplib2.Http())\n return build('drive', 'v2', http=http)", "def create_client(self, module_name, version, client_class):\n # NOTE(kiennt): Get created client rather create a new one.\n 
# The key is the combination of module_name and version.\n # because we can create multiple clients of a module with\n # different versions.\n client = self.created_clients.get(module_name + version)\n if client:\n return client\n module_client = self._import_client(module_name)\n try:\n client = getattr(module_client, client_class)(\n version=version,\n session=self._sess)\n self.created_clients[module_name+version] = client\n return client\n except Exception as err:\n raise err", "def client(\n service_name: str, version: str = \"v1\", secrets: Secrets = None\n) -> Resource:\n credentials = load_credentials(secrets=secrets)\n return build(service_name, version=version, credentials=credentials)", "async def create_client_by_namespace_async(\n body: ClientmodelClientCreateRequest,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = CreateClientByNamespace.create(\n body=body,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def create_service(cls, proto_py_module, service_name):\n\n return cls.create_services(proto_py_module, service_name)", "def create_client(self, initiator_iqn):\n client = self._get_target_client(initiator_iqn)\n if not client:\n try:\n self.client.create_client(self.target_iqn,\n initiator_iqn)\n except client_exceptions.ClientException as ex:\n raise exception.VolumeBackendAPIException(\n data=ex.get_description())", "def client():\n\n client = Client()\n return client", "def serviceClient(self, iTag, srvType, addr):\r\n return ROSServiceClient(self, iTag, srvType, addr)", "async def api_create_service(\n data: CreateService, wallet: WalletTypeInfo = Depends(get_key_type)\n):\n try:\n service = await create_service(data=data)\n except Exception as e:\n raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail=str(e))\n\n return service.dict()", "def create_generated_client() -> None:\n print(\"Generating client\")\n\n delete_generated_client()\n args = [\n \"{}/../scripts/generate.sh\".format(ROOT),\n \"-i\",\n \"http://localhost:8000/openapi.json\",\n \"-p\",\n CLIENT_NAME,\n \"--include-auth\",\n \"-o\",\n ROOT,\n \"-t\",\n \"/tmp\",\n \"-m\",\n ]\n\n process_result = subprocess.run(args, capture_output=True)\n\n with open(os.path.join(LOG_DIR, \"generation.log\"), \"wb\") as file:\n file.write(process_result.stdout)\n\n with open(os.path.join(LOG_DIR, \"generation.err\"), \"wb\") as file:\n file.write(process_result.stderr)\n\n if process_result.returncode != 0: # pragma: no cover\n if process_result.stderr:\n sys.stderr.write(process_result.stderr.decode(\"utf-8\"))\n pytest.exit(\n \"Failed to generate client api, code {}\"\n \"\\nLogs are in logs/generation.log and logs/generation.err\".format(process_result.returncode),\n returncode=process_result.returncode,\n )\n\n print(\"Client created in {}, logs in logs/generation.log\\n\".format(CLIENT_DIR))", "def _create_client(p4, client_name, p4gf_dir):\n view = ['//{depot}/... 
//{client}/...'.format(depot=p4gf_const.P4GF_DEPOT,\n client=client_name)]\n spec_created = False\n if not p4gf_util.spec_exists(p4, \"client\", client_name):\n # See if the old object clients exist, in which case we will remove them.\n if p4gf_util.spec_exists(p4, \"client\", OLD_OBJECT_CLIENT):\n p4.run('client', '-df', OLD_OBJECT_CLIENT)\n if p4gf_util.spec_exists(p4, \"client\", OLDER_OBJECT_CLIENT):\n p4.run('client', '-df', OLDER_OBJECT_CLIENT)\n spec_created = p4gf_util.ensure_spec(\n p4, \"client\", spec_id=client_name,\n values={'Host': None, 'Root': p4gf_dir,\n 'Description': 'Created by Perforce Git Fusion',\n 'View': view})\n if not spec_created:\n p4gf_util.ensure_spec_values(p4, \"client\", client_name,\n {'Root': p4gf_dir, 'View': view})", "def service_create(service, service_type, api, endpoint):\n db = model.Session()\n _assert_absent(db, model.Service, service)\n api = _must_find(db, model.API, api)\n service = model.Service(service, service_type, api, endpoint)\n db.add(service)\n db.commit()", "def create_client():\n logger.debug(\"=====create_client fired...\")\n try:\n session = boto3.Session()\n client = session.client('dynamodb', region_name='us-east-1')\n return client\n except ClientError as err:\n logger.error(\n \"[BOTO3_ERROR]Failed to create boto3 client: %s\", str(err))", "def create_client(client_id, authority_url, client_secret):\n client = msal.ConfidentialClientApplication(\n client_id=client_id, authority=authority_url, client_credential=client_secret\n )\n return client", "def create_service():\n creds = None\n # The file token_sheet.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token_sheet.pickle'):\n with open('token_sheet.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials_sheets.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token_sheet.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('sheets', 'v4', credentials=creds)\n return service", "async def create_client_async(\n body: ClientmodelClientCreateRequest,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n request = CreateClient.create(\n body=body,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def create_boto3_client(config, service):\n session = boto3.Session(profile_name=config.get('AWS_ACCESS', 'AWS_PROFILE'))\n return session.client(service, region_name=config.get('AWS_ACCESS', 'AWS_REGION'))", "def create_client(name: str, url: str, description: str, scopes: str,\n redirect_uri: str) -> None:\n app = create_web_app()\n with app.app_context():\n datastore.create_all()\n\n with datastore.util.transaction() as session:\n db_client = datastore.models.DBClient(\n name=name,\n url=url,\n description=description,\n redirect_uri=redirect_uri\n )\n secret = generate_token(48)\n hashed = hashlib.sha256(secret.encode('utf-8')).hexdigest()\n db_cred = datastore.models.DBClientCredential(client=db_client,\n client_secret=hashed)\n db_scopes = [\n datastore.models.DBClientAuthorization(\n client=db_client, authorized=datetime.now(), scope=scope\n ) for scope 
in scopes.split()\n ]\n db_grant_type = datastore.models.DBClientGrantType(\n client=db_client,\n grant_type='client_credentials',\n authorized=datetime.now()\n )\n db_grant_type = datastore.models.DBClientGrantType(\n client=db_client,\n grant_type='authorization_code',\n authorized=datetime.now()\n )\n\n session.add(db_client)\n session.add(db_cred)\n session.add(db_grant_type)\n for db_scope in db_scopes:\n session.add(db_scope)\n\n session.commit()\n click.echo(f'Created client {name} with ID {db_client.client_id}'\n f' and secret {secret}')", "def boto_client(account_id, service_name, region):\n logger.info('Creating boto3 client for account_id: {}, '\n 'service_name: {}'.format(account_id, service_name))\n return boto3.client(service_name, region_name=region)", "def _establish_client():\n logger.debug('SoapService - _establish_client()')\n try:\n client = zeep.Client(wsdl=settings.WSDL)\n except Exception as e:\n message = 'Unable to create soap client from wsdl file, error: {}'.format(e)\n logger.error(message)\n raise IOError(message)\n\n return client", "def create_resource(\n service_name: str, config_name: str = None, **resource_args\n):\n session = get_session(config_name)\n return session.resource(service_name, **resource_args)", "def create_client():\n result = False\n if g.client_id in drivers:\n result = True\n return jsonify({'Success': result})", "def new_instance(cls,\n version: date,\n service_name: str = DEFAULT_SERVICE_NAME,\n ) -> 'DirectLinkApisV1':\n if version is None:\n raise ValueError('version must be provided')\n\n authenticator = get_authenticator_from_environment(service_name)\n service = cls(\n version,\n authenticator\n )\n service.configure_service(service_name)\n return service", "def client(service_name, region_name=None):\n return session.client(service_name=service_name, region_name=region_name)", "def serviceClient(self, iTag, srvType, cb=None):\r\n if cb and not callable(cb):\r\n raise TypeError('Callback has to be callable.')\r\n\r\n return ServiceClient(self, iTag, srvType, cb)", "def _create_soap_object(self, name):\n return self.client.factory.create(name)", "def service_create(path, service_name, definition):\n compose_result, loaded_definition, err = __load_compose_definitions(\n path, definition\n )\n if err:\n return err\n services = compose_result[\"compose_content\"][\"services\"]\n if service_name in services:\n msg = \"Service {} already exists\".format(service_name)\n return __standardize_result(False, msg, None, None)\n services[service_name] = loaded_definition\n return __dump_compose_file(\n path,\n compose_result,\n \"Service {} created\".format(service_name),\n already_existed=True,\n )", "def createClient(self, name, wid, notes=None):\n\n data = {}\n data['client'] = {}\n data['client']['name'] = name\n data['client']['wid'] = wid\n data['client']['notes'] = notes\n\n response = self.postRequest(Endpoints.CLIENTS, parameters=data)\n return self.decodeJSON(response)", "def create(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_Create'))", "def create_dummy_client(index, user, client_manager = None, language = None, currency = None):\r\n \r\n if client_manager == None:\r\n client_manager = ClientManager(user)\r\n \r\n if currency is None:\r\n currency = create_dummy_currency(index)\r\n \r\n if language is None:\r\n language = create_dummy_language(index)\r\n \r\n return client_manager.add_client(\r\n name = 'client_%i' %index,\r\n address = 'address_%i' % index,\r\n email = 'corp_email_%i@email.com' % index,\r\n 
default_currency_id = currency.key().id(),\r\n default_language_id = language.key().id(),\r\n )", "def create_client(name):\n address = \"/run/com_handler.sock\"\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(address)\n return Client(sock, \"\", name)", "def create_client(self, version=None, unstable=False, **kwargs):\n version_data = self._calculate_version(version, unstable)\n return self._create_client(version_data, **kwargs)", "def create_servicech(self, conf, params):\n\t\tpass", "def create_client():\n hostname = \"localhost\"\n username = \"she393\"\n password = os.getenv(\"PASSWORD\")\n\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=hostname, username=username, password=password)\n return client", "def test_client_create(self):\n pass", "def new(\n cls,\n name: str,\n description: str,\n registration_schema: JSON,\n result_schema: JSON,\n database_session: Session) -> 'Service':\n raise NotImplementedError()", "def create_service(self, url_data):\n data = {key: value[0] for key, value in url_data}\n\n publish_key = uuid.uuid4().hex\n service_id = uuid.uuid4().hex\n service_name = data['name']\n\n self.fastly_cache[service_name] = {\n 'service_details': {\n u'comment': '',\n u'locked': False,\n u'updated_at': u'2014-11-13T14:29:10+00:00',\n u'created_at': u'2014-11-13T14:29:10+00:00',\n u'testing': None,\n u'number': 1,\n u'staging': None,\n u'active': None,\n u'service_id': service_id,\n u'deleted_at': None,\n u'inherit_service_id': None,\n u'deployed': None},\n 'service_name': service_name\n }\n self.fastly_cache[service_id] = self.fastly_cache[service_name]\n\n create_service = {\n u'comment': '',\n u'publish_key': publish_key,\n u'name': service_name,\n u'versions': [{u'comment': '', u'locked': u'0',\n u'service': service_id,\n u'updated_at': u'2014-11-12T18:43:21',\n u'created_at': u'2014-11-12T18:43:21',\n u'testing': None, u'number': u'1',\n u'staging': None,\n u'active': None,\n u'service_id': service_id,\n u'deleted_at': None,\n u'inherit_service_id': None,\n u'deployed': None,\n u'backend': 0}],\n u'created_at': u'2014-11-12T18:43:21+00:00',\n u'updated_at': u'2014-11-12T18:43:21+00:00',\n u'customer_id': data['customer_id'],\n u'id': service_id}\n return create_service", "def build_service():\n\n\tstore = file.Storage('credentials.json')\n\tcreds = store.get()\n\tif not creds or creds.invalid:\n\t flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\n\t creds = tools.run_flow(flow, store)\n\tservice = build('gmail', 'v1', http=creds.authorize(Http(disable_ssl_certificate_validation=True)))\n\treturn service", "def create():\n form = request.form\n try:\n # create a new BancBox client from the input form\n resp = api.create_client(form)\n except Exception, e:\n logger.error('Error creating new client: %s', e)\n return render_template('created.html', error=e.message)\n\n if resp.status == 1:\n # If the create request was successful, let's render a success\n # message with some data about the new client and a link to the\n # detail page\n new_client = {\n 'firstName': form['firstName'],\n 'lastName': form['lastName'],\n 'clientId': resp.clientId\n }\n return render_template('created.html', new_client=new_client)\n else:\n # If an error was returned by BancBox, let's render it\n if hasattr(resp, 'errors') and hasattr(resp.errors, 'message'):\n message = resp.errors.message\n else:\n message = \"Error creating new client.\"\n return 
render_template('created.html', error=message)", "def create_client(self):\n client = iperf3.Client()\n client.duration = self._host[CONF_DURATION]\n client.server_hostname = self._host[CONF_HOST]\n client.port = self._host[CONF_PORT]\n client.num_streams = self._host[CONF_PARALLEL]\n client.protocol = self._host[CONF_PROTOCOL]\n client.verbose = False\n return client", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def get_client(service_account_json):\n api_scopes = ['https://www.googleapis.com/auth/cloud-platform']\n api_version = 'v1'\n discovery_api = 'https://cloudiot.googleapis.com/$discovery/rest'\n service_name = 'cloudiotcore'\n\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json)\n scoped_credentials = credentials.with_scopes(api_scopes)\n\n discovery_url = '{}?version={}'.format(\n discovery_api, api_version)\n\n return discovery.build(\n service_name,\n api_version,\n discoveryServiceUrl=discovery_url,\n credentials=scoped_credentials)", "def create_TestService(test_case, # type: AnyMagpieTestCaseType\n override_service_name=null, # type: Optional[Str]\n override_service_type=null, # type: Optional[Str]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) 
-> JSON\n app_or_url = get_app_or_url(test_case)\n svc_name = override_service_name if override_service_name is not null else test_case.test_service_name\n svc_type = override_service_type if override_service_type is not null else test_case.test_service_type\n data = {\n \"service_name\": svc_name,\n \"service_type\": svc_type,\n \"service_url\": \"http://localhost:9000/{}\".format(svc_name)\n }\n if svc_name:\n test_case.extra_service_names.add(svc_name) # indicate potential removal at a later point\n resp = test_request(app_or_url, \"POST\", \"/services\", json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies,\n expect_errors=True)\n if resp.status_code == 409:\n path = \"/services/{svc}\".format(svc=svc_name)\n resp = test_request(app_or_url, \"GET\", path,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n body = check_response_basic_info(resp, 200, expected_method=\"GET\")\n if TestVersion(test_case.version) < TestVersion(\"0.9.1\"):\n body.update({\"service\": body[svc_name]})\n body.pop(svc_name)\n return body\n return check_response_basic_info(resp, 201, expected_method=\"POST\")", "def create_client(wsdl: str, raw_response: bool = True) -> CachingClient:\n # We want the raw response as there is an error when Zeep parses the XML\n settings: Settings = Settings(raw_response=raw_response)\n\n # Client that caches the WSDL\n client: CachingClient = CachingClient(\n wsdl=wsdl,\n # TODO: Store PW encrypted\n wsse=UsernameToken(\"n00394gz\", \"g427Ix19LMB\"),\n settings=settings,\n )\n logger.debug(f\"Client created\")\n\n return client", "def create_service_object(credentials):\n http_auth = httplib2.Http()\n http_auth = credentials.authorize(http_auth)\n service = discovery.build('analytics', 'v3', http=http_auth)\n return service", "def create_service(self, project_id, auth_token, service_json):\n try:\n flavor = self.flavor_controller.get(service_json.get('flavor_id'))\n # raise a lookup error if the flavor is not found\n except LookupError as e:\n raise e\n\n # add any default rules so its explicitly defined\n self._append_defaults(service_json, operation='create')\n\n # convert to an object\n service_obj = service.Service.init_from_dict(project_id, service_json)\n service_id = service_obj.service_id\n\n # validate the service\n service_json = service_obj.to_dict()\n schema = service_schema.ServiceSchema.get_schema(\"service\", \"POST\")\n validators.is_valid_service_configuration(service_json, schema)\n\n service_limit = self.storage_controller.get_service_limit(project_id)\n service_count = self.storage_controller.get_service_count(project_id)\n\n services_delete_in_progress = self.storage_controller.\\\n get_services_by_status('delete_in_progress')\n\n services_delete_count = len(services_delete_in_progress)\n\n # Check that the number of deleted services is less\n # than the total number of existing services for the project.\n # Adjust the service count removing delete_in_progress\n # services.\n service_count -= (\n services_delete_count\n if 0 < services_delete_count < service_count else 0\n )\n # service_count should always be a >= 0.\n\n if service_count >= service_limit:\n raise errors.ServicesOverLimit('Maximum Services '\n 'Limit of {0} '\n 'reached!'.format(service_limit))\n\n if any([domain for domain in service_obj.domains\n 
if domain.certificate == \"shared\"]):\n try:\n store = str(uuid.uuid4()).replace('-', '_')\n service_obj = self._shard_retry(project_id,\n service_obj,\n store=store)\n except errors.SharedShardsExhausted as e:\n raise e\n except ValueError as e:\n raise e\n\n try:\n self.storage_controller.create_service(project_id, service_obj)\n except ValueError as e:\n raise e\n\n providers = [p.provider_id for p in flavor.providers]\n kwargs = {\n 'providers_list_json': json.dumps(providers),\n 'project_id': project_id,\n 'auth_token': auth_token,\n 'service_id': service_id,\n 'time_seconds': self.determine_sleep_times(),\n 'context_dict': context_utils.get_current().to_dict()\n }\n\n self.distributed_task_controller.submit_task(\n create_service.create_service, **kwargs)\n\n return service_obj", "def _get_client(self):\n credentials = service_account.Credentials.from_service_account_info(self.service_account_info)\n client = googleapiclient.discovery.build('container', 'v1', credentials=credentials)\n\n return client", "def client(db):\n client = ClientFactory()\n db.session.commit()\n return client", "def create_client():\n host_api_id = Config.api_id\n host_api_hash = Config.api_hash\n host_user_id = Config.user_id\n host_phone = Config.phone\n\n client = TelegramClient(host_user_id, host_api_id, host_api_hash)\n client.connect()\n if not client.is_user_authorized():\n client.send_code_request(host_phone)\n client.sign_in(host_phone, input('Enter code sent to your telegram: '))\n return client", "def gen_nova_client(self):\n\n print \"\\t* Generating nova client\"\n client = nClient.get_client_class('2')\n self.novaclient = client(self.username,\n self.password,\n self.tenant_name,\n self.auth_url,\n service_type='compute')", "def client():\n return Client(**common_data.AUTH_ARGS)", "def __init__(self, service, acces_key, secret_key):\n \n self.client = boto3.client(\n service,\n aws_access_key_id=acces_key,\n aws_secret_access_key=secret_key,\n )", "def create_client(access_key_id, secret_access_key):\r\n client = boto3.client('s3',\r\n aws_access_key_id=access_key_id,\r\n aws_secret_access_key=secret_access_key)\r\n return client", "def _init_http_client(service_id=None, opts=None):\n if service_id:\n opts = _get_trs_opts(service_id)\n\n http_client = RequestsClient()\n\n http_client.set_api_key(host=opts['host'],\n api_key=opts['auth'],\n param_in='header')\n return http_client", "def service_client_initialization(self) -> global___Snippet.ClientInitialization:", "def test_client_create(self, mock_input, mock_pass):\n # Patch username and password.\n mock_input.return_value = \"user\"\n mock_pass.return_value = \"pass\"\n\n # Instantiate Agave object making reference to local mock server.\n local_uri = \"http://localhost:{port}/\".format(port=self.mock_server_port)\n ag = Agave(api_server=local_uri)\n\n # Create client.\n ag.clients_create(\"client-name\", \"some description\")\n\n assert ag.api_key == \"some api key\"\n assert ag.api_secret == \"some secret\"", "def create_new_client(main: MainApplication) -> str:\n client = main.create_window(\"client\", \"IPLMS\", main.client_ui.get_layout())\n client[\"_CLIENT_ID_\"].Update(getUUID())\n client[\"_CP_NAME_IP_\"].Update(\"\")\n client[\"_CP_PHONE_IP_\"].Update(\"\")\n client[\"_CP_ADDRESS_IP_\"].Update(\"\")\n client.un_hide()\n event, values = client.read()\n client_logic = Client(main, event, values)\n name = client_logic.run(main)\n client.hide()\n return name", "def _CreatePubsubClient():\n client = pubsub_client.PubSubClient()\n 
client.CreateTopic(DEVICE_NOTE_PUBSUB_TOPIC)\n client.CreateTopic(HOST_NOTE_PUBSUB_TOPIC)\n return client", "def get_service(api_name, api_version, scope, client_secrets_path):\n # Parse command-line arguments.\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args([])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n client_secrets_path, scope=scope,\n message=tools.message_if_missing(client_secrets_path))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage(api_name + '.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=httplib2.Http())\n\n # Build the service object.\n service = build(api_name, api_version, http=http)\n\n return service", "def create_clients(aws_key, aws_secret):\n ec2_client = boto3.resource(\n 'ec2', region_name=AWS_REGION, aws_access_key_id=aws_key,\n aws_secret_access_key=aws_secret\n )\n s3_client = boto3.resource(\n 's3', region_name=AWS_REGION, aws_access_key_id=aws_key,\n aws_secret_access_key=aws_secret\n )\n iam_client = boto3.client(\n 'iam', region_name=AWS_REGION, aws_access_key_id=aws_key,\n aws_secret_access_key=aws_secret\n )\n redshift_client = boto3.client(\n 'redshift', region_name=AWS_REGION, aws_access_key_id=aws_key,\n aws_secret_access_key=aws_secret\n )\n return ec2_client, s3_client, iam_client, redshift_client", "def _client(self):\n\n if self._suds_client is None:\n self._suds_client = suds.client.Client(SERVICE_WSDL_URL)\n # Add SOAP Security tokens\n self.set_security_token()\n\n return self._suds_client", "def get_client():\n return Client(__address, authkey='strumamor')", "def create_clients(client_name): # Crear nuevo Cliente\n global clients\n\n if client_name not in clients:\n clients.append(client_name)\n else:\n print('The client name is alredy in the client\\'s list')", "def newClient(self, cid, **kwargs):\n client = Iourt42Client(console=self.console, cid=cid, timeAdd=self.console.time(), **kwargs)\n self[client.cid] = client\n self.resetIndex()\n\n self.console.debug('Urt42 Client Connected: [%s] %s - %s (%s)', self[client.cid].cid, self[client.cid].name,\n self[client.cid].guid, self[client.cid].data)\n\n self.console.queueEvent(self.console.getEvent('EVT_CLIENT_CONNECT', data=client, client=client))\n\n if client.guid:\n client.auth()\n elif not client.authed:\n self.authorizeClients()\n return client", "def create(cls, client, fields, **kwargs):\n\t\tres = cls(client, fields, **kwargs)\n\t\treturn res", "def newSDDCService(**kwargs):\n # Test for interactive flag - if False, check to ensure additional arguments were give for service entry\n if kwargs['interactive'] is False and (kwargs['l4_protocol'] is None or kwargs['dest_ports'] is None):\n print(\"Error - if not using interactive mode, at least protocol and destination port(s) must be configured. 
Source port(s) optional, based on your application.\")\n sys.exit(1)\n elif kwargs['interactive'] is True and (kwargs['l4_protocol'] is not None or kwargs['dest_ports'] is not None or kwargs['source_ports'] is not None):\n print(\"Error - if using interactive mode, please only specify the name of the desired service. All other parameters will be obtained interactively.\")\n sys.exit(1)\n else:\n pass\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n service_id = kwargs['objectname']\n interactive = kwargs['interactive']\n\n if interactive == True:\n service_entry_list = []\n # Start a loop that will run until the user enters 'quit'.\n # Ask the user for a name.\n destination_port = \"\"\n while destination_port != 'done':\n destination_port_list = []\n source_port_list = []\n service_entry_id = input(\"Please enter the Service Entry ID:\")\n l4_protocol = input(\"Please enter the L4 Protocol:\")\n source_port = \"\"\n destination_port = \"\"\n while source_port != 'done':\n source_port = input(\"Plese enter the Source Ports or type 'done' when your list is finished:\")\n if source_port != \"done\":\n source_port_list.append(source_port)\n while (destination_port != 'next') and (destination_port != \"done\"):\n source_port = \"\"\n destination_port = input(\"Plese enter the Destination Ports, type 'next' when you want to define another service entry or 'done' if you have finished:\")\n if (destination_port != 'next') and (destination_port != \"done\"):\n destination_port_list.append(destination_port)\n service_entry = {\n \"l4_protocol\": l4_protocol,\n \"source_ports\": source_port_list,\n \"destination_ports\" : destination_port_list,\n \"resource_type\" : \"L4PortSetServiceEntry\",\n \"id\" : service_entry_id,\n \"display_name\" : service_entry_id }\n service_entry_list.append(service_entry)\n else:\n source_port_list = kwargs['source_ports']\n destination_port_list = kwargs['dest_ports']\n l4_protocol = kwargs['l4_protocol']\n service_entry_list = [\n {\n \"l4_protocol\": l4_protocol,\n \"source_ports\": source_port_list,\n \"destination_ports\": destination_port_list,\n \"resource_type\": \"L4PortSetServiceEntry\",\n \"display_name\": f'{service_id}_svc_entry'\n }\n ]\n json_data = {\n \"service_entries\":service_entry_list,\n \"id\" : service_id,\n \"display_name\" : service_id,\n }\n response = new_sddc_service_json(proxy,sessiontoken,service_id,json_data)\n if response == 200:\n print(f'Service {service_id} successfully updated.')\n params = {'proxy':proxy, 'sessiontoken':sessiontoken, 'objectname':service_id}\n getSDDCService(**params)\n else:\n print(\"Issues creating the service - please check your syntax and try again.\")\n sys.exit(1)", "def post(self, ws_id, project_id):\n service = servicesimpl.create_service(ws_id, project_id, get_json(request))\n return prepare_response(service, 201)", "def get_service(\n service_name: str,\n version: str = \"v1\",\n configuration: Configuration = None,\n secrets: Secrets = None,\n) -> Resource:\n return client(service_name, version=version, secrets=secrets)", "def client_setup(self):\n self.client = Client()", "def create_api_client(base_path, access_token):\n api_client = ApiClient()\n api_client.host = base_path\n api_client.set_default_header(header_name=\"Authorization\",\n header_value=f\"Bearer {access_token}\")\n return api_client", "def gen_neutron_client(self):\n\n print \"\\t* Generating neutron client\"\n self.neutronclient = neutronclient.Client(auth_url=self.auth_url,\n username=self.username,\n 
password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)", "def create_services(cls, proto_py_module, *service_names):\n\n return cls(proto_py_module, *service_names).services", "def make_client(db, hdfs_client=None):\n return ImpalaClient(db, hdfs_client=hdfs_client)", "def build_service():\n creds = None\n\n # the file token.json stores the user's access and refresh tokens, and is \n # created automatically when the authorization flow completes for the first time\n \n if os.path.exists('../creds/token.json'):\n creds = Credentials.from_authorized_user_file('../creds/token.json', SCOPES)\n\n # if there are no (valid) credentials, ask the user to login\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n '../creds/credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n with open('../creds/token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('drive', 'v3', credentials=creds)\n return service" ]
[ "0.801748", "0.78244793", "0.72532517", "0.7216976", "0.71908116", "0.71908116", "0.70718634", "0.7055989", "0.69973916", "0.6996517", "0.6937248", "0.68635684", "0.68541193", "0.68492603", "0.6825555", "0.67868036", "0.66969323", "0.6665927", "0.6663688", "0.6585112", "0.6581582", "0.6579658", "0.6529895", "0.65005505", "0.6482222", "0.6467781", "0.64329064", "0.6414261", "0.63963556", "0.6395781", "0.63946646", "0.6379576", "0.63593674", "0.63342196", "0.63320684", "0.633002", "0.63062704", "0.6286155", "0.6273915", "0.62580514", "0.62486017", "0.6247127", "0.6240954", "0.62365395", "0.6220226", "0.6219414", "0.62024796", "0.6177144", "0.61426616", "0.61358994", "0.6100425", "0.6082476", "0.60682803", "0.605948", "0.6059402", "0.6053242", "0.60343313", "0.6033299", "0.6017835", "0.59993064", "0.5995884", "0.59754145", "0.5956289", "0.5931459", "0.5924004", "0.5922091", "0.59182644", "0.590952", "0.58992004", "0.5897322", "0.58887273", "0.5885458", "0.5883872", "0.58724797", "0.5861407", "0.58542526", "0.58395606", "0.58294505", "0.5804527", "0.5798101", "0.5793488", "0.57817584", "0.5778538", "0.5778053", "0.5757338", "0.57508993", "0.57503605", "0.5745668", "0.57178354", "0.57074654", "0.5703485", "0.57010597", "0.5700976", "0.56952596", "0.5683107", "0.5662492", "0.56546277", "0.5652761", "0.5645203", "0.5642448", "0.5633304" ]
0.0
-1
Activates the specified MFA TOTP device for the user. Activation requires manual interaction with the Console.
def activate_mfa_totp_device(self, user_id, mfa_totp_device_id, mfa_totp_token, **kwargs): resource_path = "/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/activate" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "activate_mfa_totp_device got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id, "mfaTotpDeviceId": mfa_totp_device_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing), "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=mfa_totp_token, response_type="MfaTotpDeviceSummary") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=mfa_totp_token, response_type="MfaTotpDeviceSummary")
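A minimal usage sketch for the operation documented above (illustrative only, not part of the dataset fields). It assumes the OCI Python SDK's IdentityClient and its MfaTotpToken model; the config file location, both OCIDs, and the TOTP code are placeholder assumptions.

import oci

# Load credentials from the default ~/.oci/config profile (assumption).
config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# The one-time code comes from the user's authenticator app after they have
# registered the TOTP device seed via the Console (manual step noted above).
token_body = oci.identity.models.MfaTotpToken(totp_token="123456")  # placeholder code

response = identity.activate_mfa_totp_device(
    user_id="ocid1.user.oc1..exampleuniqueID",              # placeholder OCID
    mfa_totp_device_id="ocid1.credential.oc1..exampleuniqueID",  # placeholder OCID
    mfa_totp_token=token_body,
)
print(response.data)  # MfaTotpDeviceSummary, per the response_type in the method above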
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def activate_application_token(self, apptoken, temptoken) -> bool:\n await self.raw_request(\n self.URL_ACTIVATE.format(apptoken=apptoken, temptoken=temptoken)\n )\n return True", "def activate(request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.profile.email_confirmed = True\n user.save()\n login(request, user)\n return redirect('home')\n else:\n return render(request, 'registration/activation_invalid.html')", "def activate(request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.save()\n return render(request, 'accounts/active_done.html')\n else:\n return HttpResponse('Activation link is invalid!')", "def enable_mfa_device(self, user_name, serial_number,\r\n auth_code_1, auth_code_2):\r\n params = {'UserName' : user_name,\r\n 'SerialNumber' : serial_number,\r\n 'AuthenticationCode1' : auth_code_1,\r\n 'AuthenticationCode2' : auth_code_2}\r\n return self.get_response('EnableMFADevice', params)", "def activate_token(request, token):\n # Getting environment from settings\n debug = settings.DEBUG\n\n # Based on the debug redirect the user to correct url\n if debug:\n REDIRECT_URL = 'http://localhost:3000'\n else:\n REDIRECT_URL = 'https://leadbook-challenge.herokuapp.com'\n\n try:\n profile = Profile.objects.get(activation_key=token)\n profile.is_verified = True\n profile.save()\n except Profile.DoesNotExist:\n profile = None\n\n if profile:\n return HttpResponseRedirect('{}/activation/success'.format(REDIRECT_URL))\n else:\n return HttpResponseRedirect('{}/activation/failed'.format(REDIRECT_URL))", "def activate(request, uidb64, token):\r\n\ttry:\r\n\t\tuid = force_text(urlsafe_base64_decode(uidb64))\r\n\t\tuser = User.objects.get(pk=uid)\r\n\texcept (TypeError, ValueError, OverflowError, User.DoesNotExist):\r\n\t\tuser = None\r\n\r\n\tif user is not None and account_activation_token.check_token(user, token):\r\n\t\t# User activated and redirected to the homepage\r\n\t\tuser.is_active = True\r\n\t\tuser.profile.email_confirmed = True\r\n\t\tuser.save()\r\n\t\tlogin(request, user, backend='django.contrib.auth.backends.ModelBackend')\r\n\t\tgames = Game.objects.all()\r\n\t\treturn redirect('/', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})\r\n\telse:\r\n\t\treturn render(request, 'account_activation_invalid.html')", "def activate_user(request, uidb64, token):\n activation_session_token = '_activation_reset_token'\n activation_url_token = 'user-activation'\n title = \"Account activation\"\n context = {'title': 'Invalid Activation Link', 'isvalid': False}\n\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user and user.is_active:\n messages.success(request, 'The account is active.')\n return redirect('login')\n\n if request.method == 'GET':\n if token == activation_url_token:\n session_token = request.session.get(activation_session_token)\n if default_token_generator.check_token(user, session_token):\n # If the token is valid, display the password reset 
form.\n form = forms.ActivationForm(user=user)\n return render(request, 'user/activate_user.html', {\n 'form': form, 'title': title})\n else:\n if default_token_generator.check_token(user, token):\n # Store the token in the session and redirect to the\n # password reset form at a URL without the token. That\n # avoids the possibility of leaking the token in the\n # HTTP Referer header.\n request.session[activation_session_token] = token\n redirect_url = request.path.replace(token, activation_url_token)\n return HttpResponseRedirect(redirect_url)\n else:\n if token == activation_url_token:\n session_token = request.session.get(activation_session_token)\n form = forms.ActivationForm(user=user, data=request.POST)\n if form.is_valid() and default_token_generator.check_token(user, session_token):\n with transaction.atomic():\n user.set_password(form.cleaned_data['password1'])\n user.is_active = True\n # Check legacy credentials\n check_legacy_credentials(user, user.email)\n user.save()\n email = user.associated_emails.first()\n email.verification_date = timezone.now()\n email.is_verified = True\n email.save()\n request.session.pop(activation_session_token)\n logger.info('User activated - {0}'.format(user.email))\n messages.success(request, 'The account has been activated.')\n login(request, user)\n return redirect('project_home')\n return render(request, 'user/activate_user.html', {'form': form,\n 'title': title})\n\n return render(request, 'user/activate_user_complete.html', context)", "def activate():\n try:\n body = request.get_json()\n\n activate_token = body[\"activate_token\"]\n password = body[\"password\"]\n\n if len(password) < 3 or len(password) > 50:\n return bad_request()\n\n if not models.token_exists(activate_token):\n\n return bad_request()\n\n student_hash = models.create_hash(password)\n models.save_hash(student_hash, activate_token)\n\n except KeyError:\n return bad_request()\n except Exception as e:\n print(e)\n return server_error()\n\n return created()", "def account_activate(request, uidb64, token):\r\n try:\r\n # decode the user's id and get the user by id.\r\n user_id = smart_str(urlsafe_base64_decode(uidb64))\r\n user = get_object_or_404(User, id=user_id)\r\n if user.is_active:\r\n # Display already activated account message\r\n messages.success(request, f'Your Account already activated. You can login.', extra_tags='activation-valid')\r\n # check if the token is valid.\r\n elif account_activation_token.check_token(user, token):\r\n user.is_active = True\r\n # user.previously_logged_in = True\r\n user.save()\r\n # Display activation success message\r\n messages.success(request, f'Your Account has been activated successfully. Now you can login.', extra_tags='activation-valid') \r\n else:\r\n # Display error message.\r\n messages.error(request, f'The activation link is invalid. Please request a new one.', extra_tags='activation-invalid') \r\n except DjangoUnicodeDecodeError:\r\n # Display error message.\r\n messages.error(request, f'The activation link is invalid. 
Please request a new one.', extra_tags='activation-invalid') \r\n return redirect('accounts:login')", "def req_display_otp(self):\n\n ret = self.ui_auth.create_new_one_time_pwd()\n if ret is not None:\n self.error_msg_queue_list.append(ret)", "def activate(request, activation_key, template_name='registration/activate.html'):\n activation_key = activation_key.lower() # Normalize before trying anything with it.\n account = RegistrationProfile.objects.activate_user(activation_key)\n account.is_active = True\n account.save()\n return render(request, template_name,\n { 'account': account,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS })", "def activate_account(self):\n self.driver.execute_script(\"window.scrollTo(0, 1000)\")\n self.click_on_element_by_css(tep.ACTIVATION_LINK)\n self.click_on_element_by_css(tep.ACTIVATION_BUTTON)", "def activate_factor(self, state_token, factor_id, passcode, relay_state=None):\n request = {\n 'stateToken': state_token,\n 'passCode': passcode,\n 'relayState': relay_state\n }\n\n response = ApiClient.post_path(self, '/factors/{0}/lifecycle/activate'.format(factor_id), request)\n return Utils.deserialize(response.text, AuthResult)", "async def set_mfa_and_connect(self, mfa_input: str):\n await self._set_mfa_code(mfa_input)\n await asyncio.sleep(10)\n await self._set_products()", "def check_for_activate(self):\n try:\n # Attempt to activate. If the user has completed pairing on the,\n # backend, this will succeed. Otherwise it throws and HTTPError()\n\n token = self.data.get(\"token\")\n login = self.api.activate(self.state, token) # HTTPError() thrown\n\n # When we get here, the pairing code has been entered on the\n # backend and pairing can now be saved.\n # The following is kinda ugly, but it is really critical that we\n # get this saved successfully or we need to let the user know that\n # they have to perform pairing all over again at the website.\n try:\n IdentityManager.save(login)\n except Exception as e:\n self.log.debug(\"First save attempt failed: \" + repr(e))\n time.sleep(2)\n try:\n IdentityManager.save(login)\n except Exception as e2:\n # Something must be seriously wrong\n self.log.debug(\"Second save attempt failed: \" + repr(e2))\n self.abort_and_restart()\n\n if mycroft.audio.is_speaking():\n # Assume speaking is the pairing code. Stop TTS of that.\n mycroft.audio.stop_speaking()\n\n self.enclosure.activate_mouth_events() # clears the display\n\n # Notify the system it is paired\n self.gui.show_page(\"pairing_done.qml\", override_idle=False)\n self.bus.emit(Message(\"mycroft.paired\", login))\n\n self.pairing_performed = True\n with self.pair_dialog_lock:\n if self.mycroft_ready:\n # Tell user they are now paired\n self.speak_dialog(self.paired_dialog)\n mycroft.audio.wait_while_speaking()\n else:\n self.speak_dialog(\"wait.for.startup\")\n mycroft.audio.wait_while_speaking()\n\n # Un-mute. Would have been muted during onboarding for a new\n # unit, and not dangerous to do if pairing was started\n # independently.\n self.bus.emit(Message(\"mycroft.mic.unmute\", None))\n\n # Send signal to update configuration\n self.bus.emit(Message(\"configuration.updated\"))\n\n # Allow this skill to auto-update again\n self.reload_skill = True\n except HTTPError:\n # speak pairing code every 60th second\n with self.counter_lock:\n if self.count == 0:\n self.speak_code()\n self.count = (self.count + 1) % 6\n\n if time.monotonic() > self.time_code_expires:\n # After 20 hours the token times out. 
Restart\n # the pairing process.\n with self.counter_lock:\n self.count = -1\n self.data = None\n self.handle_pairing()\n else:\n # trigger another check in 10 seconds\n self.__create_activator()\n except Exception as e:\n self.log.debug(\"Unexpected error: \" + repr(e))\n self.abort_and_restart()", "def send_mfa(\n self,\n form: object = None, # noqa: ARG002\n code: str = \"\",\n trusted_device: bool = True,\n ) -> None:\n el_otp = self._driver.find_element(By.CSS_SELECTOR, \"input[name=otc]\", timeout=5)\n el_otp.clear()\n el_otp.send_keys(code)\n\n el_verify = self._driver.find_element(By.CSS_SELECTOR, \"input[type=submit]\", timeout=5)\n if el_verify.accessible_name != \"Verify\":\n msg = f'{self.__class__.__name__}: Cannot find \"Verify\" button'\n raise IdpError(msg)\n\n if trusted_device:\n el_verify.click()\n\n self._stay_signed_in()", "def activate_user(self, email):\r\n activation_key = Registration.objects.get(user__email=email).activation_key\r\n # and now we try to activate\r\n check_for_get_code(self, 200, reverse('activate', kwargs={'key': activation_key}))\r\n # Now make sure that the user is now actually activated\r\n self.assertTrue(User.objects.get(email=email).is_active)", "def _activate_user(self, email):\r\n activation_key = registration(email).activation_key\r\n\r\n # and now we try to activate\r\n resp = self.client.get(reverse('activate', kwargs={'key': activation_key}))\r\n return resp", "def activate(ctx: CLIContext, access_key):\n with Session() as session:\n try:\n data = session.KeyPair.activate(access_key)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='activation',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='activation',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n extra_info={\n 'access_key': access_key,\n },\n )", "def activate_user(activation_code, new_password):\n um = logic.UserManager()\n try:\n user = um.lookup_user_by_activation_code(activation_code)\n user.activate()\n user.set_password(new_password)\n except ex.UserNotFoundError:\n blogger.debug(\"no user found with activation code %s\" % activation_code)\n transaction.abort()\n return dict(activated=False)\n else:\n transaction.commit()\n return dict(activated=True)", "def dev_dial(action):\n\n try:\n client = AMIClient(address=AUTH_CREDS['address'], port=AUTH_CREDS['port'])\n client.login(username=AUTH_CREDS['username'], secret=AUTH_CREDS['secret'])\n\n future = client.send_action(action)\n if VERBOSE:\n print(future.response or \"None\")\n\n client.logoff()\n\n except Exception as e:\n print(\"Error: %s\" % e.strerror)\n sys.exit(1)", "def activate(self, *args, **kwargs):\n username = self.validate_key(kwargs.get(\"activation_key\"))\n user = self.get_user(username)\n user.is_active = True\n user.save()\n return user", "def add_user(self):\n\n pin, code = self.get_auth_pin() \n print(\"Enter the PIN '{}' into the Add Application window and click Add Application\".format(pin))\n input(\"waiting press enter to continue...\")\n\n access_token, refresh_token = self.get_tokens(code)\n user_id = self.tokens.get_next_user_id()\n self.tokens.insert_user(user_id, access_token, refresh_token)\n tstat_ids = self.get_tstat_ids(access_token)\n for tstat_id in tstat_ids:\n logger.info(\"Adding Thermostat ID: {}\".format(tstat_id))\n self.tokens.insert_tstat(user_id, tstat_id)", "def send_otp_to_primary_mobile(otp, mobile):\n print('Sending otp to 
mobile: ', otp, mobile)", "def user_activation(user):\n act_hash = random_password(32)\n user.set_hashword(act_hash)\n user.save()\n base_url = url_for('public.home', _external=True)\n act_url = url_for(\n 'auth.activate',\n userid=user.id,\n userhash=act_hash,\n _external=True)\n if not 'mailman' in current_app.extensions:\n logging.warning('E-mail extension has not been configured')\n return act_hash\n msg = EmailMessage()\n msg.subject = 'Your dribdat account'\n msg.body = \\\n \"Hello %s,\\n\" % user.username \\\n + \"Thanks for signing up at %s\\n\\n\" % base_url \\\n + \"Tap here to activate your account:\\n\\n%s\" % act_url\n msg.to = [user.email]\n logging.info('Sending activation mail to user %d' % user.id)\n logging.debug(act_url)\n msg.send(fail_silently=True)\n return act_hash", "def activate(self) -> None:\n self._bot.inject_flows_from(self)\n self.is_activated = True", "def login_on_activation(sender, user, request, **kwargs):\n user.backend = 'storybase_user.auth.backends.EmailModelBackend'\n login(request, user)", "def _turn_on(self):\n logger.info(\"Check antenna power\")\n power = yield WaitDBus(self.gsm_device.GetAntennaPower)\n logger.info(\"antenna power is %d\", power)\n if power:\n yield None\n logger.info(\"turn on antenna power\")\n try:\n yield WaitDBus(self.gsm_device.SetAntennaPower, True)\n except dbus.exceptions.DBusException, ex:\n if ex.get_dbus_name() != 'org.freesmartphone.GSM.SIM.AuthFailed':\n raise\n yield self._ask_pin()", "def turn_on_modem(self):\n if not self.is_power_on():\n self._logger.debug(\"Switching modem on...\")\n self.set_pin()\n # give modem some time to login\n time.sleep(10)\n else:\n self._logger.debug(\"Modem is already powered on...\")", "def activate_profile(field, code, request):\n try:\n activation = ActivationProfile.objects.get(**{field:code})\n except ActivationProfile.DoesNotExist:\n messages.error(request, _('Activation code expired or not valid!'))\n return False\n if timezone.now() < activation.valid_through:\n activation.user.is_active = True\n activation.user.set_unusable_password()\n activation.user.save()\n if request.user.is_anonymous():\n if field == 'token':\n user = authenticate(username=activation.user.username, token=activation.token)\n elif field == 'sms_key':\n user = authenticate(username=activation.user.username, code=activation.sms_key)\n else:\n user = None\n activation.delete()\n if user:\n login(request, user)\n messages.success(request, _(\"\"\"Profile activated successfully! You should change your password!\"\"\"))\n return True\n else:\n return False\n else:\n messages.success(request, _(\"\"\"You already have an account!\"\"\"))\n return False", "def activate(self):\r\n if self.activation_code == '':\r\n raise ValidationError('The member is already activated')\r\n signer = TimestampSigner()\r\n signer.unsign(self.activation_code, max_age=timedelta(days=2))\r\n self.hidden = False\r\n self.activation_code = ''\r\n self.joined_date = timezone.now()\r\n self.save()", "def sendOTP(code):\n # Modify the code here to change from print to any output \n print(\"Your OTP is \" + code + \". 
Kindly do not share it with anyone\")", "def activate_account_api():\n\n # get the data for this query\n data = request.get_json()\n if not data:\n response = jsonify({\n 'success': False,\n 'message': 'Missing request body'\n })\n response.status_code = 422\n return response\n\n # process arguments\n arg_email = data.get('email').strip().lower()\n\n # check if there is a user with this activation_link\n secret_link = data.get('secret_link')\n user = db.session.query(User).filter(\n User.activation_link == secret_link,\n ).one_or_none()\n if not user:\n response = jsonify({\n 'success': False,\n 'message': 'This activation link is no longer active. Contact your system administrator to receive a new one.'\n })\n response.status_code = 200\n return response\n\n # check if this user has already activated their account\n if user.activated:\n response = jsonify({\n 'success': False,\n 'message': 'This account has already been activated. Try forgot password to recover your password.'\n })\n response.status_code = 200\n return response\n\n # check if the correct email address was supplied\n if user.email != arg_email:\n response = jsonify({\n 'success': False,\n 'message': 'This is not the correct email for this activation link. Contact your system administrator to request a link for this email.'\n })\n response.status_code = 200\n return response\n\n # generate and set new password\n new_password = generate_password_hash(data.get('password'))\n user.password = new_password\n user.activated = True\n db.session.add(user)\n db.session.commit()\n\n # log that a user just activated their account\n _log('++ {} just activated their account'.format(user.email), '_signup')\n\n # return authenticated token\n token = generate_auth_token(user_id=user.user_id)\n response = jsonify({\n 'success': True,\n 'token': token\n })\n response.status_code = 200\n return response", "def apply_auth():\n\tclient = BaiduOpenApi()\n\tapi = client.device.code\n\tresp = client.device.code.get(response_type=\"device_code\", scope=\"netdisk\")\n\t# open grant page and wait for user confirm\n\twebbrowser.open_new_tab(r\"http://openapi.baidu.com/device?code=%s\"%resp[\"user_code\"])\n\t# yield to main\n\tyield\n\t# main will tell user to confirm and it will take a while\n\t# polling to wait server back\n\tpolling_tokens(resp[\"device_code\"], resp[\"interval\"], resp[\"expires_in\"])", "def activate(self):\n if not self.is_active:\n self.is_active = True\n self.activated_at = datetime.datetime.utcnow()\n import messaging # avoid circular import\n messaging.send_activated_emails(self)\n self.save()", "def activate_pair(device_pair):\n command = 'pair_activate \"%s\"' % (device_pair.identifier,)\n _run_command(command)", "def activate_task(self, task_id):\n if task_id is None:\n print('ERROR: task_id cannot be \"None\".')\n return False\n # Else, the task was specified\n if task_id in self.task_dict:\n self.task_dict[task_id].is_activated = True\n # Update the activation state\n self._update_activation_state()\n return True\n else:\n # Something wrong, agent was not in the dict\n print('ERROR: The task <%s> is not in the task_dict at agent <%d>.' 
% (str(task_id), self.agent_id))\n return False", "def activate(self):\r\n self.update_enrollment(is_active=True)", "def confirm(id):\n #: get resources\n user = User.query.get_or_404(id)\n service = SignUpService(user)\n input_token = request.args['token']\n\n #: active current account\n try:\n service.active(input_token)\n except TokenUsedError:\n message = _(u\"The account had been actived.\")\n return render_template(\"confirm-failed.html\", message=message), 403\n except TokenWrongError:\n message = _(u\"The active token is invalid.\")\n return render_template(\"confirm-failed.html\", message=message), 403\n\n #: automatic sign in\n session_login(user)\n #: output a success message\n message = _(u\"The account has been actived successfully.\")\n return render_template(\"confirm-success.html\", message=message)", "def test_yes_option_enabled(\n self, wait_tx_settled_mock, confirm_mock, do_transfer_mock\n ):\n password_option = self.get_password_args(self.PASSWORD)\n self.invoke(\n \"transfer\",\n self.LEDGER_ID,\n self.get_address(self.LEDGER_ID, self.PASSWORD),\n \"100000\",\n \"100\",\n \"-y\",\n *password_option,\n )\n confirm_mock.assert_not_called()", "def activate_user(self, user):\n if not user.active:\n user.active = True\n return True\n return False", "def activate_user(username, code, new_pass):\r\n\r\n qry = Activation.query.\\\r\n filter(Activation.code == code).\\\r\n filter(User.username == username)\r\n\r\n res = qry.first()\r\n\r\n if UserMgr.acceptable_password(new_pass) and res is not None:\r\n user = res.user\r\n user.activated = True\r\n user.password = new_pass\r\n res.activate()\r\n\r\n LOG.debug(dict(user))\r\n\r\n return True\r\n else:\r\n return None", "async def async_press(self) -> None:\n try:\n await self.entity_description.press_func(self.device)\n except DevicePasswordProtected as ex:\n self.entry.async_start_reauth(self.hass)\n raise HomeAssistantError(\n f\"Device {self.entry.title} require re-authenticatication to set or change the password\"\n ) from ex\n except DeviceUnavailable as ex:\n raise HomeAssistantError(\n f\"Device {self.entry.title} did not respond\"\n ) from ex", "def activate(request, activation_key,\r\n template_name='registration/activate.html',\r\n extra_context=None):\r\n activation_key = activation_key.lower() # Normalize before trying anything with it.\r\n account = RegistrationProfile.objects.activate_user(activation_key)\r\n if extra_context is None:\r\n extra_context = {}\r\n context = RequestContext(request)\r\n for key, value in extra_context.items():\r\n context[key] = callable(value) and value() or value\r\n return render_to_response(template_name,\r\n { 'account': account,\r\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS },\r\n context_instance=context)", "def login_auto(token: Token, mac_address: str) -> bool:\n device = Device.get_by_mac(mac_address)\n fail_msg = \"\"\n if device is None:\n fail_msg = \"No device with mac address exist\"\n elif Token.is_token_expired(device.token_expires):\n fail_msg = \"Token is expired\"\n elif token != device.token:\n fail_msg = \"Wrong token\"\n if fail_msg:\n client_logger_security().info(f\"Failed to login automatically: {fail_msg}\")\n return False\n else:\n _set_user_authenticated(device.user_id, device.device_id)\n client_logger_security().info(f\"Successfully logged in automatically: device_id={device.device_id}, \"\n f\"user_id={device.user_id}\")\n return True", "def do_mfa_verify(mfa_info):\n headers = {\n \"Content-Type\": \"application/json\",\n \"Origin\": 
\"https://%s.auth0.com\"%TENANT,\n \"Authorization\": \"Bearer %s\"%mfa_info[\"requestToken\"],\n \"x-global-tracking-id\": mfa_info[\"globalTrackingId\"]\n }\n request = urllib.request.Request(\n \"%s/api/start-flow\"%mfa_info[\"mfaServerUrl\"],\n data=json.dumps({ \"state_transport\": \"polling\" }).encode(),\n method=\"POST\",\n headers=headers)\n try:\n response = urllib.request.urlopen(request)\n result = response.read().decode()\n except urllib.error.HTTPError as e:\n error = e.read().decode()\n raise RuntimeError(\"MFA start flow error: %s\"%error) from None\n mfa_flow_info = json.loads(result)\n mfa_transaction_token = mfa_flow_info[\"transaction_token\"]\n # print(mfa_flow_info)\n # print(mfa_transaction_token)\n\n mfa_code = input(\"Please enter your MFA verification code: \")\n mfa_payload = {\n \"code\": mfa_code,\n \"type\": \"manual_input\"\n }\n mfa_payload_json = json.dumps(mfa_payload).encode()\n headers = {\n \"Content-Type\": \"application/json\",\n \"Origin\": \"https://%s.auth0.com\"%TENANT,\n \"Authorization\": \"Bearer %s\"%mfa_transaction_token,\n \"x-global-tracking-id\": mfa_info[\"globalTrackingId\"]\n }\n request = urllib.request.Request(\n \"%s/api/verify-otp\"%mfa_info[\"mfaServerUrl\"],\n data=mfa_payload_json,\n method=\"POST\",\n headers=headers)\n try:\n response = urllib.request.urlopen(request)\n result = response.read().decode()\n except urllib.error.HTTPError as e:\n error = e.read().decode()\n raise RuntimeError(\"MFA verify error: %s\"%error) from None\n # print(result)\n\n headers = {\n \"Origin\": \"https://%s.auth0.com\"%TENANT,\n \"Authorization\": \"Bearer %s\"%mfa_transaction_token,\n \"x-global-tracking-id\": mfa_info[\"globalTrackingId\"]\n }\n request = urllib.request.Request(\n \"%s/api/transaction-state\"%mfa_info[\"mfaServerUrl\"],\n method=\"POST\",\n headers=headers)\n try:\n response = urllib.request.urlopen(request)\n result = response.read().decode()\n except urllib.error.HTTPError as e:\n error = e.read().decode()\n raise RuntimeError(\"Get MFA result error: %s\"%error) from None\n mfa_result = json.loads(result)\n if mfa_result[\"state\"] != \"accepted\":\n raise RuntimeError(\"MFA verification is not accepted: %s\"%result)\n # print(mfa_result)\n\n return mfa_result", "async def async_turn_on_when_active(self, **kwargs: Any) -> None:\n await self._data.controller.programs.start(self.entity_description.uid)\n self._update_activities()", "def send_mfa(self):\n\n try:\n response = self.post(\"/authentication/loginToken\",\n {\"user\": self.user, \"password\": self.password, \"TempCode\": True})\n except:\n print(\"Exception - unable to submit token request\")\n return False\n return True if response.status_code in [200, 204] else False", "def set_time_based_otp(self, otp_passcode: str):\n self._totp = TOTP(otp_passcode)", "def activateWebAppUser( self, username, activation_code ):\n try:\n con = self.getMetadataDatabaseConnection()\n user_data = con.cursor()\n\n con.cursor().callproc('verify_user_activation_code', [username, activation_code, user_data])\n row = user_data.fetchone()\n if row:\n con.cursor().callproc('activate_user_account', [username])\n return True\n else:\n return False\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def test_user_activation(self):\n user = User.objects.get()\n response = self.client.get(reverse('accounts:user-activate',\n kwargs={'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user)}))\n 
self.assertEqual(response.status_code, status.HTTP_200_OK)", "def activate(request, activation_key,template_name='registration/activate.html',extra_context=None):\n\tactivation_key = activation_key.lower() # Normalize before trying anything with it.\n\taccount = RegistrationProfile.objects.activate_user(activation_key)\n\t\n\t\n\t#new profile PROBLEME NON ENREGISTREMENT DU PROFILE\n\t#recuperer l user id de l'account user.id\n\tprofile = UserProfile();\n\tprofile.user = account\n\tprofile.save()\n\t\n\t\n\tif extra_context is None:\n\t\textra_context = {}\n\tcontext = RequestContext(request)\n\tfor key, value in extra_context.items():\n\t\tcontext[key] = callable(value) and value() or value\n\treturn render_to_response(template_name,{ 'account': account,'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS }, context_instance=context)", "def active_user(request, uidb36=None, token=None,\r\n template_name='register/activation_confirm.html',\r\n token_generator=default_token_generator,\r\n current_app=None, extra_context=None):\r\n assert uidb36 is not None and token is not None # checked by URLconf\r\n \r\n try:\r\n uid_int = base36_to_int(uidb36)\r\n user = User.objects.get(id=uid_int)\r\n except (ValueError, User.DoesNotExist):\r\n user = None\r\n\r\n if user is not None and token_generator.check_token(user, token):\r\n validlink = True\r\n user.is_active = True\r\n user.save()\r\n \r\n #初始化userprofile\r\n profile_count = UserProfile.objects.filter(user=user).count()\r\n if profile_count == 0:\r\n profile = UserProfile()\r\n profile.user = user\r\n profile.song_ord_filed = 'post_datetime'\r\n profile.save()\r\n else:\r\n validlink = False\r\n context = {\r\n 'validlink': validlink,\r\n }\r\n context.update(extra_context or {})\r\n return render_to_response(template_name, context,\r\n context_instance=RequestContext(request, current_app=current_app))", "def enable_call_waiting(self, dtmf_code: str) -> None:", "def activate_user(self, user):\n if not user.active:\n user.active = True\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True\n\n return", "async def _autopaired(self, new_user_apc_token: str):\n self._user_apc_token = new_user_apc_token\n self._fconfigure()", "def test_activate_active_user(self):\n activate_user(self.user, self.request)\n self.assertEqual(self.user.is_active, True)", "def test_create_user_auto_activate(self, services):\n data = {\n 'username': 'John',\n 'email': 'John@mailinator.com',\n 'password': 'test123!',\n 'phone': '1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'university': {\n \"name\": \"random_university\"\n },\n 'academic_field': {'name': \"random_field\"},\n 'academic_level': {'name': \"random_level\"},\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(json.loads(response.content)['phone'], '1234567890')\n\n user = User.objects.get(email=\"John@mailinator.com\")\n activation_token = ActionToken.objects.filter(\n user=user,\n type='account_activation',\n )\n\n self.assertTrue(user.is_active)\n self.assertEqual(1, len(activation_token))\n\n # Test that no email was sent:\n self.assertEqual(len(mail.outbox), 0)", "def account_activate(request):\r\n params = request.params\r\n\r\n username = params.get('username', None)\r\n activation = params.get('code', None)\r\n password = params.get('password', None)\r\n new_username = 
params.get('new_username', None)\r\n\r\n if username is None and activation is None and password is None:\r\n # then try to get the same fields out of a json body\r\n json_body = request.json_body\r\n username = json_body.get('username', None)\r\n activation = json_body.get('code', None)\r\n password = json_body.get('password', None)\r\n new_username = json_body.get('new_username', None)\r\n\r\n if not UserMgr.acceptable_password(password):\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'error': \"Come on, pick a real password please\",\r\n })\r\n\r\n username = username.lower()\r\n new_username = new_username.lower() if new_username else None\r\n res = ActivationMgr.activate_user(\r\n username,\r\n activation,\r\n password)\r\n\r\n if res:\r\n # success so respond nicely\r\n AuthLog.reactivate(username, success=True, code=activation)\r\n\r\n # if there's a new username and it's not the same as our current\r\n # username, update it\r\n if new_username and new_username != username:\r\n try:\r\n user = UserMgr.get(username=username)\r\n user.username = new_username\r\n except IntegrityError, exc:\r\n request.response.status_int = 500\r\n return _api_response(request, {\r\n 'error': 'There was an issue setting your new username',\r\n 'exc': str(exc)\r\n })\r\n\r\n return _api_response(request, {\r\n 'message': \"Account activated, please log in.\",\r\n 'username': username,\r\n })\r\n else:\r\n AuthLog.reactivate(username, success=False, code=activation)\r\n request.response.status_int = 500\r\n return _api_response(request, {\r\n 'error': \"There was an issue attempting to activate this account.\",\r\n })", "def activate(request, activation_key):\n profile = get_object_or_404(User, activation_key=activation_key)\n if profile.akey_expires < timezone.now():\n return render('user_account/activate.html', {'expired': True})\n\n profile.save(update_fields=['active', 'activation_key'])\n return render(\n 'user_account/activate.html',\n {'success': True, 'name': profile.name + \" \" + profile.surname}\n )", "def begin_trial(self):\n self._post(endpoint='{}/cm/trial/begin'.format(self.api_version))", "def enable_radio(self):\n self.acquire_response(b'AT*R1')", "def do_masterpassword(self, masterpassword):\n if masterpassword:\n if self.taskstarted == True:\n self.masterpassword = masterpassword\n else:\n if self.taskstarted == False:\n print(self.cl.red(\"[!] <ERROR> You need to start a new KeePass Interaction.\"))\n print(self.cl.red(\"[!] <ERROR> Start this with 'new' from the menu.\"))\n print(\"[!] 
<ERROR> You need to supply the command for typing\")", "def auth_active(hass):\n hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )", "async def enter_confirmation_сode(self):\n raise VkTwoFactorCodeNeeded()", "def activate_user(self, username):\n args = parser_activate.parse_args()\n isActive = request.json.get('isactive')\n\n query = \"\"\"UPDATE users SET isactive=%s WHERE username=%s\"\"\"\n values = isActive, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return True", "def activate(self):\n\t\tself.flash()\n\t\tself.do_action()", "def test_activate_user(self):\n activated_user = (RegistrationProfile.objects\n .activate_user(self.activation_key))\n self.assertTrue(activated_user.registrationprofile.activated)\n self.assertFalse(activated_user.is_active)", "def enable_aaa_password_restriction(device):\n cmd=\"aaa password restriction\"\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure aaa password restriction:\\n{e}'\n )", "def test_pm_profile_activate(profile_manager, test_profile):\n\n profile_manager.activate(test_profile.name)\n assert profile_manager.is_active(test_profile.name)\n profile = profile_manager.get_active_profile()\n assert profile.name == test_profile.name\n assert profile.path == test_profile.path", "def activate():\n refresh()\n activate_connection_with_mainloop(get_uuid())", "def enable_aaa_authentication_login(device,auth_list,auth_db1,auth_db2=None):\n\n cmd = f'aaa authentication login {auth_list} {auth_db1}'\n if auth_db2:\n cmd += f' {auth_db2}'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure aaa authentication login:\\n{e}'\n )", "def authenticate_bluetooth(self):\n data = self.blu.main()\n if bool(data) == True:\n authentication = self.client.validate_mac(\n data[\"mac_address\"], data[\"email\"]).decode(\"utf-8\")\n if authentication == \"valid\":\n self.current_email = data[\"email\"]\n self.unlock_time = round(datetime.now().timestamp())\n self.display_successful_unlock_eng()\n elif authentication == \"invalid\":\n print(self.INVALID_USER)\n time.sleep(3)\n self.display_main()\n else:\n self.display_eng()", "def admin_action(self):\n SCREEN_MANAGER.current = 'passCode'", "def activate_user(self, activation_key):\n if SHA1_RE.search(activation_key):\n try:\n profile = RegistrationProfile.objects.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return False\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = True\n user.save()\n profile.activation_key = \"ALREADY_ACTIVATED\"\n profile.save()\n return user\n\n return False", "def signup_active(request, uidb36=None, token=None,\n post_activation_redirect=None,\n token_generator=default_token_generator,\n domain_override=None, use_https=False):\n assert uidb36 is not None and token is not None\n if post_activation_redirect is None:\n post_activation_redirect = reverse('amscms.core.views.signup_active_done')\n try:\n uid_int = base36_to_int(uidb36)\n user = User.objects.get(id=uid_int)\n except (ValueError, User.DoesNotExists):\n user = None\n \n if user is not None and token_generator.check_token(user, token):\n user.is_active = True\n user.save()\n \"\"\"\n Sends successful email to the user. 
\n \"\"\"\n if not domain_override:\n current_site = Site.objects.get_current()\n site_name = current_site.name\n domain = current_site.domain\n else:\n site_name = domain = domain_override\n c = {\n 'subject': _(u\"Registration was successful on %(site_name)s\" % {'site_name': site_name, }),\n 'site_name': site_name,\n 'user': user,\n 'domain': domain,\n 'protocol': use_https and 'https' or 'http',\n 'login_url': reverse('django.contrib.auth.views.login'),\n }\n send_email(user.email, c, settings.DEFAULT_FROM_EMAIL,\n \"registration/signup_email_activated.txt\",\n \"registration/signup_email_activated.html\")\n \n else:\n messages.error(request, _(u\"Invalid activation link, you may already activated, try to login. \"))\n return HttpResponseRedirect(\"/\")\n return HttpResponseRedirect(post_activation_redirect)", "def activate_user(cls, activation_key):\n #from registration.signals import user_activated\n \n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n db = DB_Session()\n if SHA1_RE.search(activation_key):\n query = db.query(RegistrationProfile)\n profile = query.filter(RegistrationProfile.activation_key == activation_key).one()\n if not profile:\n return False\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = 1\n profile.activation_key = RegistrationProfile.ACTIVATED\n db.flush()\n db.commit()\n db.close()\n #user_activated.send(sender=self.model, user=user)\n return user\n return False", "def mifare_auth_a(self,address,key_a):\n if self._uid == False:\n raise RuntimeError(\"No Mifare card currently activated.\")\n if len(self._uid) == 4:\n uid = self._uid\n elif len(self._uid) == 7: # 10-byte UID cards don't exist yet.\n uid = self._uid[3:7] # Sequence 1, keep it simple.\n self.in_data_exchange(bytearray([MIFARE_COMMAND_AUTH_A,address]) + key_a + uid)", "def activate_user(self, activation_key, request=None):\n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n if SHA1_RE.search(activation_key):\n try:\n profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n profile = None\n statsd.incr('user.activate-error.does-not-exist')\n reason = 'key not found'\n if profile:\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = True\n user.save()\n\n # We don't need the RegistrationProfile anymore, delete it.\n profile.delete()\n\n # If user registered as contributor, send them the\n # welcome email.\n if user.groups.filter(name=CONTRIBUTOR_GROUP):\n self._send_email(\n confirmation_profile=profile,\n url=None,\n subject=_('Welcome to SUMO!'),\n text_template='users/email/contributor.ltxt',\n html_template='users/email/contributor.html',\n send_to=user.email,\n contributor=user)\n\n return user\n else:\n statsd.incr('user.activate-error.expired')\n reason = 'key expired'\n else:\n statsd.incr('user.activate-error.invalid-key')\n reason = 'invalid key'\n\n log.warning(u'User activation failure ({r}): {k}'.format(\n r=reason, k=activation_key))\n\n return False", "def user(self, user_token, user_device=None):\n self.set('user', user_token)\n self.set('device', user_device)", "def confirm_start(self, player=str):\n self.clear_screen()\n print(\"\\n\" * 11)\n pass_text = \"Pass the device to \" + player\n print(f\"{pass_text : ^100}\")\n input(f\"{'Press ENTER when ready.' 
: ^100}\")\n return self.stop_game", "def check_mfa(self) -> None:\n try:\n mfa_form = self._driver.find_element(By.CSS_SELECTOR, \"form[name=form]\", timeout=5)\n self._driver.find_element(By.CSS_SELECTOR, \"input[name=otc]\", timeout=5)\n self._halo.stop()\n mfacode = self._csc.prompt_for(\"MFA Code\")\n self._halo.start(SPINNER_MSGS[\"mfa_send\"])\n self.send_mfa(form=mfa_form, code=mfacode)\n self._halo.start(SPINNER_MSGS[\"token_refresh\"])\n self._driver.dump_cookies()\n except selenium.common.exceptions.TimeoutException:\n pass", "def test_activate_authenticated(client):\n employee = factories.EmployeeFactory(\n company=factories.CompanyFactory(),\n account_status=False\n )\n with client.session_transaction() as session:\n session[\"user_id\"] = employee.id\n g.user = employee\n response = client.post(\"/auth/activate\")\n assert b\"<h1>Successfully activated your account.</h1>\" in response.data\n assert employee.account_status\n assert response.status_code == HTTPStatus.OK", "def enable_call_forwarding_busy(self, dtmf_code: str, number: str) -> None:", "def activate(self):\n if not self._env.enable_registration:\n return\n legacy_key = '{}:{}'.format(self._env.flask_host, self._env.flask_port)\n self._key = self._env.get('my_ident', legacy_key, 'microservice')\n LoopingCall(self.ping).start(5, now=False)", "def device_action(self, client, action):\r\n client.deviceAction(action)", "def _activate(self):\n self.active = True", "async def async_turn_on(self, **kwargs: Any) -> None:\n\n await self.entity_description.ufp_set(self.device, True)", "async def async_turn_on(self, **kwargs: Any) -> None:\n\n await self.entity_description.ufp_set(self.device, True)", "def activateButtonClicked(self):\n print(\"trying to start process...\")\n subprocess.Popen(\"/usr/local/bin/g13d --config /usr/local/bin/defaults.bind\", shell=True)\n self.checkProcess()", "def activate(self):\n self._is_active = True", "def login_manual_user_device(username: str, password: str, mac_address: str) -> Union[str, Token]:\n possible_user = User.get_by_username(username)\n if possible_user is None:\n fail_msg = f\"No user with username: {username}.\"\n else:\n user = possible_user\n if not pwd_context.verify(password, user.password):\n fail_msg = f\"Wrong password\"\n else:\n token, device_id = _add_update_device(user.id, mac_address)\n _set_user_authenticated(user.id, device_id)\n client_logger_security().info(f\"Successfully logged in manual: device_id={device_id}, user_id={user.user_id}, \"\n f\"token={token}\")\n return token\n client_logger_security().info(f\"Failed to login manual: {fail_msg}\")\n return \"Wrong username or password\"", "def create_mfa_totp_device(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n 
\"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")", "def _authenticate(self, block, uid, key = \"\\xff\\xff\\xff\\xff\\xff\\xff\", use_b_key = False):\n if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_EASY_FRAMING, True) < 0:\n raise Exception(\"Error setting Easy Framing property\")\n abttx = (ctypes.c_uint8 * 12)()\n abttx[0] = self.MC_AUTH_A if not use_b_key else self.MC_AUTH_B\n abttx[1] = block\n for i in range(6):\n abttx[i + 2] = ord(key[i])\n for i in range(4):\n abttx[i + 8] = ord(uid[i])\n abtrx = (ctypes.c_uint8 * 250)()\n return nfc.nfc_initiator_transceive_bytes(self.__device, ctypes.pointer(abttx), len(abttx),\n ctypes.pointer(abtrx), len(abtrx), 0)", "def activate(userid, userhash):\n a_user = User.query.filter_by(id=userid).first_or_404()\n if a_user.check_hashword(userhash):\n a_user.hashword = None\n a_user.active = True\n a_user.save()\n login_user(a_user, remember=True)\n flash(\"Welcome! Your user account has been activated.\", 'success')\n return redirect(url_for('auth.user_profile'))\n elif a_user.active:\n flash(\"Your user account is active.\", 'success')\n else:\n flash(\"Activation not found, or has expired.\" \\\n + \"Please try again or ask an organizer.\", 'warning')\n logout_user()\n return redirect(url_for('public.home'))", "def mfa_login(self, mfacode):\n\n try:\n\n response = self.post(\"/authentication/login\",\n {\"user\": self.user, \"password\": self.password, \"token\": int(mfacode)})\n if response.status_code == 200:\n print(\"{0}: Orchestrator MFA login success\".format(self.url))\n # get and set X-XSRF-TOKEN\n for cookie in response.cookies:\n if cookie.name == \"orchCsrfToken\":\n self.headers[\"X-XSRF-TOKEN\"] = cookie.value\n return True\n else:\n print(\"{0}: Orchestrator MFA login failed: {1}\".format(self.url, response.text))\n return False\n except:\n print(\"{0}: Exception - unable to connect to Orchestrator\".format(self.url))\n return False", "def set_counter_based_otp(self, otp_passcode: str):\n self._hotp = HOTP(otp_passcode)", "def activate_user(self, activation_key):\n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point even trying to look it up\n # in the DB.\n if SHA1_RE.search(activation_key):\n try:\n user_profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return False\n if not user_profile.activation_key_expired():\n # Account exists and has a non-expired key. 
Activate it.\n user = user_profile.user\n user.is_active = True\n user.save()\n return user\n return False", "def set_totp(self, totp_secret):\n self.totp = TOTP(totp_secret)\n return self", "def activated(self, value: bool) -> None:\n\n if not isinstance(value, bool):\n raise TypeError(f\"<value> should be {bool}, {type(value)} given.\")\n\n self._activated = value" ]
[ "0.60715777", "0.6028133", "0.59996897", "0.58769685", "0.5760404", "0.5734255", "0.5616806", "0.5603392", "0.5589512", "0.5500395", "0.5455279", "0.5448774", "0.5440349", "0.54232854", "0.5410598", "0.5386604", "0.5370613", "0.53615427", "0.5324096", "0.53227437", "0.5316876", "0.5257387", "0.5242015", "0.5238215", "0.52204853", "0.5180888", "0.5168266", "0.5167923", "0.5149471", "0.51486313", "0.5136919", "0.5122194", "0.5099806", "0.50952345", "0.5085686", "0.50736505", "0.5067133", "0.5052447", "0.5049608", "0.503255", "0.50311255", "0.50306964", "0.5010046", "0.5009087", "0.5008341", "0.5002462", "0.49990466", "0.49950635", "0.49906734", "0.49831188", "0.49712396", "0.49543926", "0.49519625", "0.49450558", "0.49329653", "0.49205124", "0.49176443", "0.49169865", "0.48983115", "0.48939362", "0.4877591", "0.48763728", "0.48606753", "0.48567647", "0.48388755", "0.48356223", "0.48327577", "0.48316598", "0.48277473", "0.4827056", "0.4825791", "0.48230392", "0.48194167", "0.48193097", "0.48054487", "0.48049524", "0.4796671", "0.47845018", "0.47751698", "0.47561207", "0.47523004", "0.475161", "0.4746635", "0.47390857", "0.47390676", "0.47375605", "0.47259092", "0.47257814", "0.47257814", "0.4707243", "0.47054324", "0.46902686", "0.46831268", "0.46723512", "0.4666625", "0.4656086", "0.46479917", "0.46470034", "0.46439013", "0.46407658" ]
0.6753613
0
Adds the specified user to the specified group and returns a `UserGroupMembership` object with its own OCID. After you send your request, the new object's `lifecycleState` will temporarily be CREATING. Before using the object, first make sure its `lifecycleState` has changed to ACTIVE.
def add_user_to_group(self, add_user_to_group_details, **kwargs):
    resource_path = "/userGroupMemberships"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "add_user_to_group got unknown kwargs: {!r}".format(extra_kwargs))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=add_user_to_group_details,
            response_type="UserGroupMembership")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=add_user_to_group_details,
            response_type="UserGroupMembership")
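The resource path and response type suggest this method belongs to an OCI-style identity client. A minimal usage sketch follows, assuming the surrounding class is the OCI Python SDK's `IdentityClient`; the config file, client variable, and OCIDs below are illustrative assumptions, not taken from the source:

import oci

# Assumes a standard ~/.oci/config with valid credentials.
config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

details = oci.identity.models.AddUserToGroupDetails(
    user_id="ocid1.user.oc1..exampleuniqueID",    # placeholder OCID
    group_id="ocid1.group.oc1..exampleuniqueID")  # placeholder OCID

# Returns a UserGroupMembership; it starts in CREATING, so poll until ACTIVE before relying on it.
membership = identity.add_user_to_group(details).data
print(membership.id, membership.lifecycle_state)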
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addUserToGroup(self, user, group):\n return self.pm_getUserManager().addUserToGroup(self._unbox(user), self._unbox(group))", "def add_user(self, user):\n store = self.get('__store')\n members = Members(store.db)\n membership = members.first(group_id=self._id, user_id=user._id)\n if not membership:\n members.put(Member(group_id=self._id, user_id=user._id))", "def add_user_to_group(self, group_name, user_name):\r\n params = {'GroupName' : group_name,\r\n 'UserName' : user_name}\r\n return self.get_response('AddUserToGroup', params)", "def AddMemberToGroup(group_id,user_id):\r\n Group.AddMemberToGroup(group_id,user_id)", "def add_group_user(self, group_id, user_id):\n resp, body = self.put('groups/%s/users/%s' % (group_id, user_id),\n None)\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)", "def add_group_user(self, group_id, user_id):\n resp, body = self.put('groups/%s/users/%s' % (group_id, user_id),\n None)\n self.expected_success(204, resp.status)\n return rest_client.ResponseBody(resp, body)", "def action_add_to_group(self, kwargs):\n user = kwargs[\"user\"]\n group = kwargs[\"group\"]\n\n if self.engine.add_user_to_group(user, group):\n info(f\"User {user} sucessfully added to {group}\")\n else:\n error(f\"Unable to add {user} to {group}, check privileges or dn\")", "def add_member(self, user):\n user_in = user.get_groups()\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n print('user is already a member')\n return False\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node", "def add_to_group(user: User, group: Group) -> Result:\n if user.pw_name in group.gr_mem:\n return Result(State.unchanged)\n command([\"/usr/sbin/addgroup\", user.pw_name, group.gr_name])\n group.gr_mem.append(user.pw_name)\n return Result(State.success)", "def register(self, user):\n if not self.get():\n user_node = user.get() # transform user object to user node object\n usergroup_node = Node(\"Usergroup\",\n groupname=self.groupname,\n id=uuid4().hex)\n graph.create(usergroup_node)\n ownership = Relationship(user_node, 'owns', usergroup_node)\n membership = Relationship(user_node, 'in', usergroup_node)\n graph.create(ownership)\n graph.create(membership)\n self.usergroup_node = usergroup_node\n self.id = usergroup_node['id']\n return usergroup_node\n return self", "def add_user_to_group(backend, details, response, user, is_new=False, *args, **kwargs):\n \n if is_new:\n google_apps_add_group_task.apply_async([GAPPS_GROUP_NAME, user.email])", "def add_user_to_group(self, login, group):\n return self.request('put',\n '/groups/{}/users/{}'.format(group, login),\n msg='adding user {} to group {}'.format(login, group)\n )", "def post(self, id):\r\n return UserGroupService.addUserGroup(self, id)", "def add_group(self, group):\n\n return self._client.group_memberships.create({\n 'account': self,\n 'group': group\n })", "def add_user_to_group(user, group):\n Command.run(['usermod', '-a', '-G', user, group])", "def add_user_group(self, groupname, ls_user):\n data = {\"groupname\": groupname, \"add_users\": ls_user}\n headers = {\"user-agent\": self.u_agent}\n req_url = self.normalize_admin_url(u\"groups/{}\".format(groupname))\n res = requests.put(\n req_url,\n headers=headers,\n auth=self.auth,\n data=json.dumps(data),\n verify=False,\n )\n if res.status_code in [200, 201, 206]:\n return Response(0, res)\n else:\n return Response(res.status_code, res)", "def add_new_member(self, 
event):\n body = event['body']\n body = json.loads(body)\n\n required_fields = ['group_id', 'new_user_id']\n for f in required_fields:\n if f not in body:\n return get_bad_request('POST body missing field {}'.format(f))\n\n group_id = body['group_id']\n new_user_id = body['new_user_id']\n \n user = self.mealShareUsers.get_user_cognito_data(event)\n current_user = user['user_id']\n \n # Requesting user must already be a member\n if not self.mealShareGroups.is_user_in_group(current_user, str(group_id)):\n return {\n 'statusCode': 401,\n 'statusMessage': 'User {} is not a member of the group ID {} and can not add a person to it'.format(current_user, group_id),\n 'group_id': group_id,\n 'new_user_id': new_user_id\n }\n \n # Check if adding was successful\n success = self.mealShareGroups.add_user_to_group(new_user_id, group_id)\n if success:\n return {\n 'statusCode': 200,\n 'statusMessage': 'Successfully added {} to group {}'.format(new_user_id, group_id),\n 'group_id': group_id,\n 'new_user_id': new_user_id\n }\n else:\n return {\n 'statusCode': 500,\n 'statusMessage': 'FAILED to add user {} to group {} by {}'.format(new_user_id, group_id, current_user),\n 'group_id': group_id,\n 'new_user_id': new_user_id\n }", "def add_group(self, resolvable):\n group = self._resolve_group(resolvable)\n return self._client.group_memberships.create({\n 'account': self,\n 'group': group,\n })", "def add_user_to_group(self,username,groupname):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_add_user_to_group_query,{'username':username,'groupname':groupname,'username_field':self.sql_username_field,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: add_user_to_group: %s\" % (query,))\n\n cursor.execute(query)\n if cursor.rowcount > 0:\n db.commit()\n return True\n return False", "def test_020_add_user_to_group(self):\n testflow.step(\"Adding user %s to group %s\", TEST_USER1, TEST_GROUP1)\n assert MANAGE_CLI.run(\n 'useradd',\n TEST_GROUP1,\n user=TEST_USER1\n )[0], \"Failed to add user to group '%s'\" % TEST_GROUP1\n\n testflow.step(\"Adding nonexisting user to group %s\", TEST_GROUP1)\n assert not MANAGE_CLI.run(\n 'useradd',\n TEST_GROUP1,\n user='nonsense'\n )[0], \"Possible to add nonexisting user to group\"\n\n testflow.step(\"Adding user %s to nonexisting group\", TEST_USER2)\n assert not MANAGE_CLI.run(\n 'useradd',\n 'nonsense',\n user=TEST_USER2\n )[0], \"Possible to add user to nonexisting group\"", "def AddUser(effective_user, effective_chat):\n chatId = effective_chat.id\n chatType = effective_chat.type\n chatTitle = effective_chat.title\n userId = effective_user.id\n\n if chatId == userId:\n exists = session.query(User).filter_by(user_id=userId, group_id=None).one_or_none()\n group_exists = False\n else:\n exists = session.query(User).filter_by(user_id=userId, group_id=chatId).one_or_none()\n group_exists = session.query(Group).filter_by(id=chatId).one_or_none()\n\n if(exists is not None):\n return False\n\n userGroupID = None\n if chatId != userId:\n userGroupID = chatId\n\n new_user = User(user_id=userId, \n user_name = effective_user.username, \n first_name=effective_user.first_name, \n last_name=effective_user.last_name, \n is_bot = effective_user.is_bot,\n group_id = userGroupID)\n\n if chatId != userId and group_exists is None:\n new_group = Group(id = chatId,\n group_type = chatType,\n title = chatTitle)\n session.add(new_group)\n\n session.add(new_user)\n 
session.commit()\n return True", "def add_user_to_group(conn, user_dn, old_user_group_dn):\n try:\n conn.extend.microsoft.add_members_to_groups([str(user_dn)], [str(old_user_group_dn)])\n except Exception as e:\n raise Exception(\"Can't add user to this group :: {}\".format(e))", "def add_user_to_group(self, token, userGroup, userName, isOwner):\n requestUser = self.get_username_from_token(token)\n if self.check_user_has_owner_clearance(requestUser, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n members = dataBase['userGroups'][userGroup]['members']\n if isOwner and userName not in owners:\n dataBase['userGroups'][userGroup]['owners'].append(userName)\n elif not isOwner and userName not in members:\n dataBase['userGroups'][userGroup]['members'].append(userName)\n\n self.write_database(dataBase)\n else:\n raise UserPermissionException(\n \"Requesting user is not owner of specified user group\")", "def post(self):\n args = parser.parse_args()\n user_group = UserGroup()\n user_group.name = args['name']\n user_group.createdby = auth.username()\n db_session.add(user_group)\n db_session.commit()\n return user_group, 201", "def add_owner(self, user):\n user_in = user.get_groups()\n member = False\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n member = True\n ownership = Relationship(user.get(), 'owns', self.usergroup_node)\n graph.create(ownership)\n if not member:\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node", "def add_group(self, group_node, group_relationship_properties=None):\n\n user_group_relationship = Relationship(start_node=self.user_node,\n rel=AgoraRelationship.MEMBER_OF,\n end_node=group_node)\n self.graph_db.create_unique(user_group_relationship)\n #TODO set properties on the relationsip\n # group_relationship_properties[\"unique_id\"] = str(uuid.uuid4())", "def add_to_group(_request, group_id, email):\n group = models.UserGroup.get_by_id(int(group_id))\n user_key = models.UserProfile.load(email).key()\n if group.users is None:\n group.users = []\n logging.warning('Group \"%s\" had a None users list', group.name)\n group.users.append(user_key)\n group.put()\n\n url = urlresolvers.reverse('views.admin.edit_user', args=[email])\n return http.HttpResponseRedirect(url)", "def test_save(self, name='test', user=None):\n group = Group(name=name)\n group.save()\n \n if user:\n group.user_set.add(user)\n \n return group", "def add_member_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n user_id = str(args.get('user_id'))\n required_properties = {\n \"@odata.id\": f'https://graph.microsoft.com/v1.0/users/{user_id}'}\n client.add_member(group_id, required_properties)\n\n human_readable = f'User {user_id} was added to the Group {group_id} successfully.'\n return human_readable, NO_OUTPUTS, NO_OUTPUTS", "def _register(self, user=None):\n if user is None:\n user = User.objects.create_user(\n username=self.username,\n password=self.password)\n user.is_active = self.active\n # Automatically add the user to the proper group\n if self._group_name is not None:\n group = get_or_none(Group, name=self._group_name)\n if group is not None:\n user.groups.add(group)\n\n user.save()\n profile = UserProfile(user=user, member=self)\n profile.save()", "def create_new_user_group(self, token, userGroup):\n requestUser = self.get_username_from_token(token)\n dataBase = self.read_database()\n 
userGroups = dataBase['userGroups']\n if userGroup not in userGroups:\n newGroup = dict()\n newGroup['owners'] = [requestUser]\n newGroup['members'] = list()\n newGroup['masterKey'] = self.generate_master_key().decode('cp855')\n dataBase['userGroups'][userGroup] = newGroup\n self.write_database(dataBase)\n else:\n raise GroupAlreadyExistsException(\"Specified user group already exists.\")", "def add_user(request):\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n new_user = User.objects.create_user(form.cleaned_data['username'], \"user@invalid.com\", form.cleaned_data['password1'])\n role = form.cleaned_data['role']\n group = Group.objects.get(name=role)\n new_user.groups.add(group)\n return redirect(index)\n else:\n form = CreateUserForm() \n return render(request, 'users/add_user.html', {\n 'form': form,\n })", "def modify_group_membership(http_method, group_id, user_id):\n\n if http_method not in [\"put\", \"delete\"]:\n raise ValueError(f\"Invalid HTTP request type: {http_method}\")\n endpoint = f\"/identities/groups/{group_id}/userMembers/{user_id}\"\n http_response = call_rest_api(endpoint, http_method, **config.DEFAULT_REST_KWARGS)\n\n # Adding a user:\n if http_method == \"put\":\n if http_response.status_code != 201: # 201 = 'member added to group'\n raise ValueError(http_response.text)\n logger.log(f\"A user with ID: {user_id}, was added to the custom group with ID: {group_id}.\")\n\n # Removing a user:\n elif http_method == \"delete\":\n if http_response.status_code != 204: # 204 = 'member removed'\n raise ValueError(http_response.text)\n logger.log(f\"A user with ID: {user_id}, was removed from the custom group with ID: {group_id}.\")", "def add_user(self, user_id, group_id='', user_level=1, user_name='', name='', method_id=1):\n stmt = \"\"\"INSERT INTO users (_user_id, group_id, user_level, _user_name, _name, method_id) \n SELECT ?, ?, ?, ?, ?, ? \n WHERE NOT EXISTS(SELECT 1 FROM users WHERE (?) 
= _user_id)\"\"\"\n args = (user_id, group_id, user_level, user_name, name, method_id, user_id)\n self.conn.execute(stmt, args)\n self.conn.commit()", "def join_group(self, user, group, force=0):\n if not force and not group.can_join(user):\n raise NotEnoughPrivileges\n \n group.add_member(user)\n user.add_to_group(get_usergroup_database().get_usergroup(group.get_user_id()))\n if hasattr(user, 'karma_activity_credit'):\n # groups can join groups, and groups don't have karma_activity_credit\n user.karma_activity_credit()\n \n self._flush_user_data_caches(user)", "def handle(self, user):\n\n if not self.group_users:\n return\n for group_name, users in self.group_users.items():\n if user.username in users:\n group = self.get_group_from_db(group_name)\n user.groups.add(group)\n logger.info('Added {} to {}'.format(user.username, group_name))", "def add_member(self, parent_group, group_name=None, user_name=None):\n METHOD = 'POST'\n API_PATH = '/groups/add-member'\n\n # Process group_name and user_name and add parent name to resulting dict\n data, target_name = self.__prep_group_or_user(group_name=group_name, user_name=user_name)\n data['parent_name'] = parent_group\n\n # Make REST call\n resp = self._rest_call[METHOD](API_PATH, data=data)\n\n if resp.status_code == 200:\n return target_name\n\n elif resp.status_code == 403:\n raise AuthorizationError(\"User is not authorized or token is incorrect.\")\n\n else:\n if resp.json().get(\"error_code\") in ERROR_CODES:\n raise ERROR_CODES[resp.json().get('error_code')](resp.json().get('message'))\n else:\n raise APIError(\"Response code {0}: {1} {2}\".format(resp.status_code,\n resp.json().get('error_code'),\n resp.json().get('message')))", "def add_user_to_group(self, group_name, user_name, delegate_account=None):\n self.log.debug(\"Adding user \" + user_name + \" to group \" + group_name)\n params = {'GroupName': group_name,\n 'UserName': user_name}\n if delegate_account:\n params['DelegateAccount'] = delegate_account\n self.connection.get_response('AddUserToGroup', params)", "def add_user(self, session, user_data: Dict) -> User:\n chat_id = user_data[\"chat_id\"]\n username = user_data[\"username\"]\n first_name = user_data[\"first_name\"]\n last_name = user_data[\"last_name\"]\n time_registered = user_data[\"time_registered\"]\n is_admin = False\n reminder_time = datetime.time(hour=21, tzinfo=TIME_ZONE)\n\n user = session.query(User).get(chat_id)\n if user:\n if user.username != username:\n user.username = username\n session.commit()\n if user.is_banned is True:\n user.is_banned = False\n session.commit()\n return user\n\n new_user = User(\n chat_id=chat_id,\n is_banned=False,\n username=username,\n first_name=first_name,\n last_name = last_name,\n time_registered = time_registered,\n is_admin = is_admin,\n reminder_time = reminder_time,\n )\n session.add(new_user)\n session.commit()\n return new_user", "def add_user(self, user):\n logging.debug(\"Adding user id = %s \" % str(user.id))\n logging.debug(\"The set has %s users till now\" % str(len(self.users_queue.pending_pages)))\n return self._add_user(user)", "def add_member(self, user, is_admin=False):\n # Only add members if they are not already one\n membership_count = InitiativeMembership.objects.filter(user=user, initiative=self).count()\n\n if membership_count > 0:\n raise SuspiciousOperation(\"Is already a member\")\n\n return InitiativeMembership.objects.create(user=user, initiative=self, is_admin=is_admin)", "def create_group(user):\n if connexion.request.is_json:\n users_group = 
[User.from_dict(d) for d in connexion.request.get_json()]\n response = (\"success\", 201)\n if len(users_group) > 4:\n response = (\"Max number of player is 4\", 400)\n else:\n groupId = GroupStorageController.add_new_group(users_group)\n return response", "def test_add_user_to_course_group(self):\r\n # Create groups for a new course (and assign instructor role to the creator).\r\n self.assertFalse(has_access(self.creator, CourseInstructorRole(self.course_key)))\r\n add_users(self.global_admin, CourseInstructorRole(self.course_key), self.creator)\r\n add_users(self.global_admin, CourseStaffRole(self.course_key), self.creator)\r\n self.assertTrue(has_access(self.creator, CourseInstructorRole(self.course_key)))\r\n\r\n # Add another user to the staff role.\r\n self.assertFalse(has_access(self.staff, CourseStaffRole(self.course_key)))\r\n add_users(self.creator, CourseStaffRole(self.course_key), self.staff)\r\n self.assertTrue(has_access(self.staff, CourseStaffRole(self.course_key)))", "def test_adduser(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.run_function(\"user.add\", [self._user])\n self.assertTrue(self.run_function(\"group.adduser\", [self._group, self._user]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertIn(self._user, str(group_info[\"members\"]))\n # try add a non existing user\n self.assertFalse(\n self.run_function(\"group.adduser\", [self._group, self._no_user])\n )\n # try add a user to non existing group\n self.assertFalse(\n self.run_function(\"group.adduser\", [self._no_group, self._user])\n )\n # try add a non existing user to a non existing group\n self.assertFalse(\n self.run_function(\"group.adduser\", [self._no_group, self._no_user])\n )", "def create(person_group_id, name=None, user_data=None):\n name = person_group_id if name is None else name\n url = 'persongroups/{}'.format(person_group_id)\n json = {\n 'name': name,\n 'userData': user_data,\n }\n\n return util.request('PUT', url, json=json)", "def _add_group(self, group):\n\n if group.name not in self.groups:\n # it's brand new, add him!\n self.groups[group.name] = group\n if self.groups[group.name] != group:\n # different object, merge\n self._merge_groups(self.groups[group.name], group)", "def add_user_with_status_granted(caller, user):\r\n if _add_user(user, CourseCreator.GRANTED):\r\n update_course_creator_group(caller, user, True)", "def addUser(self, user):\r\n self.users.append(user)\r\n return len(self.users)-1", "def add_user(self, user, role=OrganizationUserRole.MEMBER):\n users_count = self.users.all().count()\n if users_count == 0:\n role = OrganizationUserRole.OWNER\n org_user = self._org_user_model.objects.create(\n user=user, organization=self, role=role\n )\n if users_count == 0:\n self._org_owner_model.objects.create(\n organization=self, organization_user=org_user\n )\n\n # User added signal\n user_added.send(sender=self, user=user)\n return org_user", "def add(self, user):\n int_id = user.get_int_id(self.rooms)\n self.rooms[user.room][\"users\"].append(user)\n\n # Games\n if self.rooms[user.room][\"isGame\"] == \"true\":\n user.send([\"jg\", int_id, user.room])\n # Rooms\n else:\n user.send([\"jr\", int_id, user.room, self.get_strings(user.room)])\n self.packet.send_room([\"ap\", int_id, user.get_string()], user.room)", "def add_user(self, user: User):\n raise NotImplementedError", "def assign_TestUserGroup(test_case, # type: AnyMagpieTestCaseType\n override_user_name=null, # type: Optional[Str]\n override_group_name=null, # type: 
Optional[Str]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) -> None\n app_or_url = get_app_or_url(test_case)\n usr_name = override_user_name if override_user_name is not null else test_case.test_user_name\n grp_name = override_group_name if override_group_name is not null else test_case.test_group_name\n path = \"/groups/{grp}/users\".format(grp=grp_name)\n resp = test_request(app_or_url, \"GET\", path,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n body = check_response_basic_info(resp, 200, expected_method=\"GET\")\n if usr_name not in body[\"user_names\"]:\n path = \"/users/{usr}/groups\".format(usr=usr_name)\n data = {\"group_name\": grp_name}\n resp = test_request(app_or_url, \"POST\", path, json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n check_response_basic_info(resp, 201, expected_method=\"POST\")\n TestSetup.check_UserGroupMembership(test_case, override_user_name=usr_name, override_group_name=grp_name,\n override_headers=override_headers, override_cookies=override_cookies)", "def add_user_to_groups(username):\n groups = request.get_json().get(\"groups\", [])\n return jsonify(\n admin.add_user_to_groups(current_app.scoped_session(), username, groups=groups)\n )", "def add_user(self, REQUEST):\n\n role_id = REQUEST.form['role_id']\n country_code = role_id.rsplit('-', 1)[-1]\n user_id = REQUEST.form['user_id']\n agent = self._get_ldap_agent()\n\n if not self._allowed(agent, REQUEST, country_code):\n return None\n if not nfp_can_change_user(self, user_id, no_org=False):\n # This means somebody is manipulating the DOM in order to\n # add a user that belongs to an organisation from another\n # country (the button doesn't normally appear)\n return None\n\n with agent.new_action():\n role_id_list = agent.add_to_role(role_id, 'user', user_id)\n\n role_msg = get_role_name(agent, role_id)\n msg = \"User %r added to role %s. \\n\" % (user_id, role_msg)\n\n # for Eionet Groups roles only, test if the added user is member of a\n # national organisation\n\n if self.is_eionet_group(role_id):\n if not get_national_org(agent, user_id, role_id):\n msg += (\n \"The user you want to add to an Eionet Group does not\"\n \" have a mandatory reference to an organisation for \"\n \"your country. 
Please corect!\")\n\n IStatusMessage(REQUEST).add(msg, type='info')\n\n log.info(\"%s ADDED USER %r TO ROLE %r\",\n logged_in_user(REQUEST), user_id, role_id_list)\n\n if '-awp-' in role_id:\n return REQUEST.RESPONSE.redirect(self.absolute_url() +\n '/awps?nfp=%s#role_%s' %\n (country_code, role_id))\n\n return REQUEST.RESPONSE.redirect(self.absolute_url() +\n '/nrcs?nfp=%s#role_%s' %\n (country_code, role_id))", "def add_user_to_group(self, group_name, user_login):\n params = {\n 'login': user_login,\n 'name': group_name\n }\n self.sonarqube.make_call('post', API_USER_GROUPS_ADD_USER_ENDPOINT, **params)", "def add_user(self,\n\t\t\t\t user_id,\n\t\t\t\t password,\n\t\t\t\t user_group_oid,\n\t\t\t\t enable='',\n\t\t\t\t first_name='',\n\t\t\t\t last_name='',\n\t\t\t\t email='',\n\t\t\t\t description=''):\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tself.ise.headers.update({'Content-Type': 'application/vnd.com.cisco.ise.identity.internaluser.1.0+xml'})\n\n\t\tdata = open(os.path.join(base_dir, 'xml/user_add.xml'), 'r').read().format(\n\t\t\t\tuser_id, password, enable, first_name, last_name, email, description, user_group_oid)\n\n\t\tresp = self.ise.post('{0}/config/internaluser'.format(self.url_base), data=data, timeout=self.timeout)\n\n\t\tif resp.status_code == 201:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = '{0} Added Successfully'.format(user_id)\n\t\t\treturn result\n\t\telse:\n\t\t\tprint ('ERR:',resp.status_code) ; \n\t\t\timport pprint ; pprint.pprint(data) ; print ('RESP:',resp.text)\n\t\t\ttry:\n\t\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\texcept:\n\t\t\t\tresult['response'] = resp.text\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result", "def add(\n new_user: schemas.UserCreate,\n db_session: Session = Depends(get_db),\n current_user: models.User = Depends(get_current_admin_user)\n):\n db_user = crud.get_by_email(db_session, new_user.email)\n\n if db_user:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail='The user with this email already exists in the system.'\n )\n\n return crud.create(db_session, new_user)", "def create_group(self, event):\n body = event['body']\n body = json.loads(body)\n\n # Required field in POST body\n if 'group_name' not in body:\n return self.get_bad_request('POST body missing group_name')\n\n group_name = body['group_name']\n user = self.mealShareUsers.get_user_cognito_data(event)\n user_id = user['user_id']\n \n # Add the creator to the group, as the initial member\n group_id = self.mealShareGroups.create_group(group_name)\n success = self.mealShareGroups.add_user_to_group(user_id, group_id)\n if success:\n return {\n 'statusCode': 200,\n 'statusMessage': 'Successfully created group {} with ID {}'.format(group_name, group_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }\n else:\n return {\n 'statusCode': 500,\n 'statusMessage': 'FAILED to create group {} by user {}'.format(group_name, user_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }", "def create_group(self, group):\n if self.dryrun:\n self.logger.info(\"Would create group %s\", group)\n return FakeGroupId()\n result = self.conn.usergroup.create(name=group)\n groupid = result['usrgrpids'][0]\n self.logger.info(\"Create group %s with id %s\", group, groupid)\n return groupid", "def add_member(self, request, **kwargs):\n valid_user = 
Member.objects.filter(group=self.get_object(), user=self.request.user).values('role_type').first()\n if valid_user['role_type'] == 'member':\n return Response({'message': 'You have no right to perform this action'}, status=status.HTTP_403_FORBIDDEN)\n if request.data.get('phone') is None:\n return Response({'message': 'Phone number not provided'}, status=status.HTTP_400_BAD_REQUEST)\n if request.data.get('role') is None:\n return Response({'message': 'Role is required'}, status=status.HTTP_400_BAD_REQUEST)\n if request.data.get('display_name') is None:\n return Response({'message': 'Name is required'}, status=status.HTTP_400_BAD_REQUEST)\n req_user = request.data.get('phone')\n user_data = User.objects.get(phone=req_user)\n if user_data is None:\n return Response({'message': 'User with this number is not registered'}, status=status.HTTP_404_NOT_FOUND)\n group = self.get_object()\n if group.members.filter(user=user_data).count() != 0:\n return Response({'message': 'User is already member of this group'}, status=status.HTTP_400_BAD_REQUEST)\n member_role = request.data.get('role')\n new_member_data = Member.objects.create(group=group, user=user_data,role_type=member_role, display_name=request.data.get('display_name'))\n new_member_data.save()\n serializer_data = MemberSerializer(new_member_data)\n return Response(serializer_data.data)", "def test_add_new_user_to_public_group(self):\n new_user = User.objects.create_user(self.username)\n self.assertEqual(\n new_user.groups.filter(name=self.public_group_name).count(), 1)", "def add_user(self, user_id):\n user_doc = {\n 'type': 'user',\n 'name': user_id\n }\n return self.add_doc_if_not_exists(user_doc, 'name')", "def add_to_download_group(self, user):\r\n user.groups.add(self.dl_grp)", "def add_new_user(self, user):\n # print(\"Saving new user\")\n self.execute(TABELLE['id_users']['insert']['complete_user'],\n (user['id'], False, False, True, False, False))\n\n self.execute(TABELLE['users']['insert'],\n (user['id'], user['username']))", "def insert_user_group_additional(self, id_group:int, id_user:int) -> None:\n try:\n if not self.check_user_group_connection(id_group, id_user):\n self.connect_user_group(id_group, id_user)\n except Exception as e:\n msg = f\"We faced problems with additional insertion values; Mistake: {e}\"\n self.proceed_error(msg)", "def add_group_group_member(self, targetgroup, groupname):\n try:\n targetgroup = self.quote(targetgroup)\n groupname = self.quote(groupname)\n self.g.put('groups/%s/groups/%s' % (targetgroup,\n groupname),\n headers={})\n except HTTPError as e:\n return self._manage_errors(e)", "def one_group_member_sync_to_ucs(self, ucs_group_object, object):\n\t\t# In AD the object['dn'] is member of the group sync_object\n\n\t\tml = []\n\t\tif not self.__compare_lowercase(object['dn'], ucs_group_object['attributes'].get('uniqueMember', [])):\n\t\t\tml.append((ldap.MOD_ADD, 'uniqueMember', [object['dn']]))\n\n\t\tif object['attributes'].get('uid'):\n\t\t\tuid=object['attributes'].get('uid', [])[0]\n\t\t\tif not self.__compare_lowercase(uid, ucs_group_object['attributes'].get('memberUid', [])):\n\t\t\t\tml.append((ldap.MOD_ADD, 'memberUid', [uid]))\n\n\t\tif ml:\n\t\t\ttry:\n\t\t\t\tself.lo.lo.modify_s(ucs_group_object['dn'],compatible_modlist(ml))\n\t\t\texcept ldap.ALREADY_EXISTS:\n\t\t\t\t# The user is already member in this group or it is his primary group\n\t\t\t\t# This might happen, if we synchronize a rejected file with old informations\n\t\t\t\t# See Bug #25709 Comment #17: 
https://forge.univention.org/bugzilla/show_bug.cgi?id=25709#c17\n\t\t\t\tud.debug(ud.LDAP, ud.INFO, \"one_group_member_sync_to_ucs: User is already member of the group: %s modlist: %s\" % (ucs_group_object['dn'], ml))\n\t\t\t\tpass\n\n\t\t# The user has been removed from the cache. He must be added in any case\n\t\tif not self.group_members_cache_ucs.get(ucs_group_object['dn'].lower()):\n\t\t\tself.group_members_cache_ucs[ucs_group_object['dn'].lower()] = []\n\t\tud.debug(ud.LDAP, ud.INFO, \"one_group_member_sync_to_ucs: Append user %s to group ucs cache of %s\" % (object['dn'].lower(), ucs_group_object['dn'].lower()))\n\t\tself.group_members_cache_ucs[ucs_group_object['dn'].lower()].append(object['dn'].lower())", "def add_user_to_g():\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def add_user_to_g():\n\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def add_user_to_g():\n\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def add_user_to_g():\n\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def add_user_to_g():\n \n if CURRENT_USER in session:\n g.user = User.query.get(session[CURRENT_USER])\n\n else:\n g.user = None", "def add_member(self, request, pk):\n farm = self.get_object()\n user = request.data.get('user')\n farm.add_member(user)\n return Response({}, status=status.HTTP_202_ACCEPTED)", "def subscribe(self, group, user, reason=GroupSubscriptionReason.unknown):\n try:\n with transaction.atomic():\n self.create(\n user=user,\n group=group,\n project=group.project,\n is_active=True,\n reason=reason,\n )\n except IntegrityError:\n pass", "def newgroup(self, groupname, groupou=None, grouptype=None,\n description=None, mailaddress=None, notes=None, sd=None,\n gidnumber=None, nisdomain=None):\n\n group_dn = \"CN=%s,%s,%s\" % (groupname, (groupou or \"CN=Users\"), self.domain_dn())\n\n # The new user record. 
Note the reliance on the SAMLDB module which\n # fills in the default informations\n ldbmessage = {\"dn\": group_dn,\n \"sAMAccountName\": groupname,\n \"objectClass\": \"group\"}\n\n if grouptype is not None:\n ldbmessage[\"groupType\"] = normalise_int32(grouptype)\n\n if description is not None:\n ldbmessage[\"description\"] = description\n\n if mailaddress is not None:\n ldbmessage[\"mail\"] = mailaddress\n\n if notes is not None:\n ldbmessage[\"info\"] = notes\n\n if gidnumber is not None:\n ldbmessage[\"gidNumber\"] = normalise_int32(gidnumber)\n\n if nisdomain is not None:\n ldbmessage[\"msSFU30Name\"] = groupname\n ldbmessage[\"msSFU30NisDomain\"] = nisdomain\n\n if sd is not None:\n ldbmessage[\"nTSecurityDescriptor\"] = ndr_pack(sd)\n\n self.add(ldbmessage)", "def add_user(self, user_id):\n\n if not str(user_id).isalnum():\n raise ValueError('Identifier must be the numerical user ID')\n\n # skip adding user if existing & detailed\n existing_user = self.get_user(user_id)\n if existing_user:\n if existing_user.get('detail') == 'full':\n log.info('Not adding user %s, already (full) in graph' % user_id)\n return existing_user\n if existing_user.get('detail') == 'basic':\n log.info('Not adding user %s, already (basic) in graph: updating' % user_id)\n return self.update_user(existing_user)\n\n\n log.info('Adding user %s to graph' % user_id)\n # get and assign user data to node\n props = self.fetch_user_data(user_id)\n user_node = self.gdb.node(**props)\n\n # add user node to indexes\n users = self.gdb.nodes.indexes.get('users')\n users['user_id'][props.get('id_str')] = user_node\n users['screen_name'][props.get('screen_name')] = user_node\n\n # add followers/following\n \n self.add_subscriptions(user_node)\n\n return user_node", "def addUserObj(self, userObj : bbUser.bbUser):\n # Ensure no bbUser exists in the db with the same ID as the given bbUser\n if self.userIDExists(userObj.id):\n raise KeyError(\"Attempted to add a user that is already in this bbUserDB: \" + str(userObj))\n # Store the passed bbUser\n self.users[userObj.id] = userObj", "def test_user_is_group_member(self):\n self.user.add_to_group(self.thread.group.pk)\n self.assertEqual(\n Thread.public.get_by_user(\n thread_id=self.thread.pk, user=self.user),\n self.thread\n )", "def add_member(self, id, user):\n request = self.request_builder('orgs.teams.add_member',\n id=id, user=user)\n return self._put(request)", "def add(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n # include a body, because requests does not send content-length when no\r\n # body is present, and that makes GitHub respond with HTTP 411\r\n return http.Request('PUT', url, '*'), parsers.parse_empty", "def signup(request):\n \n user_form = UserCreationForm()\n \n if request.method == 'POST':\n user_form = UserCreationForm(request.POST)\n if user_form.is_valid():\n \"\"\" new user account is created here\"\"\"\n \"\"\" @fixme: this is a buggy peice of code; cannot do commit=False; because a M-M relation cannot be attached to a non-existing object. 
\"\"\"\n new_user = user_form.save()\n \n \"\"\" @fixme: group is added after the account is created/commited to the DB; this is kinda bad; required two DB calls.\"\"\"\n# new_user.groups.add(Group.objects.get(name='student'))\n return HttpResponseRedirect(reverse(\"home.views.index\"))\n \n return render_to_response(\"auth/signup.html\", {\n 'form' : user_form\n }, context_instance=RequestContext(request))", "def _add(self, signup_form_id, group_ids):\n path = '/members/add'\n data = self.extract()\n if group_ids:\n data['group_ids'] = group_ids\n if signup_form_id:\n data['signup_form_id'] = signup_form_id\n\n outcome = self.account.adapter.post(path, data)\n self['member_status_id'] = outcome['status']\n if outcome['added']:\n self['member_id'] = outcome['member_id']", "def add_to_group(self, group):\n\n if not self.in_group(group):\n self.secondary_groups.append(group)\n return self", "def add_to_groups(self, username, groups):\n pass", "def get_user_group_membership(self, user_group_membership_id, **kwargs):\n resource_path = \"/userGroupMemberships/{userGroupMembershipId}\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"get_user_group_membership got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userGroupMembershipId\": user_group_membership_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"UserGroupMembership\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"UserGroupMembership\")", "def create(self, validated_data):\n # print('Validated Data',validated_data['profile'].get('group'))\n group = validated_data['profile'].get('group')\n profile_data = validated_data.pop('profile')\n password = validated_data.pop('password')\n user = User(**validated_data)\n user.set_password(password)\n user.save()\n \"\"\"\n After the creation of the user he is added to a particular group.\n \"\"\"\n user.groups.add(Group.objects.get(name=group))\n UserProfile.objects.create(user=user, **profile_data)\n return user", "def add_user(self, user):\n c = self.conn.cursor()\n cursor = c.execute(\"INSERT INTO users VALUES (null, ?, ?, ?)\",\n (user['username'], user['email'], hash_password(user['password']),))\n self.conn.commit()\n return self.get_user(cursor.lastrowid)", "def one_group_member_sync_from_ucs(self, ad_group_object, object):\n\t\tml = []\n\t\tif not self.__compare_lowercase(object['dn'], ad_group_object['attributes'].get('member', [])):\n\t\t\tml.append((ldap.MOD_ADD, 'member', [object['dn']]))\n\n\t\tif ml:\n\t\t\ttry:\n\t\t\t\tself.lo_ad.lo.modify_s(ad_group_object['dn'],compatible_modlist(ml))\n\t\t\texcept 
ldap.ALREADY_EXISTS:\n\t\t\t\t# The user is already member in this group or it is his primary group\n\t\t\t\t# This might happen, if we synchronize a rejected file with old informations\n\t\t\t\t# See Bug #25709 Comment #17: https://forge.univention.org/bugzilla/show_bug.cgi?id=25709#c17\n\t\t\t\tud.debug(ud.LDAP, ud.INFO, \"one_group_member_sync_from_ucs: User is already member of the group: %s modlist: %s\" % (ad_group_object['dn'], ml))\n\t\t\t\tpass\n\n\t\t# The user has been removed from the cache. He must be added in any case\n\t\tud.debug(ud.LDAP, ud.INFO, \"one_group_member_sync_from_ucs: Append user %s to group con cache of %s\" % (object['dn'].lower(), ad_group_object['dn'].lower()))\n\t\tif not self.group_members_cache_con.get(ad_group_object['dn'].lower()):\n\t\t\tself.group_members_cache_con[ad_group_object['dn'].lower()] = []\n\t\tself.group_members_cache_con[ad_group_object['dn'].lower()].append(object['dn'].lower())", "def add_group(username: str, gid: int=None, system: bool=False) -> Result[Group]:\n try:\n get_group(username)\n except KeyError:\n pass\n else:\n raise ValueError(\"Username {!r} is already in use\".format(username))\n args = [\"/usr/sbin/addgroup\", username]\n if gid:\n try:\n group = grp.getgrgid(gid)\n except KeyError:\n args[-1:-1] = [\"--gid\", str(gid)]\n else:\n raise ValueError(\"GID {} is already in use by {!r}\".format(gid, group.gr_name))\n if system:\n args[-1:-1] = [\"--system\"]\n command(args)\n return Result(State.success, get_group(username))", "def addUser(self, id : int) -> bbUser.bbUser:\n id = self.validateID(id)\n # Ensure no user exists with the specified ID in the database\n if self.userIDExists(id):\n raise KeyError(\"Attempted to add a user that is already in this bbUserDB\")\n # Create and return a new user\n newUser = bbUser.bbUser.fromDict(bbUser.defaultUserDict, id=id)\n self.users[id] = newUser\n return newUser", "def add_account(self, account):\n return self._client.group_memberships.create({\n 'account': account,\n 'group': self,\n })", "def test_user_in_group_can_access(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n\n utils.test_can_access(self, self.url)", "def newUser(self):\n user = IrcUser(hashed=True)\n self.nextId += 1\n id = self.nextId\n self.users[id] = user\n self.flush()\n user.id = id\n return user", "def add_member(self, db: Session, *, room: Room, user: User) -> Room:\n members = [x for x in room.members]\n members.append(user)\n return self.update(db=db, db_obj=room, obj_in={\"members\": members})", "def create_group(self, user_id, name, owner, description='',\n member_perms=[],\n other_perms=[],\n member_join_perms=[],\n other_join_perms=[],\n anon_read=0,\n no_pay=False):\n \n user_id = user_id.lower()\n \n if self.root.has_key(user_id) or get_usergroup_database().has_key(user_id):\n raise KeyError, \"Key %s already exists.\" % user_id\n\n if not no_pay:\n # charge owner for new group - don't create group if can't pay\n from qon.karma import NoKarmaToGive\n try:\n owner.pay_karma(self._karma_new_group)\n except NoKarmaToGive:\n return None\n\n group = Group(user_id=user_id, name=name, owner=owner)\n group.add_owner(owner)\n group.add_sponsor(owner)\n group.anon_read = anon_read\n group.description = description\n \n usergroup = UserGroup(group.user_id)\n get_usergroup_database().add_usergroup(usergroup)\n group.set_owning_group(usergroup)\n \n # members must have at least read access -- otherwise, what's the point of membership?\n mem_perms = 
member_perms\n if 'read' not in mem_perms:\n mem_perms.append('read')\n \n group.set_group_perms(mem_perms)\n group.set_other_perms(other_perms)\n group.get_members().set_group_perms(member_join_perms)\n group.get_members().set_other_perms(other_join_perms)\n \n # flush owner's group list cache\n self._flush_user_data_caches(owner)\n\n self.add_group(group)\n return group", "def test_resource_user_resource_add_user_groups_for_user_post(self):\n pass", "def post(self):\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n logging.info(\"[UWEB] add group request: %s, cid: %s\",\n data, self.current_user.cid)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception(\"[UWEB] Invalid data format. body:%s, Exception: %s\",\n self.request.body, e.args)\n self.write_ret(status)\n return\n\n try: \n cid = data.cid\n name = data.name\n group = self.get_group_by_cid(cid, name)\n if group:\n status = ErrorCode.GROUP_EXIST\n self.write_ret(status)\n return\n\n group_info = dict(cid=cid,\n name=name,\n type=UWEB.GROUP_TYPE.NEW)\n gid = add_group(group_info, self.db, self.redis)\n # NOTE: wspush to client\n tid = self.current_user.tid\n if status == ErrorCode.SUCCESS:\n WSPushHelper.pushS3(tid, self.db, self.redis)\n\n self.write_ret(status,\n dict_=dict(gid=gid,\n cid=cid,\n name=name))\n\n except Exception as e:\n logging.exception(\"[UWEB] Create group failed. uid: %s, Exception: %s\",\n self.current_user.uid, e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def add_user(self, userdict):\n return self.post('users', userdict)", "def user_add(self, uname):\n # create group if it does not exist\n self.group_add()\n username = self.prefixed(uname)\n args = [\n \"useradd\",\n \"-g\",\n self._gname,\n \"-d\",\n \"/home/{}\".format(username),\n \"-s\",\n \"/sbin/nologin\",\n username\n ]\n logger.debug(args)\n self.call(args)", "def add_user_to_division(self, **kwargs):\n try:\n is_head = kwargs.get('isHead')\n except:\n is_head = 0\n request = post(url=self.base_url + 'api/services/etender/division/AddUserToDivision',\n headers=self.headers,\n data=json.dumps({'userid': kwargs.get('user').get('userid'),\n 'divisionid': kwargs.get('division').get('id'),\n 'isHead': is_head}))\n print('Adding result: ', json.loads(request.content))\n return json.loads(request.content)" ]
[ "0.74806035", "0.7402917", "0.73061603", "0.7215162", "0.720623", "0.7198249", "0.6962066", "0.6901342", "0.68765295", "0.6858431", "0.6735927", "0.6672501", "0.66710955", "0.6659128", "0.6647979", "0.65809935", "0.6571874", "0.646748", "0.6427779", "0.6386597", "0.62940246", "0.6280333", "0.62549394", "0.61948746", "0.61717635", "0.6171117", "0.61343294", "0.61324716", "0.6131206", "0.61292374", "0.611639", "0.6091987", "0.6084804", "0.60679567", "0.6056225", "0.60253674", "0.59916365", "0.59801465", "0.597571", "0.5969927", "0.59585756", "0.5949648", "0.5931489", "0.5927645", "0.59264314", "0.5910277", "0.59072506", "0.5889663", "0.5887331", "0.5860543", "0.58429223", "0.5842214", "0.5837445", "0.5834859", "0.5830742", "0.5828205", "0.57891965", "0.5779917", "0.5757", "0.57515156", "0.5742987", "0.57417506", "0.5737021", "0.5726234", "0.5704888", "0.56987673", "0.56960356", "0.56554335", "0.5653201", "0.5653201", "0.5653201", "0.56493783", "0.56408286", "0.5629584", "0.5614783", "0.5613126", "0.56073976", "0.55976975", "0.5592314", "0.5581693", "0.55804557", "0.5566832", "0.55626523", "0.55504364", "0.5545981", "0.5543884", "0.55419284", "0.5530253", "0.55235106", "0.5516214", "0.5500761", "0.54882777", "0.5479146", "0.5474335", "0.5470318", "0.54659325", "0.546139", "0.5454438", "0.54541916", "0.5444829" ]
0.71962476
6
Assembles tag defaults in the specified compartment and any parent compartments to determine the tags to apply. Tag defaults from parent compartments do not override tag defaults referencing the same tag in a compartment lower down the hierarchy. This set of tag defaults includes all tag defaults from the current compartment back to the root compartment.
def assemble_effective_tag_set(self, compartment_id, **kwargs):
    resource_path = "/tagDefaults/actions/assembleEffectiveTagSet"
    method = "GET"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "lifecycle_state"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "assemble_effective_tag_set got unknown kwargs: {!r}".format(extra_kwargs))

    if 'lifecycle_state' in kwargs:
        lifecycle_state_allowed_values = ["ACTIVE"]
        if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
            raise ValueError(
                "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
            )

    query_params = {
        "compartmentId": compartment_id,
        "lifecycleState": kwargs.get("lifecycle_state", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[TagDefaultSummary]")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[TagDefaultSummary]")
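A similarly hedged sketch of calling this operation, again assuming an OCI `IdentityClient`; the client setup, compartment OCID, and the summary attribute names printed below are assumptions for illustration:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Returns the effective set of tag defaults for the compartment, assembled up to the root compartment.
effective_defaults = identity.assemble_effective_tag_set(
    compartment_id="ocid1.compartment.oc1..exampleuniqueID",  # placeholder OCID
    lifecycle_state="ACTIVE").data

for tag_default in effective_defaults:
    print(tag_default.tag_definition_name, tag_default.value)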
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initDefaults(self):\n return _libsbml.Compartment_initDefaults(self)", "def initDefaults(self):\n return _libsbml.CompartmentGlyph_initDefaults(self)", "def set_defaults(self, compmgr=None):\n for section, default_options in self.defaults(compmgr).items():\n for name, value in default_options.items():\n if not ProductSetting.exists(self.env, self.product,\n section, name):\n if any(parent[section].contains(name, defaults=False)\n for parent in self.parents):\n value = None\n self.set(section, name, value)", "def propagate_defaults(config_doc):\n for group_name, group_doc in config_doc.items():\n if isinstance(group_doc, dict):\n defaults = group_doc.get('defaults', {})\n\n for item_name, item_doc in group_doc.items():\n if item_name == 'defaults':\n continue\n if isinstance(item_doc, dict):\n\n group_doc[item_name] = \\\n dict_merge_pair(copy.deepcopy(defaults), item_doc)\n\n return config_doc", "def init_defaults(self, defaults):\r\n for (sect, opt, default) in defaults:\r\n self._default(sect, opt, default)", "def defaults():\n global __preset_staging\n \n t = TreeDict('Default_Parameter_Tree', __defaultpresettree__ = True)\n __preset_staging[id(t)] = t\n return t", "def replace_defaults(d):\n\n # remove the defaults section\n defaults = d.pop('.defaults')\n\n # look for default tags and replace them\n for k, v in defaults.items():\n recursive_search_replace(d, '!' + k + '!', v)", "def assign_defaults(self):\n\n def module_default_sort_key(module):\n sort_key = (\n 1 if module.marked_as_default else -1,\n module.version,\n module.variant,\n -self.index(module.modulepath),\n )\n return sort_key\n\n self.defaults = {}\n grouped = groupby(\n [module for path in self.path for module in path.modules], lambda x: x.name\n )\n for (_, modules) in grouped:\n for module in modules:\n module.is_default = False\n if len(modules) > 1:\n modules = sorted(modules, key=module_default_sort_key, reverse=True)\n modules[0].is_default = True\n self.defaults[modules[0].name] = modules[0]", "def buildTagMap(default, *args):\r\n built = {}\r\n for portion in args:\r\n if hasattr(portion, 'items'):\r\n #It's a map. Merge it.\r\n for k,v in portion.items():\r\n built[k] = v\r\n elif isList(portion):\r\n #It's a list. Map each item to the default.\r\n for k in portion:\r\n built[k] = default\r\n else:\r\n #It's a scalar. 
Map it to the default.\r\n built[portion] = default\r\n return built", "def set_config_all_to_defaults():\n logging.debug(\"Creating default config\")\n for section in all_defaults:\n set_config_section_to_defaults(section)\n global config_changed\n config_changed = True", "def setDefaults(self, cs):\n self._xsBlockRepresentation = cs[\"xsBlockRepresentation\"]\n self._disableBlockTypeExclusionInXsGeneration = cs[\n \"disableBlockTypeExclusionInXsGeneration\"\n ]\n for xsId, xsOpt in self.items():\n xsOpt.setDefaults(\n cs[\"xsBlockRepresentation\"],\n cs[\"disableBlockTypeExclusionInXsGeneration\"],\n )", "def get_default_vpas(self, composition_space):\n\n default_vpas = {}\n for element in composition_space.get_all_elements():\n default_vpas[element.symbol] = self.all_default_vpas[\n element.symbol]\n return default_vpas", "def _merge_with_default_values(self, cr, uid, external_session, ressource, vals, sub_mapping_list, defaults=None, context=None):\n if not defaults: return vals\n for key in defaults:\n if not key in vals:\n vals[key] = defaults[key]\n return vals", "def _update_default_configs(\n default_configs: tuple[dict[Any, Any]], passed_configs: tuple[dict[Any, Any]]\n ):\n\n for default_config, passed_config in zip(default_configs, passed_configs):\n if passed_config is not None:\n update_dict_recursively(default_config, passed_config)", "def _setup_parents(self, parents=None):\n from trac import config\n self.parents = (parents or [])\n for filename in self.get('inherit', 'file').split(','):\n filename = Section._normalize_path(filename.strip(), self.env)\n self.parents.append(config.Configuration(filename))", "def _apply_defaults(self):\n # Applies normal parameter defaults\n for scalar_parameter, value in self._DEFAULT_PARAMETER_SCALARS.items():\n if scalar_parameter not in self.parameters:\n self.parameters[scalar_parameter] = copy.copy(value)\n\n # Applies defaults to all ramp parameters\n for table_parameter, table in self._DEFAULT_PARAMETER_TABLES.items():\n self.parameters[table_parameter] = [list(tup) for tup in table]\n self.parameters['_' + table_parameter] = zip(*self.parameters[table_parameter])", "def list_tag_defaults(self, **kwargs):\n resource_path = \"/tagDefaults\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"id\",\n \"compartment_id\",\n \"tag_definition_id\",\n \"lifecycle_state\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_tag_defaults got unknown kwargs: {!r}\".format(extra_kwargs))\n\n if 'lifecycle_state' in kwargs:\n lifecycle_state_allowed_values = [\"ACTIVE\"]\n if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:\n raise ValueError(\n \"Invalid value for `lifecycle_state`, must be one of {0}\".format(lifecycle_state_allowed_values)\n )\n\n query_params = {\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"id\": kwargs.get(\"id\", missing),\n \"compartmentId\": kwargs.get(\"compartment_id\", missing),\n \"tagDefinitionId\": kwargs.get(\"tag_definition_id\", missing),\n \"lifecycleState\": kwargs.get(\"lifecycle_state\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n 
retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[TagDefaultSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[TagDefaultSummary]\")", "def ancestor_append_tags(self, tags):\n\n for ancestor in self.lineage():\n ancestor.append_tags(tags)", "def _SetCompoundPriorities(self):\n priority = self._GetMaxCommonPriority()\n for c in self.reactants:\n c.compound.SetSpeciesGroupPriority(priority)", "def defaultize(self):\n if self.root is None:\n genome.initialize(self)\n self.root.defaultize()", "def __init__(self, tagged_sents, default_tag='nc0s000'):\n self._default_tag = default_tag", "def to_tag_spec(self, tags):\n\n tag_spec = []\n for child in self.all_children():\n if not child.children:\n child_spec = [{'equals': t} for t in child.path()]\n tag_spec.append({'section': child_spec, 'add_tags': tags})\n return tag_spec", "def fill_in_empty_tags(prior, tags):\n print \"filling in empty entries for tags\"\n for key in tags:\n if not key in prior:\n prior[key] = {}\n for key1 in tags:\n for key2 in tags:\n prior[key1].setdefault(key2, 0.0)", "def _unset_defaults_and_overrides(self):\n for info, group in self._all_opt_infos():\n info.pop('default', None)\n info.pop('override', None)", "def set_attrs_default(input_object, attr_name_list = None):\n if attr_name_list is None:\n attr_name_list = []\n if len(attr_name_list) > 0:\n attr_list = [input_object.attr(attr_name) for attr_name in attr_name_list]\n else:\n attr_list = general.get_channelbox_attributes(input_object)\n\n for attr in attr_list:\n current_val = attr.get()\n if hasattr(attr, 'addAttr'):\n attr.addAttr(e = True, defaultValue = current_val)", "def __init__(self, defaults=None, default_sec=\"Uncategorized\"):\n super(XFasterConfig, self).__init__(dict_type=OrderedDict)\n self.default_sec = default_sec\n self.add_section(default_sec)\n if defaults is not None:\n self.update(defaults)", "def __defaults__(self): \n self.tag = 'Component'\n self.origin = [[0.0,0.0,0.0]]", "def content(tmp_loc, ref_names_dict, order):\n \n fl = '[Content_Types].xml'\n inp_path = '/'.join([tmp_loc, fl])\n out_path = '/'.join([output_path, fl])\n \n cnt_lst = []\n asset_lst = []\n def_att = []\n d = dict()\n \n root1,tree1 = gen_tree(inp_path)\n root2,tree2 = gen_tree(out_path)\n \n # get all the extensions belongs to \"Default\" tag\n for relation in root2:\n if 'Default' in relation.tag:\n def_att.append(relation.attrib['Extension'])\n else:\n break\n \n for relation in root1:\n if 'Override' in relation.tag:\n attrib = relation.attrib['PartName'][1:]\n try:\n cnt = attrib.split('ppt/')[-1]\n ini = '/ppt/'\n except:\n cnt = attrib\n ini = '/'\n if cnt in ref_names_dict.keys():\n relation.attrib['PartName'] = f'{ini}{ref_names_dict[cnt]}'\n cnt_lst.append(relation)\n # asset_lst.append(relation.attrib['PartName'])\n else:\n cnt_lst.append(relation)\n if relation.attrib['PartName'] not in asset_lst:\n asset_lst.append(relation.attrib['PartName'])\n else:\n attrib = relation.attrib['Extension']\n if attrib not in def_att:\n cnt_lst.append(relation)\n # asset_lst.append(relation.attrib['Extension'])\n # deal with the assest_lst\n # print(\"AA: \", asset_lst)\n cnt_lst = natsort.natsorted(cnt_lst)\n for ele 
in cnt_lst:\n prev = tree2.find(ele.tag)\n prev.addnext(ele)\n \n tree2.write(out_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n \n unq_attr = []\n for relation in root2:\n if 'Override' in relation.tag:\n if relation.attrib['PartName'] not in unq_attr:\n unq_attr.append(relation.attrib['PartName'])\n else:\n root2.remove(relation)\n tree2.write(out_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)", "def _add_default_tags(self):\n self.tags.add_tag('ban', required=True)", "def add_default_options(self):\n\n options = getattr(self.parent, \"pyautodoc_set_default_option\", [])\n for option in options:\n self.set_default_option(option)", "def set_defaults(self, agents):\n for a in agents:\n for k, v in a.get_defaults().items():\n self.env[k] = v", "def resolve_kwdefaults(sign: inspect.Signature) -> Dict[str, Any]:\n kwdefaults = dict() # type: Dict[str, Any]\n\n # Add to the defaults all the values that are needed by the contracts.\n for param in sign.parameters.values():\n if param.default != inspect.Parameter.empty:\n kwdefaults[param.name] = param.default\n\n return kwdefaults", "def initDefaults(self):\n return _libsbml.Layout_initDefaults(self)", "def find_defaults(self):\n\n defaults = self.tree.findall('default')\n default_remote = None\n default_revision = None\n\n if len(defaults) > 1 and self.fail_on_invalid:\n raise InvalidManifest(\n 'More than one default entry, must be unique'\n )\n\n try:\n default_remote = defaults[-1].get('remote')\n default_revision = defaults[-1].get('revision', 'master')\n except IndexError:\n pass # Leave defaults to None\n\n self.defaults = {\n 'remote': default_remote, 'revision': default_revision\n }", "def set_default_dna_options(treebuilder):\n treebuilder.options = get_default_options()", "def defaults(file):\n\n\tUNCAT_TAGID = 47\n\tNOSERIES_TAGID = 375\n\n\treturn [NOSERIES_TAGID, UNCAT_TAGID]", "def sections(self, compmgr=None, defaults=True):\n sections = set(to_unicode(s) \\\n for s in ProductSetting.get_sections(self.env, self.product))\n for parent in self.parents:\n sections.update(parent.sections(compmgr, defaults=False))\n if defaults:\n sections.update(self.defaults(compmgr))\n return sorted(sections)", "def _populate_default_values(self):\n\n if 'input_data' not in self._definition:\n self._definition['input_data'] = []\n for input_dict in self._definition['input_data']:\n if 'required' not in input_dict:\n input_dict['required'] = True\n\n if 'jobs' not in self._definition:\n self._definition['jobs'] = []\n for job_dict in self._definition['jobs']:\n if 'recipe_inputs' not in job_dict:\n job_dict['recipe_inputs'] = []\n if 'dependencies' not in job_dict:\n job_dict['dependencies'] = []\n for dependency_dict in job_dict['dependencies']:\n if 'connections' not in dependency_dict:\n dependency_dict['connections'] = []", "def _write_overrides(\n self, node, parent=None, parent_packmode=None, path_parts=tuple(), root=None\n ):\n if root is None:\n root = node\n if (parent is root) or (\n parent_packmode and node[\"packmode\"] != parent_packmode\n ):\n self.overrides[str(PurePath(*path_parts))] = node[\"packmode\"]\n for key, child in node[\"children\"].items():\n self._write_overrides(\n node=child,\n parent=node,\n parent_packmode=node[\"packmode\"],\n path_parts=path_parts + (key,),\n root=root,\n )", "def set_default_protein_options(treebuilder):\n treebuilder.options = get_default_options()", "def combine_pax_configs(config, overrides):\n # TODO: we should soon be able 
to get this from pax, but let's wait a while to prevent incompatibilties\n for section_name, stuff in overrides.items():\n config.setdefault(section_name, {})\n config[section_name].update(stuff)\n return config", "def initDefaults(self):\n return _libsbml.SpeciesReference_initDefaults(self)", "def merge_new_overrides():\n # Take the dex config as is:\n new_doc = {'config': copy.deepcopy(DEFINES['dex_config'])}\n # Convert old dex certs.web.secret to https-tls volume/volumeMounts\n mount = {'mountPath': get_httpstls_mount(), 'name': 'https-tls'}\n vol = {'secret': {'secretName': get_httpstls_secret(),\n 'defaultMode': DEFAULT_HTTPSTLS_MODE},\n 'name': 'https-tls'}\n # Take 'extra' volumes and mounts that may exist in old dex\n # This is expected to be the WAD certificate\n volumes = []\n volumeMounts = []\n if 'volumes' in DEFINES:\n volumes = copy.deepcopy(DEFINES['volumes'])\n if 'volumeMounts' in DEFINES:\n volumeMounts = copy.deepcopy(DEFINES['volumeMounts'])\n\n # only add volumes/mounts if 'extra' was specified, or\n # if there was non-default mount\n if volumes or 'tls_secret' in DEFINES:\n volumes.append(vol)\n if volumeMounts or 'dex_https_tlsCert' in DEFINES:\n volumeMounts.append(mount)\n if volumes:\n new_doc['volumes'] = volumes\n if volumeMounts:\n new_doc['volumeMounts'] = volumeMounts\n return new_doc", "def _set_default_attributes(self):\n # Default input attributes\n self._has_studio_override = False\n self._had_studio_override = False\n\n self._is_overriden = False\n self._was_overriden = False\n\n self._is_modified = False\n self._is_invalid = False\n\n self._is_nullable = False\n self._as_widget = False\n self._is_group = False\n\n # If value should be stored to environments\n self._env_group_key = None\n\n self._any_parent_as_widget = None\n self._any_parent_is_group = None\n\n # Parent input\n self._parent = None\n\n # States of inputs\n self._state = None\n self._child_state = None\n\n # Attributes where values are stored\n self.default_value = NOT_SET\n self.studio_value = NOT_SET\n self.override_value = NOT_SET\n\n # Log object\n self._log = None\n\n # Only for develop mode\n self.defaults_not_set = False", "def __init__(self, tagged_sents, default_tag='nc0s000'):\n \n self.default_tag = default_tag\n \n self.word_tags = defaultdict(lambda: defaultdict(int))\n \n for sent in list(tagged_sents):\n for word, tag in sent:\n self.word_tags[word][tag] += 1\n\n self.word_tags = dict(self.word_tags)", "def tag(nodes, arg='', split=True, default=0, add_parent_space=True, add_default_spaces=True, zero_node=None, const_node=None):\n\n nodes = mc.ls(nodes)\n orig_const_node = const_node\n orig_zero_node = zero_node\n\n for node in nodes:\n\n # get zero if not specified\n if orig_zero_node:\n zero_node = orig_zero_node\n else:\n zero_node = node+'_ZERO'\n\n # get const node - this is what the parent constraint will be thrown onto\n if orig_const_node:\n const_node = orig_const_node\n else:\n const_node = node+'_CONST'\n\n if add_parent_space and not mc.objExists(zero_node):\n mc.warning('Cannot find a zero for '+node)\n continue\n\n if not mc.objExists(const_node):\n mc.warning('Cannot find a const for '+const_node)\n continue\n\n # build arg\n arg = arg.strip()\n if ' ' in arg:\n arg = re.sub(' +',' ', arg)\n\n arg_kwargs = {\n 'arg': arg,\n 'split': split,\n 'default': default,\n 'const_node': const_node,\n 'zero_node': zero_node,\n 'add_default_spaces': add_default_spaces,\n 'add_parent_space': add_parent_space\n }\n\n # create + set attr\n if not 
mc.objExists(node+'.tagSpaces'):\n mc.addAttr(node, ln='tagSpaces', dt='string', hidden=False)\n\n mc.setAttr(node+'.tagSpaces', str(arg_kwargs), type='string')\n\n # This convert all this legacy sloppy crap to the new hotness\n space_obj = Space(node)\n space_obj.set_data()\n\n return arg", "def _set_default_attrs(self, obj, subs):\n for attr_name, attr_subs in subs.iteritems():\n if not getattr(obj, attr_name, None):\n for newattr_name, newattr_val in attr_subs.iteritems():\n setattr(obj, newattr_name, newattr_val)", "def _to_config_recurse(config_cls, environ, prefixes, default=RAISE):\n # We keep track of values we actually got from the getter vs those we set\n # from the `ConfigEntry` default value\n got = {}\n defaulted = {}\n missing_vars = set()\n\n for attr_obj in attr.fields(config_cls):\n try:\n ce = attr_obj.metadata[CNF_KEY]\n except KeyError:\n continue\n name = attr_obj.name\n\n if ce.sub_cls is not None:\n prefix = ce.sub_cls._prefix or name\n got[name] = _to_config_recurse(\n ce.sub_cls, environ, (*prefixes, prefix), default=ce.default\n )\n else:\n getter = ce.callback or _default_getter\n try:\n got[name] = getter(environ, attr_obj.metadata, prefixes, name)\n except MissingEnvValueError as exc:\n if isinstance(ce.default, Raise):\n missing_vars |= set(exc.args)\n else:\n defaulted[name] = (\n attr.NOTHING\n if isinstance(ce.default, attr.Factory)\n else ce.default\n )\n\n if missing_vars:\n # If we were told to raise OR if we got *any* values for our attrs, we\n # will raise a `MissingEnvValueError` with all the missing variables\n if isinstance(default, Raise) or got:\n raise MissingEnvValueError(*missing_vars) from None\n\n # Otherwise we will simply use the default passed into this call.\n # Should be no need to handle `Factory`s here.\n return default\n\n # Merge the defaulted and actually collected values into the config type\n defaulted.update(got)\n return config_cls(**defaulted)", "def set_default_configs(self):\n\n raise Exception(\"Child classes must override set_default_configs().\")", "def twolevel_default_params(defaults):\n def wrap(function):\n def withargs(*args, **kwargs):\n merged = {}\n merged.update(defaults)\n for k, v in kwargs.items():\n if type(v) == dict and k in merged and type(merged[k]) == dict:\n merged[k].update(v)\n else:\n merged[k] = v\n return function(*args, **merged)\n\n return withargs\n\n return wrap", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def _get_tags(self):\n if hasattr(self, \"_default_tags\"):\n tags = self._default_tags()\n else:\n tags = deepcopy(_default_tags)\n dynamic_tags = {}\n for cl in reversed(inspect.getmro(self.__class__)):\n if hasattr(cl, \"_more_static_tags\"):\n more_tags = cl._more_static_tags()\n tags.update(more_tags)\n if hasattr(cl, \"_more_tags\"):\n more_tags = cl._more_tags(self)\n dynamic_tags.update(more_tags)\n tags.update(dynamic_tags)\n\n return tags", "def initialize_children(self, bibs):\n\t\tpass", "def iterate(self, compmgr=None, defaults=True):\n options = set()\n name_str = self.name\n for setting in ProductSetting.select(self.env,\n where={'product': self.product,\n 'section': name_str}):\n option = self.optionxform(setting.option)\n options.add(option)\n yield option\n for parent in self.config.parents:\n for option in parent[self.name].iterate(defaults=False):\n loption = self.optionxform(option)\n if loption not in options:\n options.add(loption)\n yield option\n if defaults:\n for section, option in Option.get_registry(compmgr).keys():\n if section == 
self.name and \\\n self.optionxform(option) not in options:\n yield option", "def _add_merged_attributes(node, all_recipes, all_roles):\n # Get cookbooks from extended recipes\n attributes = {}\n for recipe in node['recipes']:\n # Find this recipe\n found = False\n for r in all_recipes:\n if recipe == r['name']:\n found = True\n for attr in r['attributes']:\n if r['attributes'][attr].get('type') == \"hash\":\n value = {}\n else:\n value = r['attributes'][attr].get('default')\n # Attribute dictionaries are defined as a single\n # compound key. Split and build proper dict\n build_dct(attributes, attr.split(\"/\"), value)\n if not found:\n error = \"Could not find recipe '{0}' while \".format(recipe)\n error += \"building node data bag for '{0}'\".format(node['name'])\n abort(error)\n\n # Get default role attributes\n for role in node['roles']:\n for r in all_roles:\n if role == r['name']:\n update_dct(attributes, r['default_attributes'])\n\n # Get normal node attributes\n non_attribute_fields = [\n 'id', 'name', 'role', 'roles', 'recipes', 'run_list', 'ipaddress']\n node_attributes = {}\n for key in node:\n if key in non_attribute_fields:\n continue\n node_attributes[key] = node[key]\n update_dct(attributes, node_attributes)\n\n # Get override role attributes\n for role in node['roles']:\n for r in all_roles:\n if role == r['name']:\n update_dct(attributes, r['override_attributes'])\n # Merge back to the original node object\n node.update(attributes)", "def _autoInitPars(self):\n for p in self._pars:\n setattr(self,p,self.defaultparval)", "def defaults() -> dict:\n pass", "def setup_defaults(self):\n status = self._lib_vscf_ecc.vscf_ecc_setup_defaults(self.ctx)\n VscfStatus.handle_status(status)", "def _merge_and_reorder(\n inherited_fields: Dict[str, Union[Annotation, Input, Output]],\n cls_fields: Dict[str, Union[Annotation, Input, Output]],\n ) -> Dict[str, Union[Annotation, Input, Output]]:\n\n def _split(\n _fields: Dict[str, Union[Annotation, Input, Output]]\n ) -> Tuple[Dict[str, Union[Annotation, Input, Output]], Dict[str, Union[Annotation, Input, Output]]]:\n \"\"\"Split fields to two parts from the first default field.\n\n :param _fields: The fields\n :type _fields: Dict[str, Union[Annotation, Input, Output]]\n :return: A 2-tuple of (fields with no defaults, fields with defaults)\n :rtype: Tuple[Dict[str, Union[Annotation, Input, Output]], Dict[str, Union[Annotation, Input, Output]]]\n \"\"\"\n _no_defaults_fields, _defaults_fields = {}, {}\n seen_default = False\n for key, val in _fields.items():\n if val.get(\"default\", None) or seen_default:\n seen_default = True\n _defaults_fields[key] = val\n else:\n _no_defaults_fields[key] = val\n return _no_defaults_fields, _defaults_fields\n\n inherited_no_default, inherited_default = _split(inherited_fields)\n cls_no_default, cls_default = _split(cls_fields)\n # Cross comparison and delete from inherited_fields if same key appeared in cls_fields\n # pylint: disable=consider-iterating-dictionary\n for key in cls_default.keys():\n if key in inherited_no_default.keys():\n del inherited_no_default[key]\n for key in cls_no_default.keys():\n if key in inherited_default.keys():\n del inherited_default[key]\n return OrderedDict(\n {\n **inherited_no_default,\n **cls_no_default,\n **inherited_default,\n **cls_default,\n }\n )", "def _fix_treetags(self, tree):\n for element in tree:\n element.tag = element.tag.split('}')[1]\n if len(element.getchildren()) > 0:\n self._fix_treetags(element)\n return tree", "def 
autoprovisioning_node_pool_defaults(self) -> Optional[pulumi.Input['AutoprovisioningNodePoolDefaultsArgs']]:\n return pulumi.get(self, \"autoprovisioning_node_pool_defaults\")", "def recursive_make_defaultdict(conf):\n if isinstance(conf, dict):\n for key in conf.keys():\n conf[key] = recursive_make_defaultdict(conf[key])\n return defaultdict(lambda: None, conf)\n return conf", "def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": None,\n \"attribute\": None,\n \"index_annotation\": None,\n }\n )\n return config", "def test_defaultdict_config():\n lang_configs = defaultdict(lambda: dict(processors=\"tokenize\"))\n run_multilingual_pipeline(en_has_dependencies=False, fr_has_dependencies=False, lang_configs=lang_configs)\n\n lang_configs = defaultdict(lambda: dict(processors=\"tokenize\"))\n lang_configs[\"en\"] = {\"processors\": \"tokenize,pos,lemma,depparse\"}\n run_multilingual_pipeline(en_has_dependencies=True, fr_has_dependencies=False, lang_configs=lang_configs)", "def autodefaults (self):\r\n\r\n self.defaults_from_notes(identifying_key=EMPTYCHAR,\r\n mark=EQUAL,\r\n obj=self.default_dict['commands'],\r\n entrytext=COMMANDMACROSCRIPT)", "def _getGroupDefaults(self):\n defaults = self.getDefaultGroupContainer(\n _name = \"defaults\",\n diff_command = self.general.diff_command,\n cia_rpc_server = self.general.cia_rpc_server,\n )\n try:\n self._passConfig(defaults, \"defaults\")\n except ConfigSectionNotFoundError:\n # [defaults] is optional\n pass\n else:\n self._config.remove_section('defaults')\n\n return defaults", "def get_defaults():\n\n return {\n 'base_types': _get_base_types(),\n 'template_types': _get_template_types(),\n 'refined_types': _get_refined_types(),\n 'humannames': _get_humannames(),\n 'argument_kinds': _get_argument_kinds(),\n 'variable_namespace': {},\n 'type_aliases': _get_type_aliases(),\n 'cpp_types': _get_cpp_types(),\n 'numpy_types': _get_numpy_types(),\n 'from_pytypes': _get_from_pytypes(),\n 'cython_ctypes': _get_cython_ctypes(),\n 'cython_cytypes': _get_cython_cytypes(),\n 'cython_pytypes': _get_cython_pytypes(),\n 'cython_cimports': _get_cython_cimports(),\n 'cython_cyimports': _get_cython_cyimports(),\n 'cython_pyimports': _get_cython_pyimports(),\n 'cython_functionnames': _get_cython_functionnames(),\n 'cython_classnames': _get_cython_classnames(),\n 'cython_c2py_conv': _get_cython_c2py_conv(),\n 'cython_py2c_conv_vector_ref': CYTHON_PY2C_CONV_VECTOR_REF,\n 'cython_py2c_conv': _get_cython_py2c_conv(),\n }", "def _node_defaults(self):\n parent = super(QTree, self)._node_defaults()\n parent[\"state\"] = np.zeros([self.size, self.size])\n parent[\"network\"] = self\n return parent", "def update_gui_defaults(GUI_defaults):\n # failure flag\n config_import_error = False\n \n # xml source directory\n src_dir = os.getcwd() + '\\\\src'\n \n # attempt to parse the xml file and get it's root\n try:\n tree = ET.parse(src_dir + '\\\\pySCPI_config.xml')\n root = tree.getroot()\n \n except (IOError, ET.ParseError):\n # parsing failed for some reason\n config_import_error = True\n GUI_defaults.log_error('*** pySCPI_config.xml is'\n 'missing or corrupt ***')\n # end try\n \n # import the default values from the xml file\n if not config_import_error:\n \n # list of tags to look for\n config_tags = ['default_filename', 'default_delay', \n 'default_length', 'default_dp']\n \n # iterate through tags\n for tag in config_tags:\n # find each tag\n config_element = root.findall(tag)\n \n # if there is only one of a tag\n if 
len(config_element) == 1:\n # convert it to text\n config_text = config_element[0].text\n \n # update the appropriate field\n if tag == 'default_filename':\n GUI_defaults.update_filename(config_text)\n \n elif tag == 'default_delay':\n GUI_defaults.update_delay(config_text)\n \n elif tag == 'default_length':\n GUI_defaults.update_length(config_text)\n \n elif tag == 'default_dp':\n GUI_defaults.update_dp(config_text)\n # end if\n \n else:\n GUI_defaults.log_error('*** There is the wrong number '\n 'of ' + tag + ' declarations in '\n 'pySCPI_config.xml ***') \n # end if\n # end for\n \n # find the default addresses\n address_elements = root.findall('addresses')\n \n # if there are addresses\n if (len(address_elements) == 1) and (len(address_elements[0]) > 0):\n for element in address_elements[0]:\n # add each address to the list\n GUI_defaults.add_address(element.tag, element.get('address'))\n # end for\n \n else:\n GUI_defaults.log_error('*** No addresses were provided in '\n 'pySCPI_config.xml ***') \n # end if\n \n # find the default commands\n command_elements = root.findall('default_commands')\n \n # if there are commands\n if (len(command_elements) == 1) and (len(command_elements[0]) > 0):\n for command in command_elements[0]:\n # add each command to the list\n GUI_defaults.add_command(command.text)\n # end for\n\n else:\n GUI_defaults.log_error('*** No commands were provided in '\n 'pySCPI_config.xml ***') \n # end if \n # end if", "def get_defaults(self):\n default_dict = {}\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if defaults:\n default_dict = dict(zip(args[-len(defaults):], defaults))\n return default_dict", "def update_default_values(self, parent_values):\n raise NotImplementedError(\n \"{} does not have implemented `update_default_values`\".format(self)\n )", "def _fill_in_default_kwargs(\n node: torch.fx.Node,\n) -> Tuple[List[fx_type_utils.Argument], Dict[str, fx_type_utils.Argument]]:\n\n # TODO(titaiwang): aten::sym_size has overload, but fx graph is using\n # overloadpacket for some reasons.\n # https://github.com/pytorch/pytorch/issues/97201\n # We manually assigned overload for aten::sym_size.\n if hasattr(node.target, \"_schema\"):\n node_schema = node.target._schema # type: ignore[union-attr]\n else:\n node_schema = torch.ops.aten.sym_size.int._schema # type: ignore[union-attr]\n\n # This function assumes the order of arguments in FX op is the\n # same as the order of arguments in TorchScript op.\n complete_args: List[fx_type_utils.Argument] = []\n complete_kwargs: Dict[str, fx_type_utils.Argument] = {}\n\n if inspect.isbuiltin(node.target):\n complete_args = list(node.args)\n else:\n for i, expected_arg in enumerate(node_schema.arguments):\n if i < len(node.args):\n complete_args.append(node.args[i])\n elif expected_arg.name in node.kwargs:\n complete_kwargs[expected_arg.name] = node.kwargs[expected_arg.name]\n else:\n # Get default from schema.\n complete_kwargs[expected_arg.name] = expected_arg.default_value\n\n return complete_args, complete_kwargs", "def _fix_tags(self):\n logging.info('--- Fix Tags ---')\n saved_originals = {\n key: self.git.config.get(key, exit_on_error=False).strip()\n for key in (CI_USER_NAME, CI_USER_EMAIL)}\n try:\n for tag_name in sorted(self.tags):\n # Get commit data from the (latest) commit of a\n # svn/tags/… branch (following the convention for svn,\n # there should only be one),\n # produce a git tag using these data\n # and delete the now-obsolete branch.\n tag_name = tag_name.strip()\n tag_id = 
PRX_SVNTAGS_PREFIX.sub('', tag_name)\n commit_data = {\n key: self.git.log(\n '-1', f'--pretty=format:{value}', tag_name)\n for (key, value) in COMMIT_DATA_FORMATS.items()}\n self.git.config(CI_USER_NAME, commit_data[CD_AUTHOR_NAME])\n self.git.config(CI_USER_EMAIL, commit_data[CD_AUTHOR_EMAIL])\n original_git_committer_date = ENV.get(ENV_GIT_COMMITTER_DATE)\n ENV[ENV_GIT_COMMITTER_DATE] = commit_data[CD_DATE]\n self.git.tag(\n '-a', '-m', commit_data[CD_COMMENT], tag_id, tag_name)\n if original_git_committer_date is None:\n del ENV[ENV_GIT_COMMITTER_DATE]\n else:\n ENV[ENV_GIT_COMMITTER_DATE] = original_git_committer_date\n #\n self.git.branch('-d', '-r', tag_name)\n #\n finally:\n # We only change the git config values\n # if there are self.tags available.\n # So it stands to reason we should revert them only in that case.\n if self.tags:\n for (key, value) in saved_originals.items():\n if value:\n self.git.config(key, value)\n else:\n self.git.config.unset(key)\n #\n #\n #\n #", "def default_nested(self, data, many, **kwargs):\n if not data.get(\"metadata\"):\n data[\"metadata\"] = {}\n if not data.get(\"pids\"):\n data[\"pids\"] = {}\n\n return data", "def set_defaults(self, **kw):\n group = kw.pop('group', None)\n for o, v in kw.items():\n self.cfg_fixture.set_default(o, v, group=group)", "def voxel_env_override_defaults(env, parser):\n parser.set_defaults(\n encoder_type='conv',\n encoder_subtype='convnet_simple',\n hidden_size=512,\n obs_subtract_mean=0.0,\n obs_scale=255.0,\n actor_worker_gpus=[0],\n )", "def defaults():\n return {}", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def build_dictionary_element_tree(element, node_attr_fields=NODE_FIELDS, way_attr_fields=WAY_FIELDS,\n default_tag_type='regular'):\n node_attribs = {}\n way_attribs = {}\n way_nodes = []\n tags = [] # Handle secondary tags the same way for both node and way elements\n \n if element == None:\n print ('Element is Null')\n return None\n \n if element.tag == 'node':\n check = check_id(element.attrib['id'])\n \n if not check:\n print ('Node ID is Null or not a number: ', element.attrib['id'])\n fix_it.node_id_bad[element.attrib['id']] += 1\n return None\n \n for attr in element.attrib:\n if attr in node_attr_fields:\n node_attribs[attr] = element.attrib[attr]\n \n for child in element:\n temp = { }\n \n if 'cityracks.' in child.attrib['k']:\n child.attrib['k'] = child.attrib['k'].replace('cityracks.','')\n \n m = correct_chars_re.search(child.attrib['k']) # No match returns None\n\n if not m: \n print ('Node key -- Problem character! 
', 'key = ', child.attrib['k'], ' value = ', child.attrib['v'])\n fix_it.counts['node child key eliminated'] += 1\n infoKey = 'node key: ' + child.attrib['k']\n fix_it.bad_keys[infoKey] += 1\n continue # eliminate the problematic child tag\n \n # Fix value\n fixed = fix_it.fixer(child, 'Node') # Correct or eliminate the child <tag> value\n # Function fix_it returns None if there is a data problem\n if fixed == '$skip':\n fix_it.counts['node tag skipped'] += 1\n continue\n \n if not fixed:\n fix_it.counts['node child value eliminated'] += 1\n continue # Eliminate this child tag\n else:\n temp['id'] = element.attrib['id'] # Save the fixed child tag for writing into csv file\n temp['value'] = fixed\n \n if ':' in child.attrib['k']:\n k = child.attrib['k'].split(':',1)\n temp['type'] = k[0]\n temp['key'] = k[1]\n else:\n temp['key'] = child.attrib['k']\n temp['type'] = default_tag_type\n \n fix_it.counts['node tag count'] += 1 # count the child tags not eliminated\n tags.append(temp)\n \n return {'node': node_attribs, 'node_tags': tags}\n \n elif element.tag == 'way':\n check = check_id(element.attrib['id'])\n \n if not check:\n print ('Way ID is Null or not a number: ', element.attrib['id'])\n fix_it.way_id_bad[element.attrib['id']] += 1\n return None\n \n for attr in element.attrib: \n if attr in way_attr_fields:\n way_attribs[attr] = element.attrib[attr]\n \n position = 0\n for child in element:\n temp = { }\n \n if child.tag == 'tag':\n m = correct_chars_re.search(child.attrib['k']) # No match returns None\n \n if not m:\n print ('Way key -- Problem char! ', 'key = ', child.attrib['k'], ' value = ', child.attrib['v'])\n fix_it.counts['way child key eliminated'] += 1\n infoKey = 'way key: ' + child.attrib['k']\n fix_it.bad_keys[infoKey] += 1\n continue # eliminate the problematic child tag\n \n # Fix value\n fixed = fix_it.fixer(child, 'Way') # Correct or eliminate the child <tag> value\n # Function fix_it returns None if there is a data problem\n if fixed == '$skip':\n fix_it.counts['way tag skipped'] += 1\n continue\n \n if not fixed:\n fix_it.counts['way child value eliminated'] += 1\n continue # Eliminate this child tag\n else:\n temp['id'] = element.attrib['id'] # Save the fixed child tag for writing into csv file\n temp['value'] = fixed\n\n if ':' in child.attrib['k']:\n k = child.attrib['k'].split(':',1)\n temp['type'] = k[0]\n temp['key'] = k[1]\n else:\n temp['key'] = child.attrib['k']\n temp['type'] = default_tag_type\n \n fix_it.counts['way tag count'] += 1 # count the child tags not eliminated\n tags.append(temp)\n \n elif child.tag == 'nd':\n check = check_id(child.attrib['ref'])\n \n if not check:\n print ('Way Node reference is Null or not a number: ', child.attrib['ref'])\n fix_it.way_node_reference_bad[child.attrib['ref']] += 1\n continue\n \n temp['id'] = element.attrib['id']\n temp['node_id'] = child.attrib['ref']\n temp['position'] = position\n position += 1\n fix_it.counts['way node tag count'] += 1 # count the child tags not eliminated\n way_nodes.append(temp)\n \n #print ('way_attribs:\\n', way_attribs)\n #print ('way_nodes:\\n', way_nodes)\n #print ('way_tags:\\n', tags)\n #print ('---------------\\n')\n return {'way': way_attribs, 'way_nodes': way_nodes, 'way_tags': tags}", "def replaceDefaults(d):\n defaults = d.pop('.defaults')\n for k, v in defaults.items():\n recursiveSearchReplace(d, '!' + k + '!', v)", "def _get_function_defaults(func: FunctionType) -> dict[str, Any]:\n # extracted bit from inspect.signature... 
~20x faster\n pos_count = func.__code__.co_argcount\n arg_names = func.__code__.co_varnames\n\n defaults = func.__defaults__ or ()\n\n non_default_count = pos_count - len(defaults)\n positional_args = arg_names[:pos_count]\n\n output = {\n name: defaults[offset]\n for offset, name in enumerate(positional_args[non_default_count:])\n }\n if func.__kwdefaults__:\n output.update(func.__kwdefaults__)\n return output", "def correctDefaultNounTag(idx, tagged_term, tagged_terms, lexicon):\n term, tag, norm = tagged_term\n if tag == 'NND':\n if term.endswith('s'):\n tagged_term[1] = 'NNS'\n tagged_term[2] = term[:-1]\n else:\n tagged_term[1] = 'NN'", "def get_default_config():\n\n config = {}\n\n for name, cls in get_tools().items():\n config[name] = cls.get_default_config()\n\n try:\n workers = multiprocessing.cpu_count() - 1\n except NotImplementedError: # pragma: no cover\n workers = 1\n workers = max(1, min(4, workers))\n\n config.update({\n 'exclude': [],\n 'merge-issues': True,\n 'workers': workers,\n 'disabled': [],\n 'noqa': True,\n 'extends': [],\n 'ignore-missing-extends': False,\n })\n\n return config", "def xml_tag(inp_tag, out_tag):\n tag_dict = OrderedDict()\n sub_tag = OrderedDict()\n for i in range(len(inp_tag)):\n if inp_tag[i] not in out_tag:\n tag_dict[inp_tag[i]] = [inp_tag[i-1]]\n return tag_dict", "def BTagHighLevelAugmenterAlgCfg(ConfigFlags, JetCollection, BTagCollection, Associator, doFlipTagger=False, sequenceName=None, **options):\n\n options = {}\n options['JetCollectionName'] = JetCollection.replace('Track', 'PV0Track') + 'Jets'\n options['BTaggingCollectionName'] = BTagCollection\n options['JetLinkName'] = options['BTaggingCollectionName'] + '.jetLink'\n options['BTagTrackToJetAssociatorName'] = Associator\n options['name'] = (BTagCollection + '_Augment').lower()\n if doFlipTagger: options['FlipTagConfig'] = 'FLIP_SIGN'\n\n # -- create the augmentation algorithm\n if sequenceName:\n acc = ComponentAccumulator(sequenceName)\n acc.addEventAlgo(Analysis__BTagHighLevelAugmenterAlg(**options), sequenceName)\n else:\n acc = ComponentAccumulator()\n acc.addEventAlgo(Analysis__BTagHighLevelAugmenterAlg(**options))\n\n return acc", "def override(self, parent):\n return self.__class__(Cfg._mergedicts(self, parent, True))", "def setdefault(*dicts):\n param_complete = dict(dicts[0])\n for d in dicts[1:]:\n for k,v in d.items():\n param_complete.setdefault(k, v)\n\n return param_complete", "def parse_tag(self, root, fmt, insert_children=True):\n arguments = {}\n extra_args = {}\n children = []\n\n for k, val in root.attrib.iteritems():\n k = k.lower()\n # 'version' is currently the only supported XML attribute.\n if k == 'version' and root.tag == 'odML':\n continue\n\n # We currently do not support XML attributes.\n self.error(\"Attribute not supported, ignoring '%s=%s' \" % (k, val), root)\n\n for node in root:\n node.tag = node.tag.lower()\n self.is_valid_argument(node.tag, fmt, root, node)\n if node.tag in fmt.arguments_keys:\n # this is a heuristic, but works for now\n if node.tag in self.tags and node.tag in fmt.map_keys:\n sub_obj = self.parse_element(node)\n if sub_obj is not None:\n extra_args[fmt.map(node.tag)] = sub_obj\n children.append(sub_obj)\n else:\n tag = fmt.map(node.tag)\n if tag in arguments:\n self.warn(\"Element <%s> is given multiple times in \"\n \"<%s> tag\" % (node.tag, root.tag), node)\n\n # Special handling of values;\n curr_text = node.text.strip() if node.text else None\n if tag == \"values\" and curr_text:\n content = from_csv(node.text)\n 
arguments[tag] = content\n # Special handling of cardinality\n elif tag.endswith(\"_cardinality\") and curr_text:\n arguments[tag] = parse_cardinality(node.text)\n else:\n arguments[tag] = curr_text\n else:\n self.error(\"Invalid element <%s> in odML document section <%s> \"\n % (node.tag, root.tag), node)\n\n check_args = dict(list(arguments.items()) + list(extra_args.items()))\n self.check_mandatory_arguments(check_args, fmt, root.tag, root)\n\n # Instantiate the current odML object with the parsed attributes.\n obj = fmt.create()\n try:\n obj = fmt.create(**arguments)\n except Exception as exc:\n self.error(str(exc), root)\n\n if insert_children:\n for child in children:\n obj.append(child)\n\n return obj", "def _update_attributes(self, parent_node: etree.ElementBase) -> None:\r\n ppj_bool_keys = [\r\n XmlAttributeName.OPTIMIZE,\r\n XmlAttributeName.RELEASE,\r\n XmlAttributeName.FINAL,\r\n XmlAttributeName.ANONYMIZE,\r\n XmlAttributeName.PACKAGE,\r\n XmlAttributeName.ZIP\r\n ]\r\n\r\n other_bool_keys = [\r\n XmlAttributeName.NO_RECURSE,\r\n XmlAttributeName.USE_IN_BUILD\r\n ]\r\n\r\n for node in parent_node.getiterator():\r\n if node.text:\r\n node.text = self.parse(node.text.strip())\r\n\r\n tag = node.tag.replace('{%s}' % self.ppj_root.ns, '')\r\n\r\n if tag == XmlTagName.PAPYRUS_PROJECT:\r\n if XmlAttributeName.GAME not in node.attrib:\r\n node.set(XmlAttributeName.GAME, '')\r\n if XmlAttributeName.FLAGS not in node.attrib:\r\n node.set(XmlAttributeName.FLAGS, self.options.flags_path)\r\n if XmlAttributeName.OUTPUT not in node.attrib:\r\n node.set(XmlAttributeName.OUTPUT, self.options.output_path)\r\n for key in ppj_bool_keys:\r\n if key not in node.attrib:\r\n node.set(key, 'False')\r\n\r\n elif tag == XmlTagName.PACKAGES:\r\n if XmlAttributeName.OUTPUT not in node.attrib:\r\n node.set(XmlAttributeName.OUTPUT, self.options.package_path)\r\n\r\n elif tag == XmlTagName.PACKAGE:\r\n if XmlAttributeName.NAME not in node.attrib:\r\n node.set(XmlAttributeName.NAME, self.project_name)\r\n if XmlAttributeName.ROOT_DIR not in node.attrib:\r\n node.set(XmlAttributeName.ROOT_DIR, self.project_path)\r\n\r\n elif tag in (XmlTagName.FOLDER, XmlTagName.INCLUDE, XmlTagName.MATCH):\r\n if XmlAttributeName.NO_RECURSE not in node.attrib:\r\n node.set(XmlAttributeName.NO_RECURSE, 'False')\r\n if tag in (XmlTagName.INCLUDE, XmlTagName.MATCH):\r\n if XmlAttributeName.PATH not in node.attrib:\r\n node.set(XmlAttributeName.PATH, '')\r\n if tag == XmlTagName.MATCH:\r\n if XmlAttributeName.IN not in node.attrib:\r\n node.set(XmlAttributeName.IN, os.curdir)\r\n if XmlAttributeName.EXCLUDE not in node.attrib:\r\n node.set(XmlAttributeName.EXCLUDE, '')\r\n\r\n elif tag == XmlTagName.ZIP_FILES:\r\n if XmlAttributeName.OUTPUT not in node.attrib:\r\n node.set(XmlAttributeName.OUTPUT, self.options.zip_output_path)\r\n\r\n elif tag == XmlTagName.ZIP_FILE:\r\n if XmlAttributeName.NAME not in node.attrib:\r\n node.set(XmlAttributeName.NAME, self.project_name)\r\n if XmlAttributeName.ROOT_DIR not in node.attrib:\r\n node.set(XmlAttributeName.ROOT_DIR, self.project_path)\r\n if XmlAttributeName.COMPRESSION not in node.attrib:\r\n node.set(XmlAttributeName.COMPRESSION, 'deflate')\r\n else:\r\n node.set(XmlAttributeName.COMPRESSION, node.get(XmlAttributeName.COMPRESSION).casefold())\r\n\r\n elif tag in (XmlTagName.PRE_BUILD_EVENT, XmlTagName.POST_BUILD_EVENT,\r\n XmlTagName.PRE_IMPORT_EVENT, XmlTagName.POST_IMPORT_EVENT):\r\n if XmlAttributeName.DESCRIPTION not in node.attrib:\r\n node.set(XmlAttributeName.DESCRIPTION, 
'')\r\n if XmlAttributeName.USE_IN_BUILD not in node.attrib:\r\n node.set(XmlAttributeName.USE_IN_BUILD, 'True')\r\n\r\n # parse values\r\n for key, value in node.attrib.items():\r\n value = value.casefold() in ('true', '1') if key in ppj_bool_keys + other_bool_keys else self.parse(value)\r\n node.set(key, str(value))", "def setupWidgets(self, parentObject, \n\t\tforceCategory=None, \n\t\tinherit=None, \n\t\tstoreProperties=True, \n\t\tupdateOnly=False):\n\t\tif forceCategory is not None:\n\t\t\tcategory = forceCategory\n\n\t\tif updateOnly:\n\t\t\tstoreProperties = False\n\n\t\tfor widget in parentObject.findChildren(QtWidgets.QWidget):\n\n\t\t\t# Enable expansion of custom rollout group box controls...\n\t\t\tif widget.property('expandable'):\n\t\t\t\tif isinstance(widget, QtWidgets.QGroupBox):\n\t\t\t\t\twidget.setCheckable(True)\n\t\t\t\t\t# widget.setChecked(expand)\n\t\t\t\t\twidget.setFixedHeight(widget.sizeHint().height())\n\t\t\t\t\tif not updateOnly:\n\t\t\t\t\t\twidget.toggled.connect(self.toggleExpandGroup)\n\n\t\t\t# Set up handler for push buttons...\n\t\t\tif widget.property('exec'):\n\t\t\t\tif isinstance(widget, QtWidgets.QPushButton):\n\t\t\t\t\tif not updateOnly:\n\t\t\t\t\t\twidget.clicked.connect(self.execPushButton)\n\n\t\t\t# Set up handlers for different widget types & apply values\n\t\t\tattr = widget.property('xmlTag')\n\t\t\tif attr:\n\t\t\t\tself.base_widget = widget.objectName()\n\t\t\t\tif forceCategory is None:\n\t\t\t\t\tcategory = self.findCategory(widget)\n\t\t\t\tif category:\n\t\t\t\t\twidget.setProperty('xmlCategory', category)\n\n\t\t\t\t\tif inherit:\n\t\t\t\t\t\tvalue = self.prefs.get_attr(category, attr)\n\t\t\t\t\t\t# if value == \"\":\n\t\t\t\t\t\tif value is None:\n\t\t\t\t\t\t\tvalue = inherit.get_attr(category, attr)\n\n\t\t\t\t\t\t\t# widget.setProperty('xmlTag', None)\n\t\t\t\t\t\t\twidget.setProperty('inheritedValue', True)\n\t\t\t\t\t\t\tif widget.toolTip():\n\t\t\t\t\t\t\t\twidget.setProperty('oldToolTip', widget.toolTip())\n\t\t\t\t\t\t\twidget.setToolTip(\"This value is being inherited. 
Change the value to override the inherited value.\")\n\n\t\t\t\t\t\t\t# Apply pop-up menu to remove override - can't get to work here\n\t\t\t\t\t\t\t# self.addContextMenu(widget, \"Remove override\", self.remove_overrides)\n\t\t\t\t\t\t\t# widget.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)\n\n\t\t\t\t\t\t\t# actionRemoveOverride = QtWidgets.QAction(\"Remove override\", None)\n\t\t\t\t\t\t\t# actionRemoveOverride.triggered.connect(self.remove_overrides)\n\t\t\t\t\t\t\t# widget.addAction(actionRemoveOverride)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tvalue = self.prefs.get_attr(category, attr)\n\t\t\t\t\t\texcept AttributeError:\n\t\t\t\t\t\t\tvalue = None\n\n\t\t\t\t\t# Sliders...\n\t\t\t\t\tif isinstance(widget, QtWidgets.QSlider):\n\t\t\t\t\t\tif value is not None:\n\t\t\t\t\t\t\twidget.setValue(int(value))\n\t\t\t\t\t\tif storeProperties:\n\t\t\t\t\t\t\tself.storeValue(category, attr, widget.value())\n\t\t\t\t\t\tif not updateOnly:\n\t\t\t\t\t\t\twidget.valueChanged.connect(self.storeSliderValue)\n\n\t\t\t\t\t# Spin boxes...\n\t\t\t\t\tif isinstance(widget, QtWidgets.QSpinBox):\n\t\t\t\t\t\tif value is not None:\n\t\t\t\t\t\t\twidget.setValue(int(value))\n\t\t\t\t\t\tif storeProperties:\n\t\t\t\t\t\t\tself.storeValue(category, attr, widget.value())\n\t\t\t\t\t\tif not updateOnly:\n\t\t\t\t\t\t\twidget.valueChanged.connect(self.storeSpinBoxValue)\n\n\t\t\t\t\t# Double spin boxes...\n\t\t\t\t\telif isinstance(widget, QtWidgets.QDoubleSpinBox):\n\t\t\t\t\t\tif value is not None:\n\t\t\t\t\t\t\twidget.setValue(float(value))\n\t\t\t\t\t\tif storeProperties:\n\t\t\t\t\t\t\tself.storeValue(category, attr, widget.value())\n\t\t\t\t\t\tif not updateOnly:\n\t\t\t\t\t\t\twidget.valueChanged.connect(self.storeSpinBoxValue)\n\n\t\t\t\t\t# Line edits...\n\t\t\t\t\telif isinstance(widget, QtWidgets.QLineEdit):\n\t\t\t\t\t\tif value is not None:\n\t\t\t\t\t\t\twidget.setText(value)\n\t\t\t\t\t\tif storeProperties:\n\t\t\t\t\t\t\tself.storeValue(category, attr, widget.text())\n\t\t\t\t\t\tif not updateOnly:\n\t\t\t\t\t\t\t# widget.textEdited.connect(self.storeLineEditValue)\n\t\t\t\t\t\t\twidget.textChanged.connect(self.storeLineEditValue)\n\n\t\t\t\t\t# Plain text edits...\n\t\t\t\t\telif isinstance(widget, QtWidgets.QPlainTextEdit):\n\t\t\t\t\t\tif value is not None:\n\t\t\t\t\t\t\twidget.setPlainText(value)\n\t\t\t\t\t\tif storeProperties:\n\t\t\t\t\t\t\tself.storeValue(category, attr, widget.toPlainText())\n\t\t\t\t\t\tif not updateOnly:\n\t\t\t\t\t\t\twidget.textChanged.connect(self.storeTextEditValue)\n\n\t\t\t\t\t# Check boxes...\n\t\t\t\t\telif isinstance(widget, QtWidgets.QCheckBox):\n\t\t\t\t\t\tif value is not None:\n\t\t\t\t\t\t\tif value == True:\n\t\t\t\t\t\t\t\twidget.setCheckState(QtCore.Qt.Checked)\n\t\t\t\t\t\t\telif value == False:\n\t\t\t\t\t\t\t\twidget.setCheckState(QtCore.Qt.Unchecked)\n\t\t\t\t\t\tif storeProperties:\n\t\t\t\t\t\t\tself.storeValue(category, attr, self.getCheckBoxValue(widget))\n\t\t\t\t\t\tif not updateOnly:\n\t\t\t\t\t\t\twidget.toggled.connect(self.storeCheckBoxValue)\n\n\t\t\t\t\t# Radio buttons...\n\t\t\t\t\telif isinstance(widget, QtWidgets.QRadioButton):\n\t\t\t\t\t\tif value is not None:\n\t\t\t\t\t\t\twidget.setAutoExclusive(False)\n\t\t\t\t\t\t\tif value == widget.text():\n\t\t\t\t\t\t\t\twidget.setChecked(True)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\twidget.setChecked(False)\n\t\t\t\t\t\t\twidget.setAutoExclusive(True)\n\t\t\t\t\t\tif storeProperties:\n\t\t\t\t\t\t\tif widget.isChecked():\n\t\t\t\t\t\t\t\tself.storeValue(category, attr, 
widget.text())\n\t\t\t\t\t\tif not updateOnly:\n\t\t\t\t\t\t\twidget.toggled.connect(self.storeRadioButtonValue)\n\n\t\t\t\t\t# Combo boxes...\n\t\t\t\t\telif isinstance(widget, QtWidgets.QComboBox):\n\t\t\t\t\t\t# Add items if history is enabled\n\t\t\t\t\t\tif widget.property('storeHistory'):\n\t\t\t\t\t\t\twidget.setInsertPolicy(widget.InsertAtTop)\n\t\t\t\t\t\t\thistory = self.prefs.get_attr(category, \"%s_history\" % attr)\n\t\t\t\t\t\t\tif history:\n\t\t\t\t\t\t\t\twidget.addItems(history)\n\t\t\t\t\t\t# Add/set current item\n\t\t\t\t\t\tif value is not None:\n\t\t\t\t\t\t\tif widget.findText(value) == -1:\n\t\t\t\t\t\t\t\twidget.insertItem(0, value)\n\t\t\t\t\t\t\twidget.setCurrentIndex(widget.findText(value))\n\t\t\t\t\t\t# Store value in external file\n\t\t\t\t\t\tif storeProperties:\n\t\t\t\t\t\t\tself.storeValue(category, attr, widget.currentText())\n\t\t\t\t\t\t# Connect signals & slots\n\t\t\t\t\t\tif not updateOnly:\n\t\t\t\t\t\t\t# widget.currentTextChanged.connect(self.storeComboBoxValue)\n\t\t\t\t\t\t\tif widget.isEditable():\n\t\t\t\t\t\t\t\twidget.editTextChanged.connect(self.storeComboBoxValue)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\twidget.currentIndexChanged.connect(self.storeComboBoxValue)\n\n\t\t\t\t\t# List widgets...\n\t\t\t\t\t# TODO: Add option to remove duplicate entries\n\t\t\t\t\telif isinstance(widget, QtWidgets.QListWidget):\n\t\t\t\t\t\tif value is not None:\n\t\t\t\t\t\t\t# print(value)\n\t\t\t\t\t\t\twidget.clear()\n\t\t\t\t\t\t\twidget.addItems(value)\n\t\t\t\t\t\tif storeProperties:\n\t\t\t\t\t\t\titems = []\n\t\t\t\t\t\t\tfor i in range(widget.count()):\n\t\t\t\t\t\t\t\titems.append(widget.item(i).text())\n\t\t\t\t\t\t\tself.storeValue(category, attr, items)\n\t\t\t\t\t\tif not updateOnly:\n\t\t\t\t\t\t\twidget.itemChanged.connect(self.storeListWidgetValues)\n\n\t\t\t\t\t# Enable colour chooser buttons...\n\t\t\t\t\telif isinstance(widget, QtWidgets.QToolButton):\n\t\t\t\t\t\tif widget.property('colorChooser'):\n\t\t\t\t\t\t\tif value is not None:\n\t\t\t\t\t\t\t\twidget.setStyleSheet(\"QWidget { background-color: %s }\" % value)\n\t\t\t\t\t\t\t# if storeProperties:\n\t\t\t\t\t\t\t# \tself.storeValue(category, attr, widget.currentText())\n\t\t\t\t\t\t\tif not updateOnly:\n\t\t\t\t\t\t\t\twidget.clicked.connect(self.storeColor)", "def set_default_position(self):\n self.set_joint('elbow', 0, radians=False)\n self.set_joint('shoulder', 150, radians=False)\n for joint in self.chain.joints:\n self.set_joint(joint, self.chain.joints[joint]['default_value'], radians=True)", "def apply_defaults(self, commands, args):\r\n\r\n args = args[:]\r\n\r\n if RcFile._DISABLE_PANTS_RC_OPTION in args:\r\n return args\r\n\r\n config = Config.create_parser()\r\n read_from = config.read(self.paths)\r\n if not read_from:\r\n log.debug('no rcfile found')\r\n return args\r\n\r\n log.debug('using rcfiles: %s to modify args' % ','.join(read_from))\r\n\r\n def get_rcopts(command, key):\r\n return config.get(command, key).split() if config.has_option(command, key) else []\r\n\r\n commands = list(commands)\r\n if self.process_default:\r\n commands.insert(0, Config.DEFAULT_SECTION)\r\n\r\n for cmd in commands:\r\n opts = get_rcopts(cmd, 'options')\r\n args = (opts + args) if self.default_prepend else (args + opts)\r\n args = get_rcopts(cmd, 'prepend-options') + args + get_rcopts(cmd, 'append-options')\r\n return args", "def initDefaults(self):\n return _libsbml.Species_initDefaults(self)", "def initDefaults(self):\n return _libsbml.ReferenceGlyph_initDefaults(self)", "def 
visit_cname_place(self, node, children):\n # If a value is supplied it will be a single time list, so extract with [0]\n # If no value is supplied for an optional item, default must also be a single item list [default_value]\n w = children.results.get('wrap')\n wrap_value = 1 if not w else w[0]['wrap']\n cplace = {'cname': children.results['name'][0], # Required component\n 'dir': children.results.get('dir', [1])[0], # many optional components with default values\n 'bend': children.results.get('bend', [1])[0],\n 'notch': children.results.get('notch', [0])[0],\n 'wrap': wrap_value,\n }\n return cplace", "def initDefaults(self):\n return _libsbml.SpeciesReferenceGlyph_initDefaults(self)", "def default(self, block, name):\r\n if name in self.inheritable_names and block.parent is not None:\r\n parent = block.get_parent()\r\n if parent:\r\n return getattr(parent, name)\r\n super(InheritingFieldData, self).default(block, name)", "def parse_defaults(self, stmt):\r\n spec_type = stmt['spec_type']\r\n if spec_type in self._defaults:\r\n raise ValueError('More than one default for {}'.format(stmt['spec_type']))\r\n self._defaults[spec_type] = Default(spec_type, stmt)\r\n return None", "def add_parent_attributes(self):\n if len(self.parent_attributes) == 0:\n return\n dest = self.parent.attributes\n source = self.parent_attributes\n changes = {}\n self.merge_attribute_defs(dest, source, changes)\n for aid, value in changes.iteritems():\n# self.parent.h5node.attrs[aid] = value\n # may need modifying for MATLAB\n #- if self.path not in self.file.file_pointer:\n if self.file.get_node(self.path, abort=False) is None:\n # create parent node since it does not exist\n print \"trying to set parent attributes on non-registered parent node:\"\n print \"Non-registered parent node is: '%s'\", self.path\n traceback.print_stack()\n sys.exit(1)\n #- self.file.file_pointer[self.path].attrs[aid] = value\n self.file.set_attribute(self.path, aid, value)" ]
[ "0.5848242", "0.5620025", "0.539464", "0.5353507", "0.53439975", "0.5028083", "0.49867433", "0.49008948", "0.48286456", "0.46795782", "0.46378452", "0.4624426", "0.46004072", "0.45709348", "0.45590082", "0.4541855", "0.44945678", "0.44693825", "0.4469151", "0.44664097", "0.4446841", "0.44466433", "0.4425318", "0.44154054", "0.43990022", "0.43780386", "0.43699786", "0.43697378", "0.43675762", "0.433812", "0.43333912", "0.43150324", "0.43061092", "0.4289223", "0.42846638", "0.42783713", "0.42708305", "0.42573756", "0.42514697", "0.4234598", "0.42239898", "0.42047566", "0.4202317", "0.42011642", "0.41899762", "0.4187202", "0.41785905", "0.41660833", "0.41545922", "0.41498905", "0.41498673", "0.41474795", "0.41452122", "0.41441023", "0.41310993", "0.41154274", "0.4113048", "0.41113138", "0.41095382", "0.4108342", "0.41076756", "0.4101494", "0.40987903", "0.40852386", "0.40847215", "0.40785283", "0.40772653", "0.4073388", "0.40686715", "0.40663525", "0.40595818", "0.40564436", "0.4050119", "0.40484133", "0.4030075", "0.40280223", "0.4025741", "0.40252462", "0.40252462", "0.40114743", "0.40079823", "0.40078807", "0.40066555", "0.40065372", "0.4003564", "0.40024275", "0.3998931", "0.39969802", "0.3996713", "0.3995274", "0.3992858", "0.3991767", "0.3991018", "0.398954", "0.39874497", "0.3982112", "0.39811853", "0.39791167", "0.39787734", "0.39771158" ]
0.43321195
31
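A minimal usage sketch for the record above — this is an editorial assumption, not part of the dataset. It presumes the assemble_effective_tag_set method shown in that record's document field is the one exposed on oci.identity.IdentityClient in the standard OCI Python SDK; the config path, client variable names, and compartment OCID below are placeholders.

import oci

# Load the default SDK config (~/.oci/config) and build an Identity client
# (assumed to be the host class for the tag-default APIs).
config = oci.config.from_file()
identity_client = oci.identity.IdentityClient(config)

# Hypothetical compartment OCID. The call assembles tag defaults from this
# compartment up to the root; parent defaults do not override child ones,
# and the response data is a list of TagDefaultSummary objects.
compartment_id = "ocid1.compartment.oc1..exampleuniqueID"
response = identity_client.assemble_effective_tag_set(
    compartment_id, lifecycle_state="ACTIVE"
)
for tag_default in response.data:
    print(tag_default.tag_definition_name, tag_default.value)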
Moves the specified tag namespace to the specified compartment within the same tenancy. To move the tag namespace, you must have the manage tagnamespaces permission on both compartments. For more information about IAM policies, see `Details for IAM`__. Moving a tag namespace moves all the tag key definitions contained in the tag namespace.
def change_tag_namespace_compartment(self, tag_namespace_id, change_tag_namespace_compartment_detail, **kwargs): resource_path = "/tagNamespaces/{tagNamespaceId}/actions/changeCompartment" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "change_tag_namespace_compartment got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "tagNamespaceId": tag_namespace_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=change_tag_namespace_compartment_detail) else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=change_tag_namespace_compartment_detail)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_namespace(self, doc, namespace):\r\n ns = u'{%s}' % namespace\r\n nsl = len(ns)\r\n for elem in doc.getiterator():\r\n if elem.tag.startswith(ns):\r\n elem.tag = elem.tag[nsl:]\r\n else:\r\n pass", "def remove_namespace(doc, namespace=u\"{http://www.EcoInvent.org/EcoSpold02}\"):\n ns = u'{}'.format(namespace)\n nsl = len(ns)\n for elem in doc.getiterator():\n if elem.tag.startswith(ns):\n elem.tag = elem.tag[nsl:]", "def update():\n for namespace in metadata.get_namespaces():\n logging.info('Switching namespace: \\'%s\\'', namespace)\n namespace_manager.set_namespace(namespace)\n update_per_namespace()\n\n namespace_manager.set_namespace('')\n return ('', 204)", "def ReplaceNamespace(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceNamespace')\n return self._RunMethod(\n config, request, global_params=global_params)", "def ReplaceNamespace(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceNamespace')\n return self._RunMethod(\n config, request, global_params=global_params)", "def move_to(i3: i3ipc.Connection, workspace: int):\n i3.command(f\"move container to workspace number {workspace}\")", "async def remove_namespace(self, namespace: str) -> Any:\n if namespace == self.get_namespace(): # if it belongs to this app's namespace\n raise ValueError(\"Cannot remove namespace with the same name as operating namespace\")\n\n return await self.AD.state.remove_namespace(namespace)", "def clean_up_namespaces(node, namespace=None):\n if namespace is not None:\n Namespaces.delete_namespace(node, namespace)\n return\n\n namespace_copy = deepcopy(Namespaces.__namespaces)\n for namespace_name in namespace_copy:\n Namespaces.delete_namespace(node, namespace_name)", "def update_tag_namespace(self, tag_namespace_id, update_tag_namespace_details, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}\"\n method = \"PUT\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_tag_namespace got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_namespace_details,\n response_type=\"TagNamespace\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_namespace_details,\n response_type=\"TagNamespace\")", "def fix(self):\n for namespace in pm.listNamespaces():\n for elem in namespace.ls():\n elem.rename(elem.split(\":\")[-1])\n namespace.remove()\n\n self.run()", "def removeNamespace(self, *args):\n return _libsbml.XMLToken_removeNamespace(self, *args)", "def set_target_namespace(self, namespace):\n # do shit\n 
self.target_namespace = namespace.strip(\":\")", "def test_replace_namespaced_deployment_config(self):\n pass", "def sync_namespace(alias, reg_code, authToken, space=None, action=None):\n if space == None:\n action = 'get'\n print(\" ACTION: GET\")\n elif action == None:\n if 'aeskey' not in space:\n print(\"Space not encrypted\")\n quit()\n action = 'update'\n print(\" ACTION: UPDATE\")\n elif action == 'delete':\n print(\" ACTION: DELETE\")\n url = endpoint('namespace')\n headers={'authorizationToken': authToken}\n data = json.dumps({'action': action, 'alias': alias, 'reg_code': reg_code, 'namespace': space})\n payload_size = sys.getsizeof(data)\n print(\" Size of payload is: %s\" % (convert_size(payload_size)))\n print(\" Max payload is: %s\" % (convert_size(max_payload_size)))\n if payload_size >= max_payload_size:\n print(\" OVER MAX PAYLOAD: %s\" % (convert_size(max_payload_size)))\n quit()\n r = requests.post(url, headers=headers, data=data) \n print(\" Request made\")\n if r.status_code == 403:\n print(\" Invalid registration code, exiting\")\n quit()\n elif r.status_code == 406:\n print(\" Namespace mismatch\")\n quit()\n else:\n print(\" └──statusCode:\" + str(r.status_code) )\n return r", "def delete_namespace(node, namespace):\n cmd_timeout = 5\n cmd = f\"ip netns delete {namespace}\"\n (ret_code, _, delete_errmsg) = \\\n exec_cmd(node, cmd, timeout=cmd_timeout, sudo=True)\n if ret_code != 0:\n cmd = f\"ip netns list {namespace}\"\n (stdout, _) = \\\n exec_cmd_no_error(node, cmd, timeout=cmd_timeout, sudo=True)\n if stdout == namespace:\n raise RuntimeError(f\"Could not delete namespace \"\n f\"({namespace}): {delete_errmsg}\")\n try:\n Namespaces.__namespaces.remove(namespace)\n except ValueError:\n pass", "def test_replace_net_namespace(self):\n pass", "def test_replace_namespaced_role(self):\n pass", "def create_tag_namespace(self, create_tag_namespace_details, **kwargs):\n resource_path = \"/tagNamespaces\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_tag_namespace got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_namespace_details,\n response_type=\"TagNamespace\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_namespace_details,\n response_type=\"TagNamespace\")", "def removeNamespace(self, *args):\n return _libsbml.SBMLNamespaces_removeNamespace(self, *args)", "def set_namespace(self, namespace: str) -> None:\n self._namespace = namespace", "def delete_tag_namespace(self, tag_namespace_id, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}\"\n method = 
\"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\",\n \"opc_request_id\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_tag_namespace got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing),\n \"opc-request-id\": kwargs.get(\"opc_request_id\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def move_objects(self, src_s3_prefix_path, destination_s3_prefix_path):\n src_bucket_name, src_prefix = S3Util.get_bucket_and_key(src_s3_prefix_path)\n destination_bucket_name, destination_prefix = S3Util.get_bucket_and_key(destination_s3_prefix_path)\n\n src_bucket = self.s3_resource.Bucket(src_bucket_name)\n destination_bucket = self.s3_resource.Bucket(destination_bucket_name)\n\n for obj in src_bucket.objects.filter(Prefix=src_prefix):\n source_obj = self._object_summary_to_copy_source(obj)\n\n # replace the prefix\n new_key = obj.key.replace(src_prefix, destination_prefix)\n destination_bucket.copy(CopySource=source_obj, Key=new_key)\n obj.delete()", "def replace_namespaced_namespace(self, body, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.replace_namespaced_namespace_with_http_info(body, name, **kwargs)\n else:\n (data) = self.replace_namespaced_namespace_with_http_info(body, name, **kwargs)\n return data", "def test_delete_net_namespace(self):\n pass", "def move_tag_seq(words, seq, dest, punc=None):\n if len(seq) > len(words):\n return None\n seq_start = index_tag_seq(words, seq)\n if seq_start > -1:\n move_words = words[seq_start:seq_start+len(seq)]\n words = words[:seq_start] + words[seq_start+len(seq):]\n if dest == 'start':\n words = move_words + words\n if dest == 'end':\n if punc is not None:\n words.append(punc)\n words += move_words\n return words\n return None", "def test_delete_namespaced_deployment_config(self):\n pass", "def namespace_delete(cursor, namespace_id):\n haystack = (namespace_id,)\n query = \"DELETE FROM namespaces WHERE _id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((True, None))", "def move(owner_id=None, target_album_id=None, photo_id=None):\n params = {\n 'owner_id': owner_id,\n 'target_album_id': target_album_id,\n 'photo_id': photo_id\n }\n result = call('photos.move', **params)\n return parse_response(result)", "def swap_cnames(profile, 
source_environment, destination_environment):\n client = boto3client.get(\"elasticbeanstalk\", profile)\n params = {}\n params[\"SourceEnvironmentName\"] = source_environment\n params[\"DestinationEnvironmentName\"] = destination_environment\n return client.swap_environment_cnames(**params)", "def post_namespace_delete(self, resource_id, resource_dict):\n pass", "def replace_resource(self, namespace: \"str\" = None):\n names = [\n \"replace_namespaced_csistorage_capacity\",\n \"replace_csistorage_capacity\",\n ]\n\n _kube_api.execute(\n action=\"replace\",\n resource=self,\n names=names,\n namespace=namespace,\n api_client=None,\n api_args={\"body\": self.to_dict(), \"name\": self.metadata.name},\n )", "def replace_namespaced_net_namespace(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_net_namespace`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_net_namespace`\")\n\n resource_path = '/oapi/v1/netnamespaces/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NetNamespace',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def move(name, other, newname=None):", "def namespace(*args, absoluteName: bool=True, addNamespace: AnyStr=\"\", collapseAncestors:\n AnyStr=\"\", deleteNamespaceContent: bool=True, exists: Union[AnyStr, bool]=\"\",\n force: bool=True, isRootNamespace: Union[AnyStr, bool]=\"\",\n mergeNamespaceWithOther: AnyStr=\"\", mergeNamespaceWithParent: bool=True,\n mergeNamespaceWithRoot: bool=True, moveNamespace: List[AnyStr, AnyStr]=None,\n parent: AnyStr=\"\", recurse: bool=True, relativeNames: bool=True, removeNamespace:\n AnyStr=\"\", rename: List[AnyStr, AnyStr]=None, setNamespace: AnyStr=\"\",\n validateName: AnyStr=\"\", q=True, query=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def test_create_deployment_config_rollback_for_all_namespaces(self):\n pass", "def replace_widget(container, tag, widget) -> None:\n try:\n container[tag].destroy()\n except KeyError:\n pass\n container[tag] = widget", "def remove(self, 
camera, namespace='*'):\n with self.lock:\n if namespace == '*':\n for cur_namespace in self.camera_namespaces:\n self.camera_namespaces[cur_namespace].remove(camera)\n else:\n self.camera_namespaces[namespace].remove(camera)", "def remove(self, *args):\n return _libsbml.XMLNamespaces_remove(self, *args)", "def test_unidirectional_namespace_bucket_replication(\n self,\n awscli_pod_session,\n mcg_obj_session,\n cld_mgr,\n bucket_factory,\n source_bucketclass,\n target_bucketclass,\n test_directory_setup,\n ):\n target_bucket_name = bucket_factory(bucketclass=target_bucketclass)[0].name\n\n replication_policy = (\"basic-replication-rule\", target_bucket_name, None)\n source_bucket = bucket_factory(\n 1, bucketclass=source_bucketclass, replication_policy=replication_policy\n )[0]\n source_bucket_name = source_bucket.name\n source_bucket_uls_name = source_bucket.bucketclass.namespacestores[0].uls_name\n\n namespacestore_aws_s3_creds = {\n \"access_key_id\": cld_mgr.aws_client.access_key,\n \"access_key\": cld_mgr.aws_client.secret_key,\n \"endpoint\": constants.AWS_S3_ENDPOINT,\n \"region\": source_bucketclass[\"namespace_policy_dict\"][\n \"namespacestore_dict\"\n ][\"aws\"][0][1],\n }\n\n written_random_objects = write_random_test_objects_to_bucket(\n awscli_pod_session,\n source_bucket_uls_name,\n test_directory_setup.origin_dir,\n amount=5,\n s3_creds=namespacestore_aws_s3_creds,\n )\n\n listed_obejcts = mcg_obj_session.s3_list_all_objects_in_bucket(\n source_bucket_name\n )\n\n compare_bucket_object_list(\n mcg_obj_session, source_bucket_name, target_bucket_name\n )\n\n assert set(written_random_objects) == {\n obj.key for obj in listed_obejcts\n }, \"Some of the uploaded objects are missing\"", "def update_tag(tag):\n remove_tag(tag)\n add_tag(tag)", "def list_tag_namespaces(self, compartment_id, **kwargs):\n resource_path = \"/tagNamespaces\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"include_subcompartments\",\n \"lifecycle_state\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_tag_namespaces got unknown kwargs: {!r}\".format(extra_kwargs))\n\n if 'lifecycle_state' in kwargs:\n lifecycle_state_allowed_values = [\"ACTIVE\", \"INACTIVE\", \"DELETING\", \"DELETED\"]\n if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:\n raise ValueError(\n \"Invalid value for `lifecycle_state`, must be one of {0}\".format(lifecycle_state_allowed_values)\n )\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"includeSubcompartments\": kwargs.get(\"include_subcompartments\", missing),\n \"lifecycleState\": kwargs.get(\"lifecycle_state\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[TagNamespaceSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n 
query_params=query_params,\n header_params=header_params,\n response_type=\"list[TagNamespaceSummary]\")", "def get_namespace(self, namespace, lowercase=True, trim_namespace=True):\n\t\treturn self.get_namespace_view(namespace, lowercase, trim_namespace).copy()", "async def removetags(self, ctx, tag=None):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\treturn await ctx.send('Can\\'t find Tag: '.format(tag))\t\r\n\r\n\t\tdel Tag[tag]\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)\r\n\r\n\t\tawait ctx.send('Removed Tag: '.format(tag))", "def test_replace_namespaced_template(self):\n pass", "def move_object(self, bucket_name, src_object_name, dst_object_name, no_overwrite=False):\n\n return h3lib.move_object(self._handle, bucket_name, src_object_name, dst_object_name, no_overwrite, self._user_id)", "def test_replace_namespaced_role_binding(self):\n pass", "def test_create_namespaced_deployment_config_rollback(self):\n pass", "def updateNamespace(self):\n import addict\n self.namespace['config'] = addict.Dict(self.namespace['config'])", "def namespace(self, namespace: str):\n\n self._namespace = namespace", "def delete_tag(tag, directory=None):\n execute_command('git tag -d {0}'.format(tag), shell=True, cwd=directory)", "def move_element(self,n_a,n_b):\n self.element_array.insert(n_b,self.element_array.pop(n_a))", "def test_patch_namespaced_deployment_config(self):\n pass", "def move_node(self, job):\n transfer = Transfer(job.jobInfo)\n target = transfer.target\n direction = transfer.direction\n result = None\n # Check uris\n check_uri(target, self.sm, shouldExist = True)\n checks = check_uri(direction, self.sm, shouldExist = False)\n null = direction.endswith(NULL)\n # Retrieve existing record\n node = self.sm.get_node(target)[0]['node']\n node = self.nf.get_node(node)\n # Check whether endpoint is reserved URI\n if null: self.nm.delete_node(target)\n if direction.endswith(AUTO): \n direction = generate_uri(direction)\n result = {'destination': direction}\n if not(null):\n # Check if endpoint is a container\n if checks['exists'] and checks['container']: direction += target[target.rfind('/'):]\n # Change identifier\n node.set_uri(direction)\n # Update db\n self.sm.update_node(target, direction, node.tostring())\n # Check if target is a container\n if isinstance(node, ContainerNode):\n # Move children\n for child in self.sm.get_children(target):\n node = self.nf.get_node(self.sm.get_node(child)[0]['node'])\n if null:\n self.nm.delete_node(node.uri)\n else:\n new_uri = node.uri.replace(target, direction)\n node.set_uri(new_uri)\n self.sm.update_node(child, new_uri, node.tostring())\n return result", "def setElementNamespace(self, *args):\n return _libsbml.ASTBasePlugin_setElementNamespace(self, *args)", "def destroyContainer(tag): #@NoSelf", "def test_remove_defined_tag(self, test, object_storage):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"bucket-remove-tag\",\n \"resource\": \"oci.bucket\",\n \"filters\": [\n {\"type\": \"value\", \"key\": \"name\", \"value\": bucket_name},\n ],\n \"actions\": [\n {\n \"type\": \"remove-tag\",\n \"defined_tags\": [\"cloud-custodian-test.mark-for-resize\"],\n },\n ],\n },\n session_factory=session_factory,\n )\n policy.run()\n resource = self._fetch_bucket_validation_data(\n policy.resource_manager, namespace_name, bucket_name\n )\n test.assertEqual(resource[\"name\"], 
bucket_name)\n test.assertEqual(self.get_defined_tag_value(resource[\"defined_tags\"]), None)", "async def save_namespace(self, **kwargs) -> None:\n namespace = self._get_namespace(**kwargs)\n await self.AD.state.save_namespace(namespace)", "def test_update_bucket(self, test, object_storage, with_or_without_compartment):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"add-defined-tag-to-bucket\",\n \"resource\": \"oci.bucket\",\n \"query\": [\n {\"namespace_name\": namespace_name},\n ],\n \"filters\": [\n {\"type\": \"value\", \"key\": \"name\", \"value\": bucket_name},\n ],\n \"actions\": [{\"type\": \"update\", \"defined_tags\": self.get_defined_tag(\"add_tag\")}],\n },\n session_factory=session_factory,\n )\n policy.run()\n resource = self._fetch_bucket_validation_data(\n policy.resource_manager, namespace_name, bucket_name\n )\n test.assertEqual(resource[\"name\"], bucket_name)\n test.assertEqual(self.get_defined_tag_value(resource[\"defined_tags\"]), \"true\")", "def move_objects(\n source_bucket=None,\n destination_bucket=None,\n prefix=None,\n **kwargs):\n\n storage_objects = kwargs['ti'].xcom_pull(task_ids='list_files')\n\n hook = GoogleCloudStorageHook()\n\n for storage_object in storage_objects:\n destination_object = storage_object\n\n if prefix:\n destination_object = '{}/{}'.format(prefix, storage_object)\n\n hook.copy(source_bucket, storage_object, destination_bucket, destination_object)\n hook.delete(source_bucket, storage_object)", "def addNamespace(self, *args):\n return _libsbml.XMLToken_addNamespace(self, *args)", "def add_namespace(self, q, ns):\n if ns in self.namespaces: return self.namespaces[ns]\n self.namespaces[ns] = q\n return q", "def delete_bucket_tagging(Bucket=None):\n pass", "def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200", "def setNamespaces(self, *args):\n return _libsbml.XMLToken_setNamespaces(self, *args)", "def delete_namespace_content(context, namespace_id, session):\n count = 0\n query = (session.query(models.MetadefTag).filter_by(\n namespace_id=namespace_id))\n count = query.delete(synchronize_session='fetch')\n return count", "def tag_rename(self, item_id, old_tag, new_tag, **params):\n\n self.queue('tag_rename', item_id=item_id,\n old_tag=old_tag, new_tag=new_tag, **params)", "def removePackageNamespace(self, *args):\n return _libsbml.SBMLNamespaces_removePackageNamespace(self, *args)", "def move_object(self, src_s3_path, destination_s3_path):\n src_bucket_name, src_key = S3Util.get_bucket_and_key(src_s3_path)\n destination_bucket_name, destination_key = S3Util.get_bucket_and_key(destination_s3_path)\n obj = self.s3_resource.ObjectSummary(src_bucket_name, src_key)\n destination_bucket = self.s3_resource.Bucket(destination_bucket_name)\n destination_bucket.copy(CopySource=self._object_summary_to_copy_source(obj), Key=destination_key)\n obj.delete()", "def moveBar(self, 
from_, to_):\n fromBar = self.widget(from_)\n self.removeWidget(fromBar)\n self.insertWidget(to_, fromBar)", "def copyNamespace(self):\n ret = libxml2mod.xmlCopyNamespace(self._o)\n if ret is None:raise treeError('xmlCopyNamespace() failed')\n __tmp = xmlNs(_obj=ret)\n return __tmp", "def archive(self, namespace, archive_name, namespace_out=None, format=None):\n raise RuntimeError('Already achieved')", "def move(self, target):\n if target.relto(self):\n raise error.EINVAL(target, \"cannot move path into a subdirectory of itself\")\n try:\n self.rename(target)\n except error.EXDEV: # invalid cross-device link\n self.copy(target)\n self.remove()", "def namespace(self, namespace):\n\n self._namespace = namespace", "def namespace(self, namespace):\n\n self._namespace = namespace", "def create_namespace(node, namespace, delete_before_create=True):\n if delete_before_create:\n Namespaces.delete_namespace(node, namespace)\n\n cmd = f\"ip netns add {namespace}\"\n exec_cmd_no_error(node, cmd, sudo=True)\n Namespaces.__namespaces.append(namespace)", "def move_container(i3, name, monitor, container_id=None):\n i3.command(f'move container to workspace {name}')\n i3.command(f'workspace {name}, move workspace to output {monitor}')\n if container_id:\n i3.command(f'[con_id=\"{container_id}\"] focus')", "def delete_tag(tag):\n tag.destroy()", "def tag ():\n\n tagname = get_tag(comp_versions, 'ACE')\n\n if opts.tag:\n if opts.take_action:\n vprint (\"Placing tag %s on ACE_TAO\" % (tagname))\n ex (\"cd $DOC_ROOT/ACE_TAO && git tag -a \" + tagname + \" -m\\\"\" + tagname + \"\\\"\")\n\n vprint (\"Placing tag %s on MPC\" % (tagname))\n ex (\"cd $DOC_ROOT/MPC && git tag -a \" + tagname + \" -m\\\"\" + tagname + \"\\\"\")\n\n # Update release branches\n latest_branch_helper (update_latest_branch, opts.release_type)\n else:\n vprint (\"Placing tag %s on ACE_TAO\" % (tagname))\n vprint (\"Placing tag %s on MPC\" % (tagname))\n print (\"Creating tags:\\n\")\n print (\"Placing tag \" + tagname + \"\\n\")", "def test_replace_namespaced_policy(self):\n pass", "def reconfigure_namespace(self, namespace, mode, **kwargs):\n\n if namespace not in self.namespaces.keys():\n raise ValueError(\"Namespace '%s' doesn't exist.\" % namespace)\n\n info = self.namespaces[namespace]\n\n sector_size = kwargs.get(\"sector_size\", None)\n map_location = kwargs.get(\"map_location\", None)\n\n if sector_size and mode != \"sector\":\n raise ValueError(\"Sector size cannot be set for selected mode '%s'.\" % mode)\n\n if map_location and mode != \"memory\":\n raise ValueError(\"Map location cannot be set for selected mode '%s'.\" % mode)\n\n mode_t = BlockDev.nvdimm_namespace_get_mode_from_str(mode)\n\n if sector_size:\n extra = {\"-l\": str(sector_size)}\n elif map_location:\n extra = {\"-M\": map_location}\n else:\n extra = None\n\n BlockDev.nvdimm_namespace_reconfigure(namespace, mode_t, info.enabled, extra)\n\n # and update our namespaces info \"cache\"\n self.update_namespaces_info()", "def test_replace_namespaced_deployment_config_status(self):\n pass", "def test_replace_namespaced_policy_binding(self):\n pass", "def move_axis(data, label, new_position):\n # find current position of axis\n try:\n pos = data.dims.index(label)\n except ValueError as e:\n raise ValueError(\n f'Axis name {label} does not exist in input data') from e\n\n # create list of labels with new ordering\n axis_labels = list(data.dims)\n # the new position will be _before_ the given index, so will fail with a negative index\n # convert to a positive index in that 
case\n if new_position < 0:\n new_position += len(axis_labels)\n axis_labels.insert(new_position, axis_labels.pop(pos))\n # do the move\n return data.transpose(*axis_labels)", "def test_replace_namespaced_image_stream_tag(self):\n pass", "def MoveToAttributeNs(self, localName, namespaceURI):\n ret = libxml2mod.xmlTextReaderMoveToAttributeNs(self._o, localName, namespaceURI)\n return ret", "def remove_namespace(namespace, response, resp_keys=[]):\n if isinstance(namespace, str):\n namespace = str.encode(namespace)\n for key in resp_keys:\n response[key] = remove_namespace(namespace, response[key])\n if isinstance(response, (int, float, bool)):\n pass\n elif isinstance(response, bytes):\n response = response.replace(namespace, b'', 1)\n elif isinstance(response, (tuple, list)):\n response = tuple([remove_namespace(namespace, x) for x in response])\n return response", "def updateSBMLNamespace(self, *args):\n return _libsbml.SBMLDocument_updateSBMLNamespace(self, *args)", "def remove_tag(args):", "def tags_rename(self, item, tags):\n self._createTagAction(item, \"tags_rename\", tags)", "def test_patch_net_namespace(self):\n pass", "def untag_element(self,tag_name,element):\n pass", "def removeNode(self, nTag):\r\n try:\r\n self._nodes.pop(nTag).destroy()\r\n except KeyError:\r\n raise InvalidRequest('Can not remove a non existent node '\r\n \"'{0}' from the container.\".format(nTag))", "def remove( self, zone ):\n if zone.space is None:\n raise KeyError( \"zone not in space octree!\" )\n\n # remove zone from space node's contained set\n zone.space.zones.remove( zone )\n\n # set zone's containing space to none\n zone.space = None", "def delete_namespace(self, namespace):\n return self.core_client.delete_namespace(namespace)", "def replace_namespaced_namespace_finalize(self, body, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.replace_namespaced_namespace_finalize_with_http_info(body, name, **kwargs)\n else:\n (data) = self.replace_namespaced_namespace_finalize_with_http_info(body, name, **kwargs)\n return data", "def test_create_namespaced_deployment_config_rollback_rollback(self):\n pass", "def convert_to_namespace(file, output, keyword):\n resource = parse_bel_resource(file)\n write_namespace(\n namespace_keyword=(keyword or resource['AnnotationDefinition']['Keyword']),\n namespace_name=resource['AnnotationDefinition']['Keyword'],\n namespace_description=resource['AnnotationDefinition']['DescriptionString'],\n author_name='Charles Tapley Hoyt',\n namespace_domain=NAMESPACE_DOMAIN_OTHER,\n values=resource['Values'],\n citation_name=resource['Citation']['NameString'],\n file=output\n )", "def destroyNamespace(self, remoteNamespace):\r\n for namespace in self._namespaces:\r\n if namespace.destroyExternal(remoteNamespace):\r\n break", "def namespace_name(self, namespace_name):\n\n self._namespace_name = namespace_name", "def qos_policy_group_rename(self, policy_group_name, new_name):\n return self.request( \"qos-policy-group-rename\", {\n 'policy_group_name': [ policy_group_name, 'policy-group-name', [ basestring, 'None' ], False ],\n 'new_name': [ new_name, 'new-name', [ basestring, 'None' ], False ],\n }, {\n } )" ]
[ "0.55153644", "0.5485366", "0.5347061", "0.52107394", "0.52107394", "0.5199132", "0.5165057", "0.5079889", "0.5022554", "0.49942735", "0.49675435", "0.49604434", "0.49384886", "0.48304433", "0.4796919", "0.4729666", "0.46716085", "0.46684968", "0.4638175", "0.46381432", "0.46096462", "0.45636493", "0.45625663", "0.45530888", "0.45495844", "0.4545023", "0.4537018", "0.45306963", "0.4528945", "0.45188636", "0.4513859", "0.44731268", "0.44713363", "0.44707373", "0.44516405", "0.44463134", "0.44463098", "0.44382694", "0.44290954", "0.4417732", "0.44082084", "0.4397939", "0.43856716", "0.43625948", "0.43596593", "0.43594536", "0.43451887", "0.4345104", "0.4342578", "0.43381837", "0.43359712", "0.4335801", "0.43342754", "0.43292153", "0.4328543", "0.4317321", "0.4316458", "0.4295984", "0.42938465", "0.42923182", "0.426023", "0.42577487", "0.42567372", "0.4256192", "0.42544794", "0.4252844", "0.42522085", "0.4245407", "0.42433196", "0.42417425", "0.42404345", "0.42379874", "0.42306948", "0.42306948", "0.42294544", "0.4227811", "0.42262134", "0.42241132", "0.42236793", "0.4221493", "0.4217459", "0.42147943", "0.42120618", "0.4206442", "0.41974786", "0.41939682", "0.41916794", "0.41837955", "0.417806", "0.41712648", "0.41666347", "0.41665745", "0.41633916", "0.41625485", "0.41618294", "0.41602093", "0.41561183", "0.4155637", "0.41549587", "0.41544294" ]
0.61622286
0
Creates a new auth token for the specified user. For information about what auth tokens are for, see `Managing User Credentials`__. You must specify a description for the auth token (although it can be an empty string). It does not have to be unique, and you can change it anytime with
def create_auth_token(self, create_auth_token_details, user_id, **kwargs): resource_path = "/users/{userId}/authTokens" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_auth_token got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_auth_token_details, response_type="AuthToken") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_auth_token_details, response_type="AuthToken")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_auth_token(self, user=None):\n token, created = Token.objects.get_or_create(user=user)\n return token", "def create_token(user, title, expiration=_default_expiration_duration_opt):\n if expiration == _default_expiration_duration_opt:\n duration = _default_expiration_duration()\n expiration = duration + datetime.now() if duration else None\n\n token_code = random_string_generator(TOKEN_NAME_PREFIX_LENGTH + MINIMUM_TOKEN_SUFFIX_LENGTH)()\n token_name = token_code[:TOKEN_NAME_PREFIX_LENGTH]\n token_secret = token_code[TOKEN_NAME_PREFIX_LENGTH:]\n\n assert token_name\n assert token_secret\n\n return AppSpecificAuthToken.create(\n user=user,\n title=title,\n expiration=expiration,\n token_name=token_name,\n token_secret=DecryptedValue(token_secret),\n )", "def create_token(user):\n access_token = create_access_token(user)\n payload = jwt.decode(\n access_token,\n app.config['JWT_SECRET_KEY'],\n algorithms=app.config['JWT_DECODE_ALGORITHMS'])\n data = {\n 'token':access_token,\n 'username': user.username,\n }\n data.update(payload)\n data['exp'] = datetime.fromtimestamp(data['exp'])\n app.logger.debug(str(data))\n if app.config.get('KEEP_TOKEN'):\n # deletes old tokens\n tokens = app.data.driver.db[config.DOMAIN['token']['datasource']['source']]\n tokens.delete_many({'username': user.username})\n # insets new token\n result = app.data.insert('token', data)\n return access_token, str(result[0])\n\n return access_token, None", "def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)", "def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)", "def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)", "def post(self):\n _purge_expired_user_tokens()\n\n request_dict = get_json_and_verify_params({\n 'description': {'type': str, 'optional': True},\n 'expiration_date': {'optional': True},\n })\n\n expiration_date = request_dict.get('expiration_date')\n if expiration_date:\n expiration_date = parse_utc_datetime(\n expiration_date, timezone=\"UTC\")\n\n return current_user.create_auth_token(request_dict.get('description'),\n expiration_date)", "def create_auth_token(sender, instance=None, created=False, **kwargs): # pylint: disable=unused-argument\n if created:\n Token.objects.create(user=instance) # pylint: disable=no-member", "def create_token(user):\n payload = {\n 'sub': user.id,\n 'iat': datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(days=1)\n }\n token = jwt.encode(payload, config.SECRET_KEY, algorithm='HS256')\n return token.decode('unicode_escape')", "def set_auth_token_header(self):\n\n username = 'test-user'\n passwd = 'testuserpass1234'\n user = User.objects.create(username=username)\n user.set_password(passwd)\n user.save()\n\n assert Account.objects.get(user=user) is not None\n url = reverse('token_obtain_pair')\n res = self.client.post(url,\n data={'username': username, 'password': passwd})\n self.client.credentials(HTTP_AUTHORIZATION=\n f\"Bearer {res.data['access']}\")\n return user", "def create_auth_token(\n username: str,\n admin: t.Optional[bool] = False,\n spotify: t.Optional[bool] = False,\n) -> JWT:\n auth_token: JWT = auth_manager.auth_token(\n username, {\"admin\": admin, \"spotify\": spotify}\n )\n return auth_token", "def generate_token(user):\n if JWT_AUTH:\n payload = JWT_PAYLOAD_HANDLER(user)\n return JWT_ENCODE_HANDLER(payload)\n else:\n token = 
Token.objects.create(user=user)\n token.save()\n return token", "def create_token(request, user):\n\n key = get_random_string(100)\n data = {}\n ip = get_client_ip_address(request)\n\n return Token.objects.create(user=user, key=key, data=json.dumps(data), ip=ip)", "def generate_token(user):\n try:\n # generate the auth token\n auth_token = User.encode_auth_token(user.id)\n response_object = {\n \"status\": \"success\",\n \"message\": \"Successfully registered.\",\n \"Authorization\": auth_token.decode(),\n }\n return response_object, 201\n except Exception as e:\n response_object = {\n \"status\": \"fail\",\n \"message\": \"Some error occurred. Please try again.\",\n }\n return response_object, 401", "def test_create_token_for_user(self):\r\n payload = {\r\n 'email': 'test@max.net',\r\n 'password': 'Testpass123',\r\n 'name': 'Maks'\r\n }\r\n create_user(**payload)\r\n\r\n res = self.client.post(TOKEN_URL, payload)\r\n\r\n self.assertIn('token', res.data)\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_create_token_for_user(self):\n payload = {'email': 'test1@test1.ri',\n 'password': 'testPassWord',\n 'time_zone': 'Europe/Dublin'}\n create_user(**payload)\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_create_token_for_user(self):\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'testpass'\n }\n create_user(**payload)\n res = self.client.post(TOKEN_URI, payload)\n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_create_token_for_user(self):\n payload = {'email': 'test@test.com', 'password': 'testpass'}\n create_user(**payload)\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_create_token_for_user(self):\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'abcd1234',\n }\n\n create_user(**payload)\n response = self.client.post(TOKEN_URL, payload)\n\n # We expect a token and should get a HTTP 200\n self.assertIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create_token_for_user(self):\n\n credentials = {'email': 'testuser@gmail.com', 'password': 'Testpass12'}\n get_user_model().objects.create_user(**credentials)\n\n response = self.client.post(URL_TOKEN, credentials)\n\n # Check that the response is HTTP 200, and contains a token.\n self.assertIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def token_auth(self):\n self.client = APIClient()\n self.user = User.objects.create_user(username='testuser', email='test@test.com', password='testpassword')\n self.token = Token.objects.create(user=self.user)\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)", "def _create_user(userid, **kw):\n\n new_user = User(userid, **kw)\n USERS[new_user.token] = new_user\n return USERS[new_user.token]", "def newToken(self, description):\n self.__require_privilaged_access()\n with DBSession(self.__config_db) as session:\n token = generate_session_token()\n user = self.getLoggedInUser()\n groups = ';'.join(self.__auth_session.groups)\n session_token = Session(token, user, groups, description, False)\n\n session.add(session_token)\n session.commit()\n\n LOG.info(\"New personal access token '%s...' 
has been generated \"\n \"by '%s'.\", token[:5], self.getLoggedInUser())\n\n return SessionTokenData(token,\n description,\n str(session_token.last_access))", "def test_create_token_for_user(setup_client):\n client = setup_client\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'testpass',\n }\n create_user(**payload, **{'role': 'Supplier'})\n res = client.post(TOKEN_URL, payload)\n assert \"token\" in res.data\n assert res.status_code == status.HTTP_200_OK", "def create_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n expiration = int(data.get(\"expiration\"))\n\n pk = get_provider_private_key(use_universal_key=True)\n token = jwt.encode({\"exp\": expiration, \"address\": address}, pk, algorithm=\"HS256\")\n token = token.decode(\"utf-8\") if isinstance(token, bytes) else token\n\n valid, message = is_token_valid(token, address)\n if not valid:\n if message == \"Token is deleted.\":\n force_restore_token(token)\n else:\n return jsonify(error=message), 400\n\n return jsonify(token=token)", "def create_google_user(self, payload, user_data):\n if not User.objects.filter(email=payload['email']).exists():\n u = User()\n u.generate_token()\n u.email = payload['email']\n u.name = payload['given_name'] or ''\n u.surname = payload['family_name'] or ''\n u.image_url = user_data['image_url'] or ''\n u.google_id = user_data['id']\n u.google_token = user_data['token']\n u.save()\n else:\n u = User.objects.get(email=payload['email'])\n\n return u.token", "def generate_token(usr):\n token = jwt.encode({\"user\":usr, \"exp\":datetime.datetime.utcnow()\n + datetime.timedelta(minutes=30)}, KEY)\n user = User.update(token=token).where(User.username == usr)\n user.execute()\n return token", "def generate_token_from_user(user, expires_at=None):\n issued_at = datetime.utcnow()\n token = AccessToken()\n token.payload.update(\n {\n \"email\": user.email,\n \"exp\": expires_at or issued_at + timedelta(days=2),\n \"iat\": issued_at,\n \"language\": user.language,\n \"username\": user.username,\n \"full_name\": user.get_full_name(),\n }\n )\n return token", "def for_user(cls, user):\n\n token = super().for_user(user)\n\n TokenMeta.objects.get_or_create(\n jti=token['jti'],\n token=str(token),\n )\n\n return token", "def insertNewUser(self,user, access_token):\n newUser = UserToken(username=user, user_key = access_token.key, user_secret = access_token.secret)\n newUser.put()", "def create_token(self, consumer, token_type, timestamp, user=None):\n token, created = self.first_or_create(consumer=consumer, \n token_type=token_type, \n timestamp=timestamp,\n user=user)\n\n if created:\n token.key, token.secret = self.generate_random_codes()\n token.save()\n\n return token", "def create(self, request):\n token = AuthTokenClass().post(request)\n return token", "def create(self, user, token):\n\n session['user'] = {\n 'id': str(user.id),\n 'login': user.login,\n 'token': token\n }\n\n return UserSession.create(session['user'])", "def create(self, request):\n return ObtainAuthToken().post(request)", "def create(self,request):\n return CustomAuthToken().post(request)", "def post(self, **kwargs):\n username: str = request.json.get(\"username\", None)\n password: str = request.json.get(\"password\", None)\n user = get_user_instance(username, password)\n return {\"access_token\": create_access_token(identity=user)}, 200", "def create_user(self):\n username = \"\".join(choice(\n string.ascii_letters) for x in range (randint(7,10)))\n params = {\n \"first_name\":\"ugali\",\n 
\"last_name\":\"mayai\",\n \"email\":\"ugalimayai@gmail.com\",\n \"username\":username,\n \"password\":\"password\"\n }\n path = \"/api/v2/auth/signup\"\n user = self.client.post(path,\n data=json.dumps(params),\n content_type=\"application/json\")\n \n user_id = user.json['user_id']\n auth_token = user.json['AuthToken']\n return int(user_id), auth_token", "async def create_new_user(*, user: User):\n with Session(engine) as session:\n user.password = simple_hash(user.name, user.password) #Hashing password for security\n session.add(user)\n session.commit()\n return {\"message\": \"User {user_id} created\".format(user_id = user.id)}", "def test_create_token(self):\n data = {'email': 'test@test.com', 'password': 'testtest'}\n sigin_in_user(**data)\n payload = {\n 'username': 'test@test.com',\n 'password': 'testtest'\n }\n res = self.client.post(TOKEN_URL, payload)\n \n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n headers = self.get_success_headers(serializer.data)\n return Response(\n dict(serializer.data, token=str(user.auth_token)),\n status=status.HTTP_201_CREATED,\n headers=headers\n )", "def generate_new_token(uid):\n random_token = uuid.uuid4()\n token = TokenAuth(user_id=uid, token=random_token)\n token.save()\n return random_token", "def get_token(cls, user, full_result=False):\n if user is None:\n return EMPTY_KNOX_TOKEN\n result = AuthToken.objects.create(user=user)\n return result if full_result else result[1]", "def generate_token(user: dict):\n\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),\n 'iat': datetime.datetime.utcnow(),\n 'user': user\n }\n token = jwt.encode(\n payload,\n os.getenv('SECRET_KEY'),\n algorithm='HS256'\n )\n return token.decode('UTF-8')", "def post(self, request, *args, **kwargs):\n self.create(request, *args, **kwargs)\n token, created = Token.objects.get_or_create(user=self.user)\n return Response({'token': token.key}, status=201)", "def create_jwt(user, secret):\n logger.debug(\"Create JWT with secret %s\" % secret)\n # username = request.POST['username']\n # password = request.POST['password'\n\n expiry = datetime.datetime.now() + datetime.timedelta(seconds=30)\n expiry_s = time.mktime(expiry.timetuple())\n if user.is_authenticated():\n internalid = user.authprofile.internalid\n payload = {'username': user.username, 'expiry': expiry_s, 'type': \"AuthenticatedUser\", 'internalid': internalid, 'email': user.email}\n token = jws.sign(payload, secret, algorithm='HS256')\n else:\n payload = {'expiry':expiry_s, 'type': \"AnonymousUser\", 'internalid': None, 'email': None}\n token = jws.sign(payload, secret, algorithm='HS256')\n logger.debug(\"Payload: %s\" % payload)\n # logger.info(\"Token: %s\" % token)\n return token", "def test_create_token_to_user(self):\n data = {\n 'email': 'test@test.com', \n 'password': \"testtest\"\n }\n res = self.client.post(TOKEN_URL, data)\n\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def create(self, request):\n\n return ObtainAuthToken().post(request)", "def new_user(client, username, password, apikey=None, docs=None):\n if apikey is None:\n apikey = str(uuid.uuid4())\n passhash = generate_password_hash(password, method='sha1')\n user = User(username, passhash, apikey, docs=docs)\n user.create(client)\n return user", 
"def create_auth_token(sender, instance=None, created=False, **kwargs):\n\n if created:\n # Generate API token for user.\n api_token = Token.objects.create(user=instance)\n\n # Only create agent using username and API token for non-admin users.\n if instance.is_superuser is False:\n Agent.objects.create(scan_agent=instance, api_token=api_token)", "def authorize_user(case: APITestCase, user: User):\n\n token = Token.objects.create(user=user)\n case.client.credentials(HTTP_AUTHORIZATION=f'Token {token}')", "def create(self, validated_data):\n\t\tinstance = super(UserSerializer, self).create(validated_data)\n\t\tinstance.set_password(validated_data['password'])\n\t\ttoken = Token.objects.create(user=instance)\n\t\tinstance.token = token\n\t\treturn instance", "def create_user():\n record = request.get_json()\n if record is None:\n return {\"Error\": \"No data Supplied.\"}, 400\n\n schema = user_schema.load(record)\n\n if UserModel.objects(email=schema['email']):\n return {\"Error\": \"User Data already exists.\"}, 400\n user = UserModel(**schema)\n user.hash_password()\n user.save()\n ser_data = user_schema.dump(user)\n token = Auth.generate_token(ser_data[\"_id\"])\n return {\"message\": \"User Created Successfully\", \"Token\": token, \"id\": str(user.id)}, 200", "def make_token(self, user):\n return super()._make_token_with_timestamp(user, int(time.time()))", "def create_token_missing_field(self):\n payload = {'email': 'test@test.com', 'password': 'testpass'}\n create_user(**payload)\n payload = {'email': 'test@test.com', 'password': ''}\n res = self.client.post(TOKEN_URL, payload)\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def auth_token_generate(identity_param_val, expires_delta=False):\n access_token = ''\n try:\n if expires_delta is not False:\n expires_delta = timedelta(minutes=expires_delta)\n access_token = create_access_token(identity=identity_param_val, expires_delta=expires_delta)\n except Exception as e:\n print(e)\n\n return access_token", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create_fake_JWT_token(userEmail):\n pass", "def create_token_no_user(self):\n payload = {'email': 'test@test.com', 'password': 'testpass'}\n res = self.client.post(TOKEN_URL, payload)\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "async def create_token(self, *args, **kwargs) -> OAuth2Token:\n token = await super().create_token(*args, **kwargs)\n # NOTE: Save data from token to db here.\n return token", "def test_create_token_valid(self):\n create_mock_user(**self.mock_user)\n res = self.client.post(TOKEN_URL, self.mock_user)\n\n self.assertEqual(res.status_code, 
status.HTTP_200_OK)\n self.assertIn('token', res.data)", "def issue_user_token(user, salt):\r\n\r\n\t\tif user is not None:\r\n\t\t\tif (salt == 'login'):\r\n\t\t\t\ttoken, _ = Token.objects.get_or_create(user=user)\r\n\t\t\telse:\r\n\t\t\t\ttoken = signing.dumps({'pk': user.pk}, salt=salt)\r\n\r\n\t\t\treturn token\r\n\r\n\t\treturn None", "def generate_token_for_user(user: User, expiration: datetime.timedelta=datetime.timedelta(days=7)):\n\n return generate_token({'id': user.id}, expiration)", "def create(self, validated_data):\n request = self._kwargs['context']['request']\n user = User.objects.create(**validated_data)\n user.set_password(validated_data[\"password\"])\n user.save()\n category_list = ['Fuel', 'Bill', 'Entertainment', 'Education', 'Food']\n for category in category_list:\n user.user_categories.create(name=category)\n login(request, user)\n token, created = Token.objects.get_or_create(user=user)\n validated_data[\"token\"] = token.key\n return validated_data", "def create_user(UserName=None, MessageAction=None, FirstName=None, LastName=None, AuthenticationType=None):\n pass", "def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))", "def token_generate(self, user_id):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=200),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n encoded_token = jwt.encode(\n payload, current_app.config['SECRET_KEY'], algorithm='HS256'\n )\n return encoded_token\n\n except Exception:\n return str(Exception)", "def login(user: User):\n try:\n time = datetime.datetime.utcnow() + datetime.timedelta(seconds=60)\n payload = {\"user_id\": user.name, \"password\": user.password, \"exp\": time}\n token = jwt.encode(payload, JWT_SECRET_KEY, JWT_ALGORITHM).decode('utf-8')\n return {\"token\": token}\n except Exception:\n return {\"message\": \"Error in creating Token\"}", "async def r_create_user(*_, role: str = \"USER\") -> User:\n # create api object\n password = token_hex(10)\n # save to db\n user = await User.create(\n key=token_bytes(32),\n username=token_hex(10),\n password_hash=ARGON.hash(password),\n role=role,\n created=time(),\n )\n # set password field on user to pass them their password 1 time\n user.password = password\n # return api object to resolver\n return user", "def generate_token(exp=None):\n\n secret_key = getenv('JWT_SECRET_KEY')\n user = {\n 'first_name': fake.name(),\n 'last_name': fake.name(),\n 'email': fake.email(),\n 'is_admin': IsAdmin.yes,\n 'password': fake.password()\n }\n\n payload = {'id': str(User.find_or_create(user, email=user['email']).id)}\n payload.__setitem__('exp', exp) if exp is not None else ''\n token = jwt.encode(payload, secret_key, algorithm='HS256').decode(CHARSET)\n return 'Bearer {0}'.format(token)", "def generate_token(user, expire_time=86400):\n session = Session()\n token = session.query(PasswordRecoveryToken)\\\n .filter(PasswordRecoveryToken.user_id == user.user_id)\\\n .first()\n\n if token is not None:\n self.expire(token)\n \n token = PasswordRecoveryToken()\n token.user_id = user.user_id\n session.add(token)\n \n token.expiration = datetime.now() + timedelta(seconds=expire_time)\n \n sha_token = hashlib.sha224(user.login)\n sha_token.update(user.password)\n sha_token.update(str(token.expiration))\n \n token.token = sha_token.hexdigest()\n print 
token.token\n return token", "def create_token():\n def token_helper():\n token = util.prompt_for_user_token(username=\"robbo1992\", scope='user-library-read playlist-modify-private playlist-modify',\n client_id=config[\"spotify\"][\"client_id\"], client_secret=config[\"spotify\"][\"secret_id\"],\n redirect_uri='http://localhost:8080', cache_path=spotify_cache)\n return token\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n if motley.internet:\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n log.error(\"Authentication error in create_token method.\")", "def _create_security_token(user):\n timestamp = int(time.time())\n plaintext = \"%x %s\" % (timestamp, user.email)\n nearest_mult_of_16 = 16 * ((len(plaintext) + 15) // 16)\n # Pad plaintest with whitespace to make the length a multiple of 16,\n # as this is a requirement of AES encryption.\n plaintext = plaintext.rjust(nearest_mult_of_16, ' ')\n if _DISABLE_CRYPTO:\n body = plaintext\n sig = \"sig\"\n else:\n key_storage = KeyStorage.get()\n body = AES.new(key_storage.aes_key, AES.MODE_CBC).encrypt(plaintext)\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n # Crypto requires byte strings\n hmac_key = hmac_key.encode('utf8')\n sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n return '%s:%s' % (sig, body)", "def create_token(self, token_type=DEFAULT_TOKEN, extra_data='{}'):\n if token_type not in dict(TOKEN_TYPES).keys():\n raise ValueError(\"Unable to create token, unknown type\")\n\n value = calc_checksum(self.email, salt=randint(0, maxint))\n\n return LoginToken.objects.create(user=self, value=value, token_type=token_type, extra_data=extra_data)", "def create_token(self, consumer, token_type, timestamp, scope,\n user=None, callback=None, callback_confirmed=False):\n token = self.create(consumer=consumer, \n token_type=token_type, \n timestamp=timestamp,\n scope=scope,\n user=user,\n callback=callback,\n callback_confirmed=callback_confirmed,\n key=uuid.uuid4().hex,\n secret=get_random_string(length=SECRET_SIZE))\n\n return token", "def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('username', help='Required field', required=True)\n parser.add_argument('password', help='Required field', required=True)\n args = parser.parse_args()\n\n existing_user = users_db.get_user_by_username(args['username'])\n if not existing_user:\n # Create new user\n hashed_password = BCRYPT.generate_password_hash(args['password']).decode('utf-8')\n user_id = users_db.create_user(\n {\n 'username': args['username'],\n 'password': hashed_password\n }\n )\n\n # Use create_access_token() and create_refresh_token()\n # to create our access and refresh tokens\n response, status = {\n 'message': 'User <{0}> was successfully created: id={1}'.format(\n args['username'], user_id\n ),\n 'access_token': create_access_token(identity=str(user_id)),\n 'refresh_token': create_refresh_token(identity=str(user_id))\n }, 201\n else:\n # check password of existing user and create new jwt token\n if BCRYPT.check_password_hash(existing_user[0]['password'], args['password']):\n response, status = {\n 'message': 'User <{0}> was successfully logged in'.format(\n existing_user[0]['username']\n ),\n 'access_token': create_access_token(identity=str(existing_user[0]['_id'])),\n 'refresh_token': 
create_refresh_token(identity=str(existing_user[0]['_id']))\n }, 200\n else:\n response, status = {\n 'message': 'Invalid user password'\n }, 400\n\n return Response(dumps(response), status=status, mimetype='application/json')", "def test_create_user_endpoint_creates_user(caplog):\n caplog.set_level('INFO')\n\n _request_create_user(SEED_USER_DATA)\n created_user = Advisor.objects.get(email=SEED_USER_DATA['email'])\n\n user_data_keys = SEED_USER_DATA.keys() - set(['token'])\n for key in user_data_keys:\n assert str(getattr(created_user, key)) == SEED_USER_DATA[key]\n\n user_info = [\n 'Creating a user: {',\n f' \"dit_team_id\": \"{SEED_USER_DATA[\"dit_team_id\"]}\",',\n f' \"email\": \"{SEED_USER_DATA[\"email\"]}\",',\n f' \"first_name\": \"{SEED_USER_DATA[\"first_name\"]}\",',\n f' \"last_name\": \"{SEED_USER_DATA[\"last_name\"]}\",',\n f' \"sso_email_user_id\": \"{SEED_USER_DATA[\"sso_email_user_id\"]}\"',\n '}',\n ]\n user_token = f'Created a token `{SEED_USER_DATA[\"token\"]}` for user {created_user.id}.'\n assert caplog.messages == [\n '\\n'.join(user_info),\n user_token,\n ]", "def test_create_token_invalid_credentials(setup_client):\n client = setup_client\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'testpass',\n }\n create_user(**payload, **{'role': 'Supplier'})\n payload[\"password\"] = \"Something else\"\n res = client.post(TOKEN_URL, payload)\n assert \"token\" not in res.data\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "async def _token(self, user: discord.User = None, user_id: int = None):\n # This is to be used with the registration URL so that it doesn't contain\n # the user's ID in cleartext. This is so that person A cannot trivially\n # generate person B's URL and assign them to person A's team.\n if not user:\n user = self.bot.get_user(user_id)\n hashh = await self.config.user(user).digest()\n if hashh is None:\n salt = await self.config.user(user).secret()\n if salt is None:\n salt = random_salt()\n await self.config.user(user).secret.set(salt)\n hashh = digest(user.id, salt)\n await self.config.user(user).digest.set(hashh)\n await self.config.set_raw('undigest', hashh, value=user.id)\n return hashh", "def token_gen_call(username, password, exp=None):\n #pdb.set_trace()\n \n #username_set = params['AUTH']['username_set']\n #password_set = params['AUTH']['password_set']\n username_set = username\n password_set = password\n \"\"\"\n Creates JWT Token\n :return:\n \"\"\"\n if exp is None:\n exp = datetime.utcnow() + timedelta(seconds=3600)\n _token = {\n 'aud': JWT_AUDIENCE,\n 'exp': exp,\n 'iss': JWT_ISSUER,\n 'user': username,\n 'role': 'admin',\n 'time':time.time()\n }\n _token.update(_token)\n \n if password_set == password and username_set == username: # example, don't do this in production\n return {\"token\" : jwt.encode(_token, SECRET_KEY, algorithm=JWT_OPTIONS_ALGORITHM).decode('utf-8') }\n return 'Invalid username and/or password for user: {0}'.format(username)", "def create_new_user():\n return get_user_model().objects.create_user(\n email='test@gmail.com',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def new_user(cls, user):\r\n pass", "def create_token_invalid_credentials(self):\n create_user(email='test@test.com', password='testpass')\n payload = {'email': 'test@test.com', 'password': 'wrong'}\n\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_token_invalid_credentials(self):\n\n credentials = 
{'email': 'testuser@gmail.com', 'password': 'Testpass12'}\n get_user_model().objects.create_user(**credentials)\n\n invalid_credentials = {\n 'email': 'testuser@gmail.com',\n 'password': 'wrong'\n }\n response = self.client.post(URL_TOKEN, invalid_credentials)\n\n # Check that the response is HTTP 400, and does not contain a token.\n self.assertNotIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days= 3)\n payload = {\n 'user': user.username,\n 'exp': int (exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm= 'HS256')\n return token", "def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n # Generacion del token\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token", "def generateAuthToken(self):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=0, minutes=30),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256').decode()\n except Exception as error:\n print(error)\n return error", "def add_user(self, username='TestUser', password='password',\n with_token=False):\n token = None\n user = User(login=username, password=crypto(username, password))\n if with_token:\n token = str(uuid.uuid4())\n user.last_token = token\n user.save()\n return token", "def build_access_token_normal_user():\n return do_build_access_token(tenant_id='intility_tenant_id', admin=False)", "def test_create_token_invalid_credentials(self):\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'testpass'\n }\n create_user(**payload)\n wrong_payload = {\n 'email': 'test@gmail.com',\n 'password': 'wrong'\n }\n res = self.client.post(TOKEN_URI, wrong_payload)\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def create_user(user_id, password_16char, public_key_32char):\n headers = {'Content-type': 'application/json'}\n payload = {'user_id': user_id\n , 'user_password': password_16char\n , 'public_key': public_key_32char}\n response = requests.post(\"http://localhost:5000/user/createUser\", data=json.dumps(payload), headers=headers)\n return response.text", "def default_user(self):\n self.user = self.create_user(create_token=True)\n return", "def create_user(email, password, f_name, l_name):\n pass", "def new_user(cls, user):\n pass", "def create_token(self,uid):\n token_str = self.get_random(5) + str(uid) + str(int(time.time()))\n m = hashlib.md5()\n m.update(token_str)\n return m.hexdigest()", "def create_account(self, short_name, author_name=None, author_url=None,\n replace_token=True):\n response = self._telegraph.method('createAccount', values={\n 'short_name': short_name,\n 'author_name': author_name,\n 'author_url': author_url\n })\n\n if replace_token:\n self._telegraph.access_token = response.get('access_token')\n\n return response" ]
[ "0.7601047", "0.7413485", "0.7076036", "0.70279247", "0.70279247", "0.70279247", "0.6908097", "0.68845016", "0.683764", "0.68366253", "0.6822452", "0.6817627", "0.68121517", "0.6806323", "0.67799586", "0.6771342", "0.676491", "0.67617136", "0.6744228", "0.67380023", "0.668147", "0.66268677", "0.6613045", "0.660879", "0.65834117", "0.6496971", "0.6480837", "0.6447179", "0.6446974", "0.64392906", "0.6407566", "0.6359538", "0.63285154", "0.63216835", "0.62730855", "0.6271753", "0.6267881", "0.6261526", "0.6251311", "0.6240688", "0.6238316", "0.61928886", "0.61924297", "0.61919177", "0.61906165", "0.61901593", "0.6167083", "0.6160513", "0.6159672", "0.613919", "0.6138683", "0.6130583", "0.6128868", "0.61206627", "0.6109322", "0.6074863", "0.6074863", "0.6074863", "0.6074863", "0.6074863", "0.6074863", "0.60724163", "0.60536456", "0.60474616", "0.6039723", "0.6027003", "0.6019266", "0.6012439", "0.5996157", "0.59951603", "0.59841335", "0.5983542", "0.59769326", "0.5970406", "0.5960097", "0.59580415", "0.595518", "0.59436995", "0.591753", "0.59125125", "0.591038", "0.5901625", "0.58892876", "0.58685285", "0.58652484", "0.5859687", "0.5843304", "0.5826938", "0.5825239", "0.58169013", "0.58141387", "0.5812102", "0.58052546", "0.57943016", "0.5790545", "0.5782189", "0.5775316", "0.57717836", "0.5769745", "0.57674825" ]
0.70019704
6
Creates a new compartment in the specified compartment.
def create_compartment(self, create_compartment_details, **kwargs): resource_path = "/compartments" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_compartment got unknown kwargs: {!r}".format(extra_kwargs)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, header_params=header_params, body=create_compartment_details, response_type="Compartment") else: return self.base_client.call_api( resource_path=resource_path, method=method, header_params=header_params, body=create_compartment_details, response_type="Compartment")
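A minimal caller-side sketch for the create_compartment method above. It assumes the usual OCI Python SDK setup (oci.config.from_file, oci.identity.IdentityClient) and that the request body is built with oci.identity.models.CreateCompartmentDetails; the compartment name and description are hypothetical, and only the create_compartment(create_compartment_details) call and its Compartment response type are taken from the code above.

import oci

# Assumed standard SDK setup; profile and tenancy values come from ~/.oci/config.
config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# CreateCompartmentDetails is assumed to be the matching request model;
# compartment_id names the parent compartment (here the tenancy root).
details = oci.identity.models.CreateCompartmentDetails(
    compartment_id=config["tenancy"],
    name="example-compartment",        # hypothetical name
    description="Sketch compartment",  # a description is required but can be any string
)

# Returns a Response whose .data is a Compartment model, per the response_type above.
new_compartment = identity.create_compartment(details).data
print(new_compartment.id, new_compartment.name)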
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createCompartment(self):\n return _libsbml.Model_createCompartment(self)", "def addCompartment(self, vol=1, comp_id=\"\"):\n\n c1 = self.model.createCompartment()\n self.check(c1, \"create compartment\")\n if len(comp_id) == 0:\n comp_id = \"c\" + str(self.model.getNumCompartments())\n self.check(c1.setId(comp_id), \"set compartment id\")\n self.check(c1.setConstant(True), 'set compartment \"constant\"')\n self.check(c1.setSpatialDimensions(3), \"set compartment dimensions\")\n\n self.check(c1.setSize(vol), 'set compartment \"size\"')\n self.check(c1.setUnits(\"litre\"), \"set compartment size units\")\n return c1", "def addCompartment(self, *args):\n return _libsbml.Model_addCompartment(self, *args)", "def __init__(self, *args):\n this = _libsbml.new_CompartmentType(*args)\n try: self.this.append(this)\n except: self.this = this", "def createCompartmentType(self):\n return _libsbml.Model_createCompartmentType(self)", "def __init__(self, *args):\n this = _libsbml.new_Compartment(*args)\n try: self.this.append(this)\n except: self.this = this", "def add_compartment(self, Vp=1, Qp=1):\n self.__n_compartments += 1\n self.__compartments.append({'Vp': Vp, 'Qp': Qp})", "def create_new_component(self, cname):\n while True:\n try:\n self.model.get_component_by_name(cname)\n cname += u'_'\n except KeyError:\n # Component with this name doesn't exist\n break\n # Create the component\n comp = cellml_component.create_new(self.model, cname)\n self.model._add_component(comp)\n return comp", "def setCompartment(self, *args):\n return _libsbml.CompartmentReference_setCompartment(self, *args)", "def createCompartmentReference(self):\n return _libsbml.MultiCompartmentPlugin_createCompartmentReference(self)", "def setCompartment(self, *args):\n return _libsbml.Reaction_setCompartment(self, *args)", "def createObject(self, *args):\n return _libsbml.MultiCompartmentPlugin_createObject(self, *args)", "def setCompartment(self, *args):\n return _libsbml.Species_setCompartment(self, *args)", "def addCompartmentType(self, *args):\n return _libsbml.Model_addCompartmentType(self, *args)", "def createCompartmentGlyph(self):\n return _libsbml.Layout_createCompartmentGlyph(self)", "def addCompartmentGlyph(self, *args):\n return _libsbml.Layout_addCompartmentGlyph(self, *args)", "def container(name, ostemplate, **kwargs):\n if not openvz.exists(name):\n ctid = openvz.get_available_ctid()\n openvz.create(ctid, ostemplate=ostemplate, **kwargs)\n openvz.set(ctid, name=name)\n return Container(name)", "def add_comp(self, name, ctype):\n\n name = self.name + '.' + name\n\n assert name not in self.components, 'A component named \\'{}\\' already exists for node \\'{}\\''.format(\n name, self.name)\n\n try:\n cls = co.str_to_comp(ctype)\n except AttributeError:\n try:\n cls = rc.str_to_comp(ctype)\n except AttributeError:\n cls = None\n\n if cls:\n obj = cls(name=name,\n temperature_driven=self.temperature_driven,\n repr_days=self.repr_days)\n else:\n raise ValueError(\n \"%s is not a valid class name! 
(component is %s, in node %s)\" % (\n ctype, name, self.name))\n\n self.logger.info('Component {} added to {}'.format(name, self.name))\n\n self.components[name] = obj", "def setCompartmentType(self, *args):\n return _libsbml.Compartment_setCompartmentType(self, *args)", "def create(cls, name, container_object, enable=True, optional_attributes={}):\n assert type(name) == str\n assert container_object.__class__.__name__ == 'ADContainer'\n return container_object.create_computer(name=name,enable=enable,optional_attributes=optional_attributes)", "def setCompartment(self, *args):\n return _libsbml.QualitativeSpecies_setCompartment(self, *args)", "def addCompartmentReference(self, *args):\n return _libsbml.MultiCompartmentPlugin_addCompartmentReference(self, *args)", "def firmware_pack_create(handle, org_name, name, rack_bundle_version,\n blade_bundle_version, descr=\"\", mode=\"staged\",\n org_parent=\"org-root\"):\n\n org_dn = org_parent + \"/org-\" + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info(\"Sub-Org <%s> not found!\" % org_name)\n else:\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import\\\n FirmwareComputeHostPack\n\n mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn,\n name=name,\n descr=descr,\n rack_bundle_version=rack_bundle_version,\n mode=mode,\n blade_bundle_version=blade_bundle_version)\n handle.add_mo(mo)\n handle.commit()", "def compartment_id(self, compartment_id):\n self._compartment_id = compartment_id", "def compartment_id(self, compartment_id):\n self._compartment_id = compartment_id", "def setCompartment(self, *args):\n return _libsbml.MultiSpeciesType_setCompartment(self, *args)", "def clone(self):\n return _libsbml.Compartment_clone(self)", "def createappendcomp(self, componentname, componentclass, *args, **kwargs):\n component = componentclass(self, self.debugmode, *args, **kwargs)\n self.components.append(componentname, component)\n return component", "def clone(self):\n return _libsbml.CompartmentType_clone(self)", "def create(\n cls, component_config: Dict[Text, Any], config: DazuConfig\n ) -> \"Component\":\n\n # Check language supporting\n language = config.language\n if not cls.can_handle_language(language):\n # check failed\n raise UnsupportedLanguageError(cls.name, language)\n\n return cls(component_config)", "def createElement(tagName):\n print(\"Warning: createElement is deprecated in favor of createComponent\")\n return createComponent(tagName)", "def create_port(self, port):\n try:\n backend_id = self.client.create_nic(\n port.vm.backend_id, port.network.backend_id\n )\n except VMwareError as e:\n raise VMwareBackendError(e)\n else:\n port.backend_id = backend_id\n port.save(update_fields=['backend_id'])\n return port", "def env_creator(env_config):\n return CartPoleBTEnv(\n goal_state=env_config['goal_state'],\n disturbances=env_config['disturbances'],\n initial_state=env_config['initial_state'],\n initial_state_variance=env_config['initial_state_variance']\n )", "def setCompartmentType(self, *args):\n return _libsbml.MultiCompartmentPlugin_setCompartmentType(self, *args)", "def create(cls, name, mac_address, comment=None):\n comment = comment if comment else ''\n cls.json = {'name': name,\n 'address': mac_address,\n 'comment': comment}\n return ElementCreator(cls)", "def getCompartment(self, *args):\n return _libsbml.Model_getCompartment(self, *args)", "def create(args):\n print('Creates an HPC fleet with given name \"{}\"'.format(args.fleet_name))", "def setCompartmentId(self, *args):\n return 
_libsbml.CompartmentGlyph_setCompartmentId(self, *args)", "def create_vlan_pool(self, vlan_pool_name, allocation_mode):\n VlanInstP_mo = VlanInstP('uni/infra/', vlan_pool_name, allocation_mode)\n self.commit(VlanInstP_mo)\n return VlanInstP_mo", "def make_component(self, name=\"Face\") -> 'Component':\n return BRepComponent(self.brep, component=self.component, name=name)", "def ec_contaier_create(self, oclass):\n # Get container params\n self.ec_container = TestContainer(\n self.pool, daos_command=DaosCommand(self.bin))\n self.ec_container.get_params(self)\n self.ec_container.oclass.update(oclass)\n # update object class for container create, if supplied\n # explicitly.\n ec_object = self.get_data_parity_number(oclass)\n self.ec_container.properties.update(\"rf:{}\".format(ec_object['parity']))\n\n # create container\n self.ec_container.create()", "def create_environment(cls, full_config):\n\n config = full_config['template']['devops_settings']\n environment = cls.create(config['env_name'])\n\n # create groups and drivers\n groups = config['groups']\n environment.add_groups(groups)\n\n # create address pools\n address_pools = config['address_pools']\n environment.add_address_pools(address_pools)\n\n # process group items\n for group_data in groups:\n group = environment.get_group(name=group_data['name'])\n\n # add l2_network_devices\n group.add_l2_network_devices(\n group_data.get('l2_network_devices', {}))\n\n # add network_pools\n group.add_network_pools(\n group_data.get('network_pools', {}))\n\n # Connect nodes to already created networks\n for group_data in groups:\n group = environment.get_group(name=group_data['name'])\n\n # add group volumes\n group.add_volumes(\n group_data.get('group_volumes', []))\n\n # add nodes\n group.add_nodes(\n group_data.get('nodes', []))\n\n return environment", "def create_instance(c_instance):\n return AumPC40(c_instance)", "def set_compartment(self, label, init_val=0.):\n assert type(label) is str, 'Compartment label for initial setting not string'\n assert type(init_val) is float or type(init_val) is int, 'Value to start % compartment from not string' % label\n assert init_val >= 0., 'Start with negative compartment not permitted'\n if label not in self.labels:\n self.labels.append(label)\n self.init_compartments[label] = init_val", "def _create_org(org_name, org_status):\n new_org = OcOrg().setup_org(name=org_name, status=org_status)\n\n return new_org", "def create_container(self, hostname, container_id, endpoint):\n\n endpoint_path = ENDPOINT_PATH % {\"hostname\": hostname,\n \"container_id\": container_id,\n \"endpoint_id\": endpoint.id}\n\n _log.info(\"Creating endpoint at %s\", endpoint_path)\n try:\n self.client.write(endpoint_path + \"addrs\", json.dumps(endpoint.addrs))\n self.client.write(endpoint_path + \"mac\", endpoint.mac)\n self.client.write(endpoint_path + \"state\", endpoint.state)\n except etcd.EtcdException as e:\n _log.exception(\"Hit Exception %s writing to etcd.\", e)\n pass", "def create_board(self, board_id, comp):\n new_board = self.create_board_from_template(board_id)\n comp.answer(new_board.id)", "def create(\n location: str,\n outputdir: pathlib.Path,\n *,\n extrabindings: Sequence[Binding],\n interactive: bool,\n revision: Optional[str],\n directory: Optional[pathlib.Path],\n in_place: bool,\n) -> None:\n config = ProjectConfig(location, (), revision, directory)\n\n with createproject(\n config, userbindings=extrabindings, interactive=interactive\n ) as project:\n projectdir = outputdir if in_place else outputdir / 
project.name\n repository = ProjectRepository.create(projectdir, message=\"Initial commit\")\n commit = commitproject(repository, project, commitmessage=createcommitmessage)\n\n repository.import_(commit)", "def create_node(self, node_cfg):\n with self.__connect_node(node_cfg) as conn:\n self._provision_node(conn, node_cfg)\n self._bootup_node(conn)", "def build(cmpd, density=0.5*u.gram/(u.cm**3), n_compounds=1000, \n ff='ff/TraPPE_UA_3_fully_flexible_propane.xml'):\n density.convert_to_units(u.kilogram/u.m**3)\n\n # Pack a box\n box = mb.fill_box(cmpd, n_compounds=n_compounds, density=density.value)\n\n # Wrap coordinates\n new_xyz = box.xyz - 1 * np.floor_divide(box.xyz, box.periodicity) * box.periodicity\n box.xyz = new_xyz\n\n # Apply non-atomistic, custom element naming convention\n for part in box.particles():\n part.name = \"_\" + part.name\n\n # Utilize foyer to parametrize our box\n ff = foyer.Forcefield(forcefield_files=ff)\n box = box.to_parmed(infer_residues=True)\n parametrized_structure = ff.apply(box, combining_rule='lorentz')\n\n # Dump initial coordinates\n parametrized_structure.save('compound.pdb', overwrite=True)\n parametrized_structure.save('compound.mol2', overwrite=True)\n parametrized_structure.save('compound.gro', overwrite=True)\n\n return parametrized_structure", "def CreateComponent(self, name, state):\n component = SimpleTestClientComponent()\n component.__dict__.update(state)\n return component", "def create(self):\n config = {}\n for key in ('ident', 'cgroupparent', 'infra', 'labels', 'share'):\n config[key] = self.opts.get(key)\n\n try:\n pod = self.client.pods.create(**config)\n except podman.ErrorOccurred as ex:\n sys.stdout.flush()\n print(\n '{}'.format(ex.reason).capitalize(),\n file=sys.stderr,\n flush=True)\n else:\n print(pod.id)", "def createComponent(tagName):\n\n class Component():\n \"\"\"A basic class for a virtual DOM Component\"\"\"\n def __init__(self, *children, **kwargs):\n self.children = _flatten_children(*children, **kwargs)\n self.attributes = kwargs\n self.tagName = tagName\n\n def _repr_mimebundle_(self, include, exclude, **kwargs):\n return {\n 'application/vdom.v1+json': toJSON(self),\n 'text/plain': '<{tagName} />'.format(tagName=tagName)\n }\n \n Component.__doc__ = \"\"\"A virtual DOM component for a {tagName} tag\n \n >>> {tagName}()\n <{tagName} />\n \"\"\".format(tagName=tagName)\n \n return Component", "def create_host(self, conf, tenant_id, network_id, params):\n\t\tpass", "def __create_cont(self, path, filesystem, cont_stat, component_number):\n try:\n self.logger.debug('Create container interface called')\n status_obj = Status()\n cont_id = \"container\"\n #cont_id = get_container_id()\n tmp_path = '%s/%s/%s/%s/%s' % (self.__fs_base, \\\n filesystem, TMPDIR, cont_id,component_number)\n self.asyn_helper.call(\"create_container\", \\\n tmp_path, path, cont_stat, status_obj)\n return status_obj\n except Exception as err:\n self.logger.error(('create_container for %(con_dir)s failed ',\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path, \n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def create_port(self, component):\n if self.fixed_size is not None and not self.array:\n raise ValueError(\n \"{}.{}: @{}port specified fixed_size but not array=True\".format(\n self, self.name,\n self.kind))\n ptype = self.get_port_type()\n return ptype(component, **self.data)", "def __init__(self, *args):\n this = _libsbml.new_MultiCompartmentPlugin(*args)\n try: self.this.append(this)\n except: self.this = 
this", "def __init__(self, *args):\n this = _libsbml.new_ListOfCompartmentTypes(*args)\n try: self.this.append(this)\n except: self.this = this", "def newChemCompVar(self, **attrlinks):\n return ChemCompVar(self, **attrlinks)", "def mkcomponent(self,\n context=[],\n componentobj=None):\n if componentobj == None:\n raise ValueError, \"mkcomponent: componentobj is None\"\n return jsoncall.do_call(\"mkcomponent\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password,\\\n 'context':context,\\\n 'componentobj':componentobj.__dict__},\n self.connection)", "def test_create_new_placements(self):\n subv = SimpleMachineVertex(None, \"\")\n pl = Placement(subv, 0, 0, 1)\n Placements([pl])", "def createComplejoPlatino(c_dict):\n\tnom = c_dict['nombre'].split()\n\tnom.insert(1, 'Platino')\n\tnombre = ' '.join(nom)\n\tc_dict['nombre'] = nombre\n\tc_dict['id_org'] = int(c_dict['id_org']) + 10000\n\tcreateComplejo(c_dict)", "def create():", "def create():", "def createAllocation(name, tag, facility):\n return Allocation(Cuebot.getStub('allocation').Create(\n facility_pb2.AllocCreateRequest(name=name, tag=tag, facility=facility),\n timeout=Cuebot.Timeout).allocation)", "def _env_create(name_or_path, init_file=None, dir=False, with_view=None, keep_relative=False):\n if dir:\n env = ev.Environment(name_or_path, init_file, with_view, keep_relative)\n env.write()\n tty.msg(\"Created environment in %s\" % env.path)\n tty.msg(\"You can activate this environment with:\")\n tty.msg(\" spack env activate %s\" % env.path)\n else:\n env = ev.create(name_or_path, init_file, with_view, keep_relative)\n env.write()\n tty.msg(\"Created environment '%s' in %s\" % (name_or_path, env.path))\n tty.msg(\"You can activate this environment with:\")\n tty.msg(\" spack env activate %s\" % (name_or_path))\n return env", "def add_cable(self, env, size_range, info={}, cable_idx=0,\n direction='z', max_force=100):\n num_parts = self.num_parts\n radius = self.radius\n length = self.length\n color = self.colors[cable_idx] + [1]\n color_end = U.COLORS['yellow'] + [1]\n\n # Add beaded cable.\n distance = length / num_parts\n position, _ = self.random_pose(env, size_range)\n position = np.float32(position)\n part_shape = p.createCollisionShape(p.GEOM_BOX, halfExtents=[radius]*3)\n part_visual = p.createVisualShape(p.GEOM_SPHERE, radius=radius*1.5)\n\n # Iterate through parts and create constraints as needed.\n for i in range(num_parts):\n if direction == 'x':\n position[0] += distance\n parent_frame = (distance, 0, 0)\n elif direction == 'y':\n position[1] += distance\n parent_frame = (0, distance, 0)\n else:\n position[2] += distance\n parent_frame = (0, 0, distance)\n\n part_id = p.createMultiBody(0.1, part_shape, part_visual,\n basePosition=position)\n if i > 0:\n constraint_id = p.createConstraint(\n parentBodyUniqueId=env.objects[-1],\n parentLinkIndex=-1,\n childBodyUniqueId=part_id,\n childLinkIndex=-1,\n jointType=p.JOINT_POINT2POINT,\n jointAxis=(0, 0, 0),\n parentFramePosition=parent_frame,\n childFramePosition=(0, 0, 0))\n p.changeConstraint(constraint_id, maxForce=max_force)\n\n if (i > 0) and (i < num_parts - 1):\n p.changeVisualShape(part_id, -1, rgbaColor=color)\n elif i == num_parts - 1:\n p.changeVisualShape(part_id, -1, rgbaColor=color_end)\n\n # The usual for tracking IDs. 
Four things to add.\n self.cable_bead_IDs.append(part_id)\n self._IDs[part_id] = f'cable_part_{str(part_id).zfill(2)}'\n env.objects.append(part_id)\n self.object_points[part_id] = np.float32((0, 0, 0)).reshape(3, 1)\n\n # Get target placing positions for each cable bead, if applicable.\n if self._name == 'cable-shape' or self._name == 'cable-shape-notarget' or \\\n self._name == 'cable-line-notarget':\n # ----------------------------------------------------------- #\n # Here, zone_pose = square_pose, unlike Ravens cable, where the\n # zone_pose is shifted so that its center matches the straight\n # line segment center. For `true_position`, we use `zone_pose`\n # but apply the correct offset to deal with the sides. Note\n # that `length` is the size of a fully smoothed cable, BUT we\n # made a rectangle with each side <= length.\n # ----------------------------------------------------------- #\n lx = info['lengthx']\n ly = info['lengthy']\n r = radius\n\n if info['nb_sides'] == 1:\n # Here it's just a straight line on the 'lx' side.\n x_coord = lx / 2 - (distance * i)\n y_coord = 0\n true_position = (x_coord - r, y_coord, 0)\n\n elif info['nb_sides'] == 2:\n # Start from lx side, go 'left' to the pivot point, then on\n # the ly side, go 'upwards' but offset by `i`. For radius\n # offset, I just got this by tuning. XD\n if i < info['cutoff']:\n x_coord = lx / 2 - (distance * i)\n y_coord = -ly / 2\n true_position = (x_coord - r, y_coord, 0)\n else:\n x_coord = -lx / 2\n y_coord = -ly / 2 + (distance * (i - info['cutoff']))\n true_position = (x_coord, y_coord + r, 0)\n\n elif info['nb_sides'] == 3:\n # Start from positive lx, positive ly, go down to first\n # pivot. Then go left to the second pivot, then up again.\n # For v1, division by two is because we assume BOTH of the\n # 'ly edges' were divided by two.\n v1 = (self.num_parts - info['cutoff']) / 2\n v2 = self.num_parts - v1\n if i < v1:\n x_coord = lx / 2\n y_coord = ly / 2 - (distance * i)\n true_position = (x_coord, y_coord - r, 0)\n elif i < v2:\n x_coord = lx / 2 - (distance * (i - v1))\n y_coord = -ly / 2\n true_position = (x_coord - r, y_coord, 0)\n else:\n x_coord = -lx / 2\n y_coord = -ly / 2 + (distance * (i - v2))\n true_position = (x_coord, y_coord + r, 0)\n\n elif info['nb_sides'] == 4:\n # I think this is similar to the 2-side case: we start in\n # the same direction and go counter-clockwise.\n v1 = info['cutoff'] / 2\n v2 = num_parts / 2\n v3 = (num_parts + info['cutoff']) / 2\n if i < v1:\n x_coord = lx / 2 - (distance * i)\n y_coord = -ly / 2\n true_position = (x_coord, y_coord, 0)\n elif i < v2:\n x_coord = -lx / 2\n y_coord = -ly / 2 + (distance * (i - v1))\n true_position = (x_coord, y_coord, 0)\n elif i < v3:\n x_coord = -lx / 2 + (distance * (i - v2))\n y_coord = ly / 2\n true_position = (x_coord, y_coord, 0)\n else:\n x_coord = lx / 2\n y_coord = ly / 2 - (distance * (i - v3))\n true_position = (x_coord, y_coord, 0)\n\n # Map true_position onto the workspace from zone_pose.\n true_position = self.apply(self.zone_pose, true_position)\n\n # See `cable.py`: just get the places and steps set.\n self.goal['places'][part_id] = (true_position, (0, 0, 0, 1.))\n symmetry = 0\n self.goal['steps'][0][part_id] = (symmetry, [part_id])\n\n # Debugging target zones.\n if self.target_debug_markers:\n sq_pose = ((true_position[0], true_position[1], 0.002), (0,0,0,1))\n sq_template = 'assets/square/square-template-allsides-blue.urdf'\n replace = {'DIM': (0.003,), 'HALF': (0.003 / 2,)}\n urdf = self.fill_template(sq_template, 
replace)\n env.add_object(urdf, sq_pose, fixed=True)\n os.remove(urdf)\n else:\n print(f'Warning, env {self._name} will not have goals.')", "def create(self, config):\n\n assert config[\"name\"] == self.name, \"Given config is not for this template\"\n\n data = self._json_encode(config)\n headers = self._default_headers()\n\n return self._request(\"\",\n ok_status=None,\n data=data,\n headers=headers)", "def create(self, nDeviceType):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDev_Create', nDeviceType))", "def create_org_vdc(self,\n vdc_name,\n provider_vdc_name,\n description='',\n allocation_model='AllocationVApp',\n cpu_units='MHz',\n cpu_allocated=0,\n cpu_limit=0,\n mem_units='MB',\n mem_allocated=0,\n mem_limit=0,\n nic_quota=0,\n network_quota=0,\n vm_quota=0,\n storage_profiles=[],\n resource_guaranteed_memory=None,\n resource_guaranteed_cpu=None,\n vcpu_in_mhz=None,\n is_thin_provision=None,\n network_pool_name=None,\n uses_fast_provisioning=None,\n over_commit_allowed=None,\n vm_discovery_enabled=None,\n is_enabled=True):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n sys_admin_resource = self.client.get_admin()\n system = System(self.client, admin_resource=sys_admin_resource)\n pvdc = system.get_provider_vdc(provider_vdc_name)\n resource_admin = self.client.get_resource(self.href_admin)\n params = E.CreateVdcParams(\n E.Description(description),\n E.AllocationModel(allocation_model),\n E.ComputeCapacity(\n E.Cpu(\n E.Units(cpu_units), E.Allocated(cpu_allocated),\n E.Limit(cpu_limit)),\n E.Memory(\n E.Units(mem_units), E.Allocated(mem_allocated),\n E.Limit(mem_limit))),\n E.NicQuota(nic_quota),\n E.NetworkQuota(network_quota),\n E.VmQuota(vm_quota),\n E.IsEnabled(is_enabled),\n name=vdc_name)\n for sp in storage_profiles:\n pvdc_sp = system.get_provider_vdc_storage_profile(sp['name'])\n params.append(\n E.VdcStorageProfile(\n E.Enabled(sp['enabled']),\n E.Units(sp['units']),\n E.Limit(sp['limit']),\n E.Default(sp['default']),\n E.ProviderVdcStorageProfile(href=pvdc_sp.get('href'))))\n if resource_guaranteed_memory is not None:\n params.append(\n E.ResourceGuaranteedMemory(resource_guaranteed_memory))\n if resource_guaranteed_cpu is not None:\n params.append(E.ResourceGuaranteedCpu(resource_guaranteed_cpu))\n if vcpu_in_mhz is not None:\n params.append(E.VCpuInMhz(vcpu_in_mhz))\n if is_thin_provision is not None:\n params.append(E.IsThinProvision(is_thin_provision))\n if network_pool_name is not None:\n npr = system.get_network_pool_reference(network_pool_name)\n href = npr.get('href')\n params.append(\n E.NetworkPoolReference(\n href=href,\n id=href.split('/')[-1],\n type=npr.get('type'),\n name=npr.get('name')))\n params.append(pvdc)\n if uses_fast_provisioning is not None:\n params.append(E.UsesFastProvisioning(uses_fast_provisioning))\n if over_commit_allowed is not None:\n params.append(E.OverCommitAllowed(over_commit_allowed))\n if vm_discovery_enabled is not None:\n params.append(E.VmDiscoveryEnabled(vm_discovery_enabled))\n return self.client.post_linked_resource(\n resource_admin, RelationType.ADD, EntityType.VDCS_PARAMS.value,\n params)", "def getCompartment(self):\n return _libsbml.CompartmentReference_getCompartment(self)", "def create_deployment(deployment_id, blueprint_id, environment):\n environment.add_cleanup(\n environment.cfy.deployments.delete,\n kwargs={\n 'deployment_id': deployment_id,\n },\n )\n environment.cfy.deployments.create(\n blueprint_id=blueprint_id,\n deployment_id=deployment_id,\n 
skip_plugins_validation=True,\n )", "def hfp_create(handle, org_dn, name,\r\n blade_bundle_version=\"\",\r\n rack_bundle_version=\"\",\r\n ignore_comp_check=\"yes\",\r\n update_trigger=\"immediate\",\r\n mode=\"staged\",\r\n stage_size=\"0\",\r\n policy_owner=\"local\",\r\n descr=\"testdescr\"):\r\n\r\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import \\\r\n FirmwareComputeHostPack\r\n\r\n org = handle.query_dn(org_dn)\r\n if org is None:\r\n raise ValueError(\"Org '%s' does not exist\" % org_dn)\r\n\r\n mo = FirmwareComputeHostPack(parent_mo_or_dn=\"org-root\",\r\n name=name,\r\n blade_bundle_version=blade_bundle_version,\r\n rack_bundle_version=rack_bundle_version,\r\n ignore_comp_check=ignore_comp_check,\r\n update_trigger=update_trigger,\r\n mode=mode,\r\n stage_size=stage_size,\r\n policy_owner=policy_owner,\r\n descr=descr)\r\n handle.add_mo(mo, modify_present=True)\r\n handle.commit()\r\n\r\n return mo", "def generate_compartments(parameterdict):\n\n refcmpts, model = [parameterdict[i] for i in ['refcmpts', 'model']]\n\n peripherals = [] # List for peripheral compartments\n # Iterates through compartments. Adds peripherals to peripheral list,\n # creates main and optionally sub compartment (if in SC model).\n # Doesn't allow multiple main/sub compartments.\n for cmpt in refcmpts:\n if cmpt[2] == 'Peripheral':\n peripherals.append(Compartment(cmpt[0], cmpt[1]))\n\n elif cmpt[2] == 'Main':\n if 'maincmpt' in locals():\n raise ValueError(\"Can't have two main compartments.\")\n else:\n maincmpt = Compartment(cmpt[0], cmpt[1])\n\n elif cmpt[2] == 'Sub' and model == 'sc':\n if 'subcmpt' in locals():\n raise ValueError(\"Can't have two subcompartments.\")\n else:\n subcmpt = Compartment(cmpt[0], cmpt[1])\n if subcmpt not in locals():\n subcmpt = None\n\n return maincmpt, peripherals, subcmpt", "def create_cluster(self, provision_details, project_id=\"\"):\n response = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/clusters'\n ,body=provision_details)\n return response", "def create_b_obj(ob_name, b_obj_data):\n b_obj = bpy.data.objects.new(ob_name, b_obj_data)\n bpy.context.scene.objects.link(b_obj)\n bpy.context.scene.objects.active = b_obj\n b_obj.select = True\n return b_obj", "def create(compte_origine, compte_dest, montant, date=None, notes=\"\"):\n if not isinstance(compte_origine, Compte):\n raise TypeError('pas compte')\n if not isinstance(compte_dest, Compte):\n raise TypeError('pas compte')\n if compte_origine == compte_dest:\n raise TypeError(\"Attention, le compte de départ ne peut être celui d'arrivée\")\n vir = Virement()\n vir.origine = Ope()\n vir.dest = Ope()\n vir.origine.compte = compte_origine\n vir.dest.compte = compte_dest\n vir.montant = montant\n if date:\n vir.date = date\n else:\n vir.date = utils.today()\n vir.notes = notes\n vir._init = True\n moyen = Moyen.objects.filter(type='v')[0]\n vir.dest.moyen = moyen\n vir.origine.moyen = moyen\n vir.save()\n vir.origine.jumelle = vir.dest\n vir.dest.jumelle = vir.origine\n vir.save()\n return vir", "def create_component(self, name: str, nx_class: str, description: str) -> Component:\n name = _convert_name_with_spaces(name)\n parent_group = self.nexus.instrument\n if nx_class in COMPONENTS_IN_ENTRY:\n parent_group = self.nexus.entry\n component_group = self.nexus.create_nx_group(name, nx_class, parent_group)\n component = create_component(self.nexus, component_group)\n component.description = description\n return component", "def removeCompartment(self, *args):\n return 
_libsbml.Model_removeCompartment(self, *args)", "def add_comp(mol, projects):\n\n # Now attribute all this meta-deta to the compound object\n new_comp = Compound()\n comp = calc_cpd(new_comp, mol, projects)\n return comp", "def createPort(self):\n return _libsbml.CompModelPlugin_createPort(self)", "def new(name=None):", "def create(self):\n flavor = env_vars[\"cassandra_%s_flavor\" % self.type]\n #create the VM\n self.vm = VM(self.name, flavor, self.image, create=True)", "def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))", "def create_trunk(self, trunk_id, port_id, port_mac):\n trunk = TrunkParentPort(trunk_id, port_id, port_mac)\n try:\n if not trunk.bridge.exists():\n raise exc.TrunkBridgeNotFound(bridge=trunk.bridge.br_name)\n trunk.plug(self.br_int)\n except RuntimeError as e:\n raise TrunkManagerError(error=e)", "def create_data_center(\n dc_name, cluster_name, host_name, comp_version=config.COMPATIBILITY_VERSION\n):\n testflow.step(\"Add data-center %s\", dc_name)\n assert ll_dc.addDataCenter(\n True, name=dc_name, local=False, version=comp_version\n ), \"Failed to create dc %s\" % dc_name\n\n testflow.step(\"Add cluster %s\", cluster_name)\n assert ll_clusters.addCluster(\n True, name=cluster_name, cpu=config.CPU_NAME,\n data_center=dc_name, version=comp_version\n ), \"addCluster %s with cpu %s and version %s to datacenter %s failed\" % (\n cluster_name, config.CPU_NAME, comp_version, dc_name\n )\n testflow.step(\"Move host %s to cluster %s\", host_name, cluster_name)\n assert hl_hosts.move_host_to_another_cluster(\n host=host_name, cluster=cluster_name, activate=True\n ), \"Failed to move host %s to cluster %s\" % (host_name, cluster_name)", "def create(self, verbose=False):\r\n # delete the window if its handle exists\r\n if cmds.window(self.window, exists=True):\r\n cmds.deleteUI(self.window)\r\n # initialize the window as a pane for docking\r\n self.window = cmds.loadUI(uiFile=self.uiFile, verbose=verbose)\r\n #layoutWin = cmds.paneLayout(configuration='single')\r\n # create a dockControl and parent the control to layoutWin\r\n cmds.dockControl(allowedArea='all', area='right', floating=False, \r\n height=cmds.window(self.window, query=True, height=True), \r\n content=self.window, label='Docked Cone Pointer Window')\r\n cmds.showWindow(self.window)", "def create_computer(DirectoryId=None, ComputerName=None, Password=None, OrganizationalUnitDistinguishedName=None, ComputerAttributes=None):\n pass", "def create(cls, task_name, cfd_mesh):\n if task_name not in cls._available_tasks:\n raise KeyError(\"Invalid task name: %s\"%task_name)\n tcls = cls._available_tasks[task_name]\n obj = tcls(cfd_mesh)\n return obj", "def appendcomp(self, componentname, component):\n self.components.append(componentname, component)\n return component", "def clone(self):\n return _libsbml.CompartmentReference_clone(self)", "def CreateDataContainer(name):\n dc = simpl.DataContainer.New(name)\n return dc", "def add_pod(\n self, pod_name: str, head_host, head_port_in, tail_port_out, head_zmq_identity\n ) -> None:\n if pod_name in self.pods:\n raise ValueError(\n f'Vertex with name {pod_name} already exists. 
Please check your configuration for unique Pod names.'\n )\n target = self.pods[pod_name]\n\n target.host = head_host\n target.port = head_port_in\n target.port_out = tail_port_out\n target.target_identity = head_zmq_identity", "def create(openstack_resource):\n # Update port config before create port\n _update_port_config(openstack_resource.config)\n\n # Create port\n created_resource = openstack_resource.create()\n ipv4_list, ipv6_list = _get_fixed_ips_from_port(created_resource)\n fixed_ips = ipv4_list + ipv6_list\n _export_ips_to_port_instance(ipv4_list, ipv6_list)\n\n # Handle runtime properties\n update_runtime_properties(\n {\n RESOURCE_ID: created_resource.id,\n 'fixed_ips': fixed_ips,\n 'mac_address': created_resource.mac_address,\n 'allowed_address_pairs': created_resource.allowed_address_pairs,\n }\n )", "def create(cls , specgrid , zcorn , coord , actnum , mapaxes = None ):\n return cls._grdecl_create( specgrid[0] , specgrid[1] , specgrid[2] , zcorn , coord , actnum , mapaxes )", "def create(self):\n\n raise NotImplementedError", "def new_object(self):\r\n\t\tpass", "def create_comm(comm_title, days, comm_name, comm_plat, comm_desc, comp_rating):\n time = timezone.now() + datetime.timedelta(days=days)\n return Commission.objects.create(commission_title=comm_title,\n commissioner_name=comm_name,\n commissioner_platform=comm_plat,\n commission_description=comm_desc,\n complexity_rating=comp_rating,\n creation_date=time)", "def createNode(self, pkg, exe, args, name, nspace):\r\n node = Node(self)\r\n self.callRemote('createNode', pkg, exe, args, name,\r\n nspace).chainDeferred(node)\r\n return node", "def create(cls, name, comment=None):\n comment = comment if comment else ''\n cls.json = {'name': name,\n 'comment': comment}\n return ElementCreator(cls)" ]
[ "0.736534", "0.7325312", "0.66521376", "0.6403803", "0.6099133", "0.60763836", "0.6029606", "0.59636647", "0.5960595", "0.59251946", "0.57609355", "0.57137877", "0.5665697", "0.563881", "0.55801606", "0.5452205", "0.54070544", "0.53563625", "0.5342132", "0.5272799", "0.5225759", "0.52138823", "0.5193385", "0.51932365", "0.51932365", "0.5170296", "0.51327497", "0.510586", "0.50864923", "0.5050677", "0.50235975", "0.50020546", "0.499863", "0.49834025", "0.49655697", "0.49607366", "0.49476153", "0.49365166", "0.49184203", "0.4892837", "0.4888291", "0.4879354", "0.4869009", "0.48590887", "0.48461714", "0.48433676", "0.4836576", "0.48358646", "0.48340952", "0.48277715", "0.48205927", "0.48086292", "0.47939035", "0.479008", "0.47895938", "0.47818884", "0.47799912", "0.4760526", "0.47581747", "0.4751541", "0.47425914", "0.4740594", "0.473945", "0.473945", "0.4732469", "0.4716388", "0.4715481", "0.4704558", "0.46965876", "0.46884087", "0.46871576", "0.46869037", "0.4674155", "0.46726263", "0.46698022", "0.4662941", "0.46608776", "0.4656949", "0.46272", "0.46207342", "0.46182996", "0.46170294", "0.46131828", "0.46098003", "0.46025735", "0.45954907", "0.45914477", "0.45849815", "0.458323", "0.4577306", "0.45726094", "0.45675448", "0.45674413", "0.45489955", "0.45449355", "0.4538911", "0.4532393", "0.4531944", "0.45304164", "0.45280948" ]
0.6708804
2
Creates a new secret key for the specified user. Secret keys are used for authentication with the Object Storage Service's Amazon S3 compatible API. For information, see `Managing User Credentials`__. You must specify a description for the secret key (although it can be an empty string). It does not have to be unique, and you can change it anytime with
def create_customer_secret_key(self, create_customer_secret_key_details, user_id, **kwargs): resource_path = "/users/{userId}/customerSecretKeys" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_customer_secret_key got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_customer_secret_key_details, response_type="CustomerSecretKey") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_customer_secret_key_details, response_type="CustomerSecretKey")
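A matching caller-side sketch for create_customer_secret_key above. The details-then-user_id argument order and the CustomerSecretKey response type come from the signature shown; the model class oci.identity.models.CreateCustomerSecretKeyDetails, its display_name field, and the user OCID are assumptions made for illustration.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# display_name carries the required description; the value is hypothetical.
details = oci.identity.models.CreateCustomerSecretKeyDetails(
    display_name="s3-compatible key for nightly export",
)

user_ocid = "ocid1.user.oc1..exampleuniqueID"  # hypothetical user OCID
secret_key = identity.create_customer_secret_key(details, user_ocid).data

# The plaintext key material is only returned by this create call, so capture it now.
print(secret_key.id)
print(secret_key.key)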
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_access_key(self, user_name=None):\r\n params = {'UserName' : user_name}\r\n return self.get_response('CreateAccessKey', params)", "def create_key(iam_username):\n\n try:\n response = iam.create_access_key(UserName=iam_username)\n access_key = response[\"AccessKey\"][\"AccessKeyId\"]\n secret_key = response[\"AccessKey\"][\"SecretAccessKey\"]\n json_data = json.dumps({\"AccessKey\": access_key, \"SecretKey\": secret_key})\n secretmanager.put_secret_value(SecretId=iam_username, SecretString=json_data)\n\n \n emailmsg = (\n \"Hello,\\n\\n\"\n \"A new access key has been created for key rotation. \\n\\n\"\n f\"Access Key Id: {access_key}\\n\"\n f\"Secrets Manager Secret Id: {iam_username}\"\n )\n\n emailmsg = (\n f\"{emailmsg}\\n\\n\"\n f\"Please obtain the new access key information from \"\n \"secrets manager using the secret Id provided above in \"\n f\"{AWS_REGION_NAME} and update your application within 14 days \"\n \"to avoid interruption.\\n\"\n )\n\n sns.publish(\n TopicArn=SNS_TOPIC_ARN,\n Message=emailmsg,\n Subject=f\"AWS Access Key Rotation: New key is available for \"\n f\"{iam_username}\",\n )\n print(f\"New access key has been created for {iam_username}\")\n return {\"status\": 200}\n except ClientError as e:\n print(e)\n return {\"status\": 500}", "def create(self):\n id_access_secretkey = uuid.uuid4()\n id_webuser = Base.logged_id_webuser or None\n keys = Token().generate_secretkey(config.PACKAGE_NAME)\n\n with Database() as db:\n db.insert(Table(id_access_secretkey, id_webuser, config.PACKAGE_NAME,\n keys['randomkey'], keys['secretkey']))\n db.commit()\n\n return {\n 'secretkey': keys['secretkey'],\n 'message': 'access secretkey successfully created'\n }", "def add(ctx: CLIContext, user_id, resource_policy, admin, inactive, rate_limit):\n with Session() as session:\n try:\n data = session.KeyPair.create(\n user_id,\n is_active=not inactive,\n is_admin=admin,\n resource_policy=resource_policy,\n rate_limit=rate_limit)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='add',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='add',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n item_name='keypair',\n extra_info={\n 'access_key': data['keypair']['access_key'],\n 'secret_key': data['keypair']['secret_key'],\n },\n )", "def create_secret(secret_name, secret_value, environment):\n environment.add_cleanup(\n environment.cfy.secrets.delete,\n kwargs={\n 'secret_name': secret_name,\n },\n )\n environment.cfy.secrets.create(\n secret_name=secret_name,\n secret_value=secret_value,\n )", "def GetSecretKey(cls, user_id):\n uid = hashlib.sha256(str(user_id)).hexdigest()\n entity = ndb.Key(cls, uid).get()\n if not entity:\n entity = cls(id=uid, secret_key=GenerateRandomHexKey())\n entity.put()\n return entity.secret_key", "def create_keypair(self, username):\n msg = \"create_keypair not implemented\"\n raise NotImplementedError(msg)", "def create_consumer(self, name, description=None, user=None):\n consumer, created = self.get_or_create(name=name)\n\n if user:\n consumer.user = user\n\n if description:\n consumer.description = description\n\n if created:\n consumer.key, consumer.secret = self.generate_random_codes()\n consumer.save()\n\n return consumer", "def create_with_user_id(cls: Type[T], user_id: int, secret: str) -> T:\n if not isinstance(user_id, int):\n raise TypeError('user_id must be of type `int`')\n\n if not 
isinstance(secret, str):\n raise TypeError('secret must be of type `str`')\n\n return cls._create(user_id, None, secret)", "def create_access_key(self, user_name=None, delegate_account=None):\n self.log.debug(\"Creating access key for \" + user_name )\n params = {'UserName': user_name}\n if delegate_account:\n params['DelegateAccount'] = delegate_account\n response = self.connection.get_response('CreateAccessKey', params)\n access_tuple = {}\n access_tuple['access_key_id'] = response['create_access_key_response']\\\n ['create_access_key_result']['access_key']['access_key_id']\n access_tuple['secret_access_key'] = response['create_access_key_response']\\\n ['create_access_key_result']['access_key']['secret_access_key']\n return access_tuple", "def create_jwt(user, secret):\n logger.debug(\"Create JWT with secret %s\" % secret)\n # username = request.POST['username']\n # password = request.POST['password'\n\n expiry = datetime.datetime.now() + datetime.timedelta(seconds=30)\n expiry_s = time.mktime(expiry.timetuple())\n if user.is_authenticated():\n internalid = user.authprofile.internalid\n payload = {'username': user.username, 'expiry': expiry_s, 'type': \"AuthenticatedUser\", 'internalid': internalid, 'email': user.email}\n token = jws.sign(payload, secret, algorithm='HS256')\n else:\n payload = {'expiry':expiry_s, 'type': \"AnonymousUser\", 'internalid': None, 'email': None}\n token = jws.sign(payload, secret, algorithm='HS256')\n logger.debug(\"Payload: %s\" % payload)\n # logger.info(\"Token: %s\" % token)\n return token", "def api_key( self, trans, user_id, **kwd ):\n user = self.get_user( trans, user_id )\n key = self.create_api_key( trans, user )\n return key", "def add_ssh_key(self, user_id, title, ssh_key):\n _gu = self.get_user(user_id)\n if _gu is None:\n return None\n\n # build URL and make request\n return self._post(\n '/users/{0}/keys'.format(_gu['id']),\n data={'title': title, 'key': ssh_key},\n )", "def create_user(user_id, password_16char, public_key_32char):\n headers = {'Content-type': 'application/json'}\n payload = {'user_id': user_id\n , 'user_password': password_16char\n , 'public_key': public_key_32char}\n response = requests.post(\"http://localhost:5000/user/createUser\", data=json.dumps(payload), headers=headers)\n return response.text", "def create_secret(logger,namespace,body,v1=None):\n if v1 is None:\n v1 = client.CoreV1Api()\n logger.debug('new client - fn create secret')\n try:\n name = body['metadata']['name']\n except KeyError:\n logger.debug(\"No name in body ?\")\n raise kopf.TemporaryError(\"can not get the name.\")\n try:\n data = body.get('data')\n except KeyError:\n data = ''\n logger.error(\"Empty secret?? could not get the data.\")\n \n secret_type = 'Opaque'\n if 'type' in body:\n secret_type = body['type']\n\n metadata = {'name': name, 'namespace': namespace}\n api_version = 'v1'\n kind = 'Secret'\n body = client.V1Secret(api_version, data , kind, metadata, type = secret_type)\n # kopf.adopt(body)\n logger.info(f\"cloning secret in namespace {namespace}\")\n try:\n api_response = v1.create_namespaced_secret(namespace, body)\n except client.rest.ApiException as e:\n if e.reason == 'Conflict':\n logger.warning(f\"secret `{name}` already exist in namesace '{namespace}'\")\n return 0\n logger.error(f'Can not create a secret, it is base64 encoded? 
data: {data}')\n logger.error(f'Kube exception {e}')\n return 1\n return 0", "def _create_user(userid, **kw):\n\n new_user = User(userid, **kw)\n USERS[new_user.token] = new_user\n return USERS[new_user.token]", "def secret_key(self, val):\n self.__secret_key = val", "def create_keypair(econfig_file=None, region=None, keyname=\"bcbio\"):\n import boto\n import boto.ec2\n if econfig_file:\n keypair_dir = os.path.dirname(econfig_file).replace(\"elasticluster\", \"aws_keypairs\")\n else:\n keypair_dir = os.path.join(os.getcwd(), \"aws_keypairs\")\n if not os.path.exists(keypair_dir):\n os.makedirs(keypair_dir)\n private_key = os.path.join(os.path.join(keypair_dir, keyname))\n new_key = not os.path.exists(private_key)\n if new_key:\n cmd = [\"ssh-keygen\", \"-t\", \"rsa\", \"-N\", \"\", \"-f\", private_key, \"-C\", \"bcbio_aws_keypair\"]\n subprocess.check_call(cmd)\n public_key = private_key + \".pub\"\n if region:\n ec2 = boto.ec2.connect_to_region(region)\n else:\n ec2 = boto.connect_ec2()\n key = ec2.get_key_pair(keyname)\n if key and new_key:\n print(\"Non matching key %s found in AWS, removing.\" % keyname)\n ec2.delete_key_pair(keyname)\n key = None\n if not key:\n print(\"Key %s not found in AWS, importing created key\" % keyname)\n with open(public_key) as in_handle:\n body = in_handle.read()\n try:\n ec2.import_key_pair(keyname, body)\n except TypeError as e:\n body = body.encode('utf-8')\n ec2.import_key_pair(keyname, body)\n return {\"user_key_name\": keyname, \"user_key_private\": private_key,\n \"user_key_public\": public_key}", "def create_secret(self, name, namespace):\n secret_manifest = {\n \"apiVersion\": \"v1\",\n \"kind\": \"Secret\",\n \"metadata\": {\n \"name\": name,\n \"annotations\": {\n \"kubernetes.io/service-account.name\": name\n }\n }\n }\n self.v1_client.create_namespaced_secret(namespace=namespace,\n body=secret_manifest)", "def install_secret_key(app, filename='secret_key'):\n filename = os.path.join(app.instance_path, filename)\n\n try:\n app.config['SECRET_KEY'] = open(filename, 'rb').read()\n except IOError:\n print('Error: No secret key. 
Create it with:')\n full_path = os.path.dirname(filename)\n if not os.path.isdir(full_path):\n print('mkdir -p {filename}'.format(filename=full_path))\n print('head -c 24 /dev/urandom > {filename}'.format(filename=filename))\n sys.exit(1)", "def add(key, value, **kwargs):\n cluster_call(\"secret_add\", key=key, value=value, **kwargs, prefix=f\"Adding secret {key}...\", postfix=\"added.\")", "def ssh_keygen(username):\n d = user_exists(username)\n assert d, fabric.colors.red(\"User does not exist: %s\" % username)\n\n home = d['home']\n if not fabric.contrib.files.exists(os.path.join(home, \".ssh/id_rsa.pub\")):\n fabric.api.run(\"mkdir -p %s\" % os.path.join(home, \".ssh/\"))\n fabric.api.run(\n \"ssh-keygen -q -t rsa -f '%s' -N ''\" %\n os.path.join(\n home, '.ssh/id_rsa'))\n run('chown indabom:indabom {}'.format(\"/home/indabom/.ssh\"))\n run('chown indabom:indabom {}'.format(\"/home/indabom/.ssh/id_rsa\"))\n run('chown indabom:indabom {}'.format(\"/home/indabom/.ssh/id_rsa.pub\"))", "def create_jwt(user_obj):\n return jwt.encode(\n user_serializer.GetUserInfoSerializer(user_obj).data,\n settings.SECRET_KEY, algorithm='HS256').decode('utf-8')", "def create_key ():", "def create_keypair(key_name):\n if os.path.isfile(SSH_FOLDER + key_name + \".pem\"):\n return # Key already created\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n key = ec2.create_key_pair(key_name)\n key.save(SSH_FOLDER)", "def create_user_key_file(username: str):\n\n user: User = UserModel().get_user(username=username)\n user_key: Key = user.public_key\n\n public_key: bytes = user_key.public_key\n\n if not os.path.exists(\"./ssh_ca\"):\n os.mkdir(\"./ssh_ca\")\n\n with open(f\"./ssh_ca/{username}.pub\") as public_key_file:\n public_key_file.write(public_key.decode())", "def generate_user_api_key(user):\n now = datetime.datetime.utcnow()\n payload = {\n 'iss': 'minesweeper-api',\n 'aud': 'client',\n 'iat': now,\n 'nbf': now,\n 'exp': now + _get_api_token_exp_from_config(),\n 'user_id': str(user.id),\n 'is_admin': user.is_admin,\n }\n bytestring = jwt.encode(payload, _get_api_key_from_config())\n token = bytestring.decode('utf-8')\n return token", "def generate_access_key(self):\n\t\tfrom app import app\n\t\ts = JSONWebSignatureSerializer(app.config['SECRET_KEY'])\n\t\taccess_key = s.dumps({'username': self.username}) \n\t\tself.access_key = access_key", "def create_ssh_key_file(username: str, ssh_key: bytes, ip_address: str):\n\n if not os.path.exists(\"./ansible/keys\"):\n os.mkdir(\"./ansible/keys\")\n\n with open(f\"./ansible/keys/admin_{ip_address}.pem\", \"w\") as ssh_key_file:\n ssh_key_file.write(ssh_key.decode())\n\n os.system(f\"chmod 400 ./ansible/keys/admin_{ip_address}.pem\")", "def create_apikey(self, username, api_key):\r\n return 'ApiKey %s:%s' % (username, api_key)", "def create_token(user):\n payload = {\n 'sub': user.id,\n 'iat': datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(days=1)\n }\n token = jwt.encode(payload, config.SECRET_KEY, algorithm='HS256')\n return token.decode('unicode_escape')", "def manage_createNewSecret(self, REQUEST):\n manager = getUtility(IKeyManager)\n manager.rotate()\n response = REQUEST.response\n response.redirect(\n '%s/manage_secret?manage_tabs_message=%s' %\n (self.absolute_url(), 'New+secret+created.')\n )", "def gen_api_key(username):\n salt = str(os.urandom(64)).encode('utf-8')\n return hash_password(username, salt)", "def upload_api_key(self, user_id, create_api_key_details, **kwargs):\n resource_path = \"/users/{userId}/apiKeys\"\n method = \"POST\"\n\n # 
Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"upload_api_key got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_api_key_details,\n response_type=\"ApiKey\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_api_key_details,\n response_type=\"ApiKey\")", "def generate_token(user: dict):\n\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),\n 'iat': datetime.datetime.utcnow(),\n 'user': user\n }\n token = jwt.encode(\n payload,\n os.getenv('SECRET_KEY'),\n algorithm='HS256'\n )\n return token.decode('UTF-8')", "def create(self, validated_data):\n resource = Resource.objects.create(**validated_data.get(\"resource\"))\n return Secret.objects.create(resource=resource)", "def create_ssh_keypair(keyname, comment):\n sshdir = os.path.join(util.get_homedir(), '.ssh')\n util.create_directory(sshdir, 0o700)\n keyfile = os.path.join(sshdir, keyname)\n if util.try_stat(keyfile):\n raise RuntimeError('~/.ssh/{} already exists'.format(keyname))\n subprocess.check_call(['ssh-keygen', '-f', keyfile, '-N', \"\", '-q', '-C', comment])\n os.chmod(keyfile, 0o600)\n os.chmod(keyfile + '.pub', 0o644)\n return keyfile", "def set_s3_credentials(secret_key_id, secret_access_key, session_token = None):\n if secret_key_id is None:\n raise H2OValueError(\"Secret key ID must be specified\")\n\n if secret_access_key is None:\n raise H2OValueError(\"Secret access key must be specified\")\n \n if not secret_key_id:\n raise H2OValueError(\"Secret key ID must not be empty\")\n \n if not secret_access_key:\n raise H2OValueError(\"Secret access key must not be empty\")\n\n params = {\"secret_key_id\": secret_key_id,\n \"secret_access_key\": secret_access_key,\n \"session_token\": session_token\n }\n h2o.api(\"POST /3/PersistS3\", data=params)\n print(\"Credentials successfully set.\")", "async def create_new_user(*, user: User):\n with Session(engine) as session:\n user.password = simple_hash(user.name, user.password) #Hashing password for security\n session.add(user)\n session.commit()\n return {\"message\": \"User {user_id} created\".format(user_id = user.id)}", "def generate_user(self):\n user = self.iam_client.create_user(\n 
UserName=self.generate_username(),\n Tags=self.get_tags()\n )\n\n username = user['User']['UserName']\n accesskey = self.iam_client.create_access_key(UserName=username)\n\n print('UserName = {}\\nAccessKeyId = {}\\nSecretAccessKey = {}'\n .format(\n username,\n accesskey['AccessKey']['AccessKeyId'],\n accesskey['AccessKey']['SecretAccessKey']\n ))\n\n return username, user['User']['Arn']", "def create_user_profile(IamUserArn=None, SshUsername=None, SshPublicKey=None, AllowSelfManagement=None):\n pass", "def add(ctx, secret, name, issuer, period, oath_type, digits, touch, algorithm,\n counter, force):\n\n digits = int(digits)\n\n if not secret:\n while True:\n secret = click.prompt('Enter a secret key (base32)', err=True)\n try:\n secret = parse_b32_key(secret)\n break\n except Exception as e:\n click.echo(e)\n\n ensure_validated(ctx)\n\n _add_cred(ctx, CredentialData(secret, issuer, name, oath_type, algorithm,\n digits, period, counter, touch), force)", "def create_api_key(sender, **kwargs):\r\n if kwargs.get('created') is True:\r\n ApiKey.objects.create(user=kwargs.get('instance'))", "def insertNewUser(self,user, access_token):\n newUser = UserToken(username=user, user_key = access_token.key, user_secret = access_token.secret)\n newUser.put()", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def create_bucket(request: Dict) -> Dict:\n global config\n\n body = {\n \"user_name\": request.get(\"user_name\"),\n \"prefix\": request.get(\"bucket_name\")[0:5],\n \"bucket_name\": request.get(\"bucket_name\"),\n \"region\": request.get(\"region\")\n }\n\n response = requests.post(url=config.api_url('bucket'),\n data=json.dumps(body),\n headers={'content-type': 'application/json'})\n\n if response.status_code == HTTPStatus.OK:\n return response.json()", "def set_SecretKey(self, value):\n super(RetrieveUserDashboardInputSet, self)._set_input('SecretKey', value)", "def create_token(self, consumer, token_type, timestamp, user=None):\n token, created = self.first_or_create(consumer=consumer, \n token_type=token_type, \n timestamp=timestamp,\n user=user)\n\n if created:\n token.key, token.secret = self.generate_random_codes()\n token.save()\n\n return token", "def addPubKey(User, pubkey):\n with cd('~%s' % (User)):\n sudo('mkdir -p .ssh && chmod 700 .ssh', user=User)\n # add key if it doesn't already exist #\n _hazKey = 'no'\n _hazFile = sudo(\"[ -f .ssh/authorized_keys ] && echo 'yes' || echo 'no'\", user=User)\n if _hazFile == 'yes':\n # authorized_keys exist - check if the key already exists\n _hazKey = sudo(\"grep '%s' .ssh/authorized_keys >/dev/null 2>&1 && echo 'yes'\" % (pubkey), user=User)\n if _hazKey == 'no':\n sudo(\"echo '%s' >> .ssh/authorized_keys\" % (pubkey), user=User)\n else:\n print \"[Info] User '%s' key already exists on host '%s'\" % (User, env.host_string)", "def create_profile(self, user):\r\n salt = sha.new(str(random.random())).hexdigest()[:5]\r\n activation_key = sha.new(salt+user.username).hexdigest()\r\n return self.create(user=user,\r\n activation_key=activation_key)", "def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"", "def add_user_key(self, obj, validity = 0, max_queries_per_ip_per_hour = 0, max_hits_per_query = 0, indexes = None):\n if obj is dict:\n params = obj\n else:\n params = {\"acl\": obj}\n if validity != 0:\n params[\"validity\"] = validity\n if max_queries_per_ip_per_hour != 0:\n params[\"maxQueriesPerIPPerHour\"] = max_queries_per_ip_per_hour\n if max_hits_per_query 
!= 0:\n params[\"maxHitsPerQuery\"] = max_hits_per_query\n if not indexes is None:\n params['indexes'] = indexes\n return AlgoliaUtils_request(self.headers, self.write_hosts, \"POST\", \"/1/keys\", self.timeout, params)", "def _secret(value):\n\n match = SECRET_ARN_RE.match(value)\n if match:\n named_groups = match.groupdict()\n return AwsSecret(arn=value,region=named_groups[\"Region\"])\n\n raise argparse.ArgumentTypeError('Given argument \"%s\" is not a valid secret' % value)", "def create_namespaced_secret(self, body, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_namespaced_secret_with_http_info(body, namespace, **kwargs)\n else:\n (data) = self.create_namespaced_secret_with_http_info(body, namespace, **kwargs)\n return data", "def create_profile(self, user):\n salt = sha.new(str(random.random())).hexdigest()[:5]\n activation_key = sha.new(salt+user.username).hexdigest()\n return self.create(user=user,\n activation_key=activation_key)", "def _bcbio_iam_user(conn, args):\n import boto\n name = \"bcbio\"\n access_key_name = \"full_admin_access\"\n if args.nocreate:\n need_creds = False\n else:\n try:\n conn.get_user(name)\n if args.recreate:\n keys = conn.get_all_access_keys(name)\n for access_key in tz.get_in([\"list_access_keys_response\", \"list_access_keys_result\",\n \"access_key_metadata\"], keys, []):\n conn.delete_access_key(access_key[\"access_key_id\"], name)\n need_creds = True\n else:\n need_creds = False\n except boto.exception.BotoServerError:\n conn.create_user(name)\n conn.put_user_policy(name, access_key_name, IAM_POLICY)\n need_creds = True\n if need_creds:\n creds = conn.create_access_key(name)\n else:\n creds = {}\n if creds:\n creds = tz.get_in([\"create_access_key_response\", \"create_access_key_result\", \"access_key\"], creds)\n print(\"User credentials for %s:\" % name)\n for awsid in [\"access_key_id\", \"secret_access_key\"]:\n print(\" %s: %s\" % (awsid, creds.get(awsid)))\n return {\"ec2_access_key\": creds.get(\"access_key_id\"),\n \"ec2_secret_key\": creds.get(\"secret_access_key\")}\n else:\n print(\"User %s already exists, no new credentials\" % name)\n print(\"Edit the configuration file to add existing user's access and secret keys\")\n return {}", "def create_profile(self, user, *args, **kwargs):\n salt = hashlib.sha1(str(random.random())).hexdigest()[:5]\n activation_key = hashlib.sha1(salt + user.username).hexdigest()\n return self.create(user=user, activation_key=activation_key, **kwargs)", "def create_privatekey():\n \n # Generate the private key\n key_jwk = wallet.create_JWK()\n response_jwk = key_jwk.export(private_key=True, as_dict=True)\n\n return response_jwk", "def user_key(user_name=DEFAULT_USER_NAME):\n return ndb.Key('User', user_name)", "def test_creating_a_bucket(self):\n with self.client:\n self.create_bucket(self.get_user_token())", "def UpdateSecretKey():\n _LOG.info('Updating webapp2_secret_key.')\n webapp2_secret_key = Webapp2SecretKey(id='current_secret_key')\n webapp2_secret_key.secret_key = os.urandom(16).encode('hex')\n webapp2_secret_key.put()\n return True", "def create_token(user, title, expiration=_default_expiration_duration_opt):\n if expiration == _default_expiration_duration_opt:\n duration = _default_expiration_duration()\n expiration = duration + datetime.now() if duration else None\n\n token_code = random_string_generator(TOKEN_NAME_PREFIX_LENGTH + MINIMUM_TOKEN_SUFFIX_LENGTH)()\n token_name = token_code[:TOKEN_NAME_PREFIX_LENGTH]\n token_secret = 
token_code[TOKEN_NAME_PREFIX_LENGTH:]\n\n assert token_name\n assert token_secret\n\n return AppSpecificAuthToken.create(\n user=user,\n title=title,\n expiration=expiration,\n token_name=token_name,\n token_secret=DecryptedValue(token_secret),\n )", "def generate_pair(cls, user: User) -> Dict[str, str]:\n if not isinstance(user, User):\n raise PermissionDenied()\n\n refresh_token = RefreshToken.objects.create(user=user)\n access_payload = refresh_token.get_payload_by_token()\n access_payload['type'] = 'access'\n access_token = jwt_encode(access_payload)\n\n return {\n 'access_token': access_token,\n 'refresh_token': refresh_token.token,\n }", "def generate_key():\n key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(key)", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def _create_user(self, new_user):\n new_user = User(user_name=new_user['user_name'], pin=new_user['pin'], user_type='customer')\n self.session.output(new_user.get_user_info(), '\\n[ New user created ]')", "def create_bucket() -> None:\n try:\n client.make_bucket(DATASETS_BUCKET)\n except BucketAlreadyOwnedByYou:\n logger.debug(f\"Not creating bucket {DATASETS_BUCKET}: Bucket already exists\")\n pass\n else:\n logger.debug(f\"Successfully created bucket {DATASETS_BUCKET}\")", "def add(\n ctx,\n secret,\n name,\n issuer,\n period,\n oath_type,\n digits,\n touch,\n algorithm,\n counter,\n force,\n password,\n remember,\n):\n\n digits = int(digits)\n\n if not secret:\n while True:\n secret = click_prompt(\"Enter a secret key (base32)\")\n try:\n secret = parse_b32_key(secret)\n break\n except Exception as e:\n click.echo(e)\n\n _init_session(ctx, password, remember)\n\n _add_cred(\n ctx,\n CredentialData(\n name, oath_type, algorithm, secret, digits, period, counter, issuer\n ),\n touch,\n force,\n )", "def add_user():\n load_jws_from_request(request)\n if not hasattr(request, 'jws_header') or request.jws_header is None:\n return \"Invalid Payload\", 401\n username = request.jws_payload['data'].get('username')\n address = request.jws_header['kid']\n user = SLM_User(username=username)\n ses.add(user)\n try:\n ses.commit()\n except Exception as ie:\n current_app.logger.exception(ie)\n ses.rollback()\n ses.flush()\n return 'username taken', 400\n userkey = UserKey(key=address, keytype='public', user_id=user.id,\n last_nonce=request.jws_payload['iat']*1000)\n ses.add(userkey)\n try:\n ses.commit()\n except Exception as ie:\n current_app.logger.exception(ie)\n ses.rollback()\n ses.flush()\n #ses.delete(user)\n #ses.commit()\n return 'username taken', 400\n jresult = jsonify2(userkey, 'UserKey')\n current_app.logger.info(\"registered user %s with key %s\" % (user.id, userkey.key))\n return current_app.bitjws.create_response(jresult)", "def new_super_secret(self):\n super_secret_obj = gen_new_super_secret()\n\n temp_cursor = user_db.cursor()\n\n temp_cursor.execute(\n \"\"\"\n UPDATE users\n SET super_secret=?,\n secret_salt=?\n WHERE user_id=?\n \"\"\",\n (\n super_secret_obj[\"super_secret\"],\n super_secret_obj[\"salt\"],\n self.user_id,\n ),\n )\n user_db.commit()\n\n return {\n \"key\": super_secret_obj[\"key\"],\n \"salt\": super_secret_obj[\"salt\"]\n }", "def prepareInstance(username, sshId):\n print os.environ['EC2_KEYPAIR_PATH']\n with settings(user='ubuntu',\n key_filename=os.environ['EC2_KEYPAIR_PATH']):\n password = getpass('Enter a new password for user %s:' % username)\n password2 = getpass('Enter the password a again:')\n if password != password2:\n raise 
RuntimeError(\"Passwords don't match\")\n sudo('adduser --disabled-password --gecos \",,,\" %s' % username)\n cryptedPassword = _hashPassword(password)\n sudo('usermod --password %s %s' % (cryptedPassword, username))\n sudo('gpasswd --add %s admin' % username)\n authorizeSshKey(username, sshId)\n sudo('apt-get update')\n sudo('DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade -y')\n if exists('/var/run/reboot-required'):\n reboot()", "def create_keypair(address_type, addresses_path, address_prefix, name):\n vkey_file = get_vkey_file(addresses_path, address_prefix, name)\n skey_file = get_skey_file(addresses_path, address_prefix, name)\n\n if(path.exists(vkey_file)) :\n print(address_prefix, \"key pair already exists for\", name)\n return\n \n makedirs(path.dirname(vkey_file), mode=0o777, exist_ok=True)\n\n run_params = ['cardano-cli', address_type, 'key-gen', '--verification-key-file', vkey_file, '--signing-key-file', skey_file]\n subprocess_run(run_params, capture_output=False, text=True)\n return", "def setup_keys():\n if os.path.isfile(\"key.txt\"):\n message = \"Key already generated\"\n else:\n secret = secrets.token_urlsafe(64)\n message = \"Secret generated and saved in key.txt\"\n with open(\"key.txt\", \"w\") as fd:\n fd.write(secret)\n return json.dumps({'message': message})", "def create(self, name, public_key=None):\n data = {\n \"keypair\": {\n \"name\": name\n }\n }\n if public_key is not None:\n data['keypair']['public_key'] = public_key\n \n path = '/os-keypairs'\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create/import openstack keypair: %s' % truncate(res))\n return res[0]['keypair']", "def s3_create_bucket(self):\n self.conn.create_bucket(DEFAULT_BUCKET_NAME)", "def create_key(key_name, save_path, region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n key = conn.create_key_pair(key_name)\n log.debug(\"the key to return is : %s\", key)\n key.save(save_path)\n return key.material\n except boto.exception.BotoServerError as e:\n log.debug(e)\n return False", "def create_user(self):\n # TODO-ROB: This is used ONLY when the user registers in flask\n # TODO-ROB: Create the cookiecutter.json file\n # extra_context overrides user and default configs\n cookiecutter(self.user_cookie, no_input=True, extra_context={\"user_name\": self.user}, output_dir=self.users)", "def create_new_credential(account,userName,password):\n new_credential = Credentials(account,userName,password)\n return new_credential", "def create_tags(resource_id, key, value):\n response = EC2.create_tags(\n Resources=[\n resource_id,\n ],\n Tags=[\n {\n 'Key': key,\n 'Value': value\n },\n ]\n )\n return response", "async def add_secret(app: Sanic, secret: str, passphrase: str, ttl: Optional[int]) -> str:\n\n key = get_fernet_key(app, passphrase)\n\n sign = hmac.digest(key=key, msg=passphrase.encode(), digest='sha512').hex()\n secret_key = secrets.token_hex(16)\n\n cipher = fernet.Fernet(key)\n encrypted = cipher.encrypt(secret.encode()).decode()\n\n expires = None\n if ttl:\n expires = datetime.utcnow() + timedelta(seconds=ttl)\n\n await app.db.secrets.insert_one({\n 'secret': encrypted,\n 'secret_key': secret_key,\n 'signature': sign,\n 'expires': expires, # for mongo index\n 'ttl': ttl, # for fernet check\n })\n\n return secret_key", "def add_user():\n if not request.json:\n abort(400)\n\n db_conn = sqlite3.connect(db_path)\n db = db_conn.cursor()\n\n 
username = request.json['username']\n public_key = request.json['public_key']\n\n try:\n db.execute(\"INSERT INTO users (username) VALUES (?)\", [username])\n db.execute(\"INSERT INTO public_keys (username, public_key, status) VALUES (?,?,?)\", [username, public_key, PK_STATUS_OK])\n db_conn.commit()\n db_conn.close()\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n return jsonify({'success':True})", "def new_user(client, username, password, apikey=None, docs=None):\n if apikey is None:\n apikey = str(uuid.uuid4())\n passhash = generate_password_hash(password, method='sha1')\n user = User(username, passhash, apikey, docs=docs)\n user.create(client)\n return user", "def create_profile(self, user):\n salt = sha.new(str(random.random())).hexdigest()[:5]\n activation_key = sha.new(salt+user.username).hexdigest()\n# prepend \"key_\" to the key_name, because key_names can't start with numbers\n registrationprofile = RegistrationProfile(user=user, activation_key=activation_key)\n db = DB_Session()\n db.add(registrationprofile)\n db.flush()\n db.refresh(registrationprofile)\n db.commit()\n db.close()\n return registrationprofile", "def generate_key(self):\n self.key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(self.key)", "def process_create_dropbox_client_account ( iam_conn, base_name, action_params ) :\n account_name = create_dropbox_client_name( base_name, action_params[ 'name' ] )\n if does_user_exist( iam_conn, account_name ) :\n print 'AWS account ' + account_name + ' already exists. Skipping.'\n return None\n\n print \"Creating AWS account \" + account_name\n iam_conn.create_user( account_name )\n\n if action_params.get( 'generate-access-key', 'NO' ) == 'YES' :\n print \"Generating access key\"\n response = iam_conn.create_access_key( account_name )\n access_key = response[ 'create_access_key_response' ][ 'create_access_key_result' ][ 'access_key' ][ 'access_key_id' ]\n access_key_secret = response[ 'create_access_key_response' ][ 'create_access_key_result' ][ 'access_key' ][ 'secret_access_key' ]\n\n ## FIX: Need to store these with the credentials service at some point.\n key_filename = account_name + '.accesskey.txt'\n print 'Saving access key to file ' + key_filename\n key_file = open( key_filename, 'w' )\n key_file.write( 'Access Key: ' + access_key + '\\n' )\n key_file.write( 'Access Key Secret: ' + access_key_secret )\n key_file.close( )\n print '** WARNING: The access key MUST be registered manually with the credential service before it can be used.'", "def create( cls, user_id = None, private_keyfile_path = None ) :\n user_id = user_id or config.USER_ID()\n private_keyfile_path = private_keyfile_path or PRIVATE_RSA_KEYFILE_PATH()\n with open( private_keyfile_path, \"r\" ) as stream :\n private_key = rsa.PrivateKey.load_pkcs1( stream.read() )\n return cls( user_id, private_key )", "def token_generate(self, user_id):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=200),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n encoded_token = jwt.encode(\n payload, current_app.config['SECRET_KEY'], algorithm='HS256'\n )\n return encoded_token\n\n except Exception:\n return str(Exception)", "def put_slice_secret( observer_pkey_pem, slice_name, slice_secret, slice_fk=None, opencloud_slice=None ):\n \n ss = None \n \n if opencloud_slice is None:\n # look up the slice \n try:\n if slice_fk is None:\n opencloud_slice = models.Slice.objects.get( name=slice_name )\n else:\n opencloud_slice = models.Slice.objects.get( 
id=slice_fk.id )\n except Exception, e:\n logger.exception(e)\n logger.error(\"Failed to load slice (%s, %s)\" % (slice_fk, slice_name) )\n return False \n \n ss = models.SliceSecret( slice_id=opencloud_slice, secret=slice_secret )\n \n ss.save()\n \n return True", "def create_secrets(file):\n with open(file, 'w') as secfile:\n secfile.write((\n '# _credentials: Maintain your credentials below. Do not remove unused fields.\\n'\n 'USER = \\'\\'\\nPASSWORD = \\'\\'\\n# _courses: Define which courses should be crawled\\nCOURSES = []\\n\\n'\n '# local: Required if you want to download files and store them in a local folder'\n ' (for example in the Dropbox client folder)\\n'\n 'PATH = \\'\\' # Path to the destination folder\\n\\n'\n '# dropbox (-d): Required if you want to download files and upload them to Dropbox\\n'\n 'DROPBOX_TOKEN = \\'\\' # Personal Dropbox API token\\n'\n 'PATH_IN_DB = \\'\\' # Destination path of downloaded files within Dropbox\\n'))\n print('File app_secrets.py was created. Please maintain your credentials.')\n sys.exit(1)", "def create_token(request, user):\n\n key = get_random_string(100)\n data = {}\n ip = get_client_ip_address(request)\n\n return Token.objects.create(user=user, key=key, data=json.dumps(data), ip=ip)", "def createSaltKey(operation,newPassword,newPasswordTag):\n \n newPasswordEncrypted=encrypt(GlobalSaltKeyValue,newPassword)\n \n if os.path.isfile(GlobalKeyVaultFile):\n if checkTag(GlobalKeyVaultFileSection,newPasswordTag):\n if operation == 'update':\n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password updated\"\n else:\n print \"Error:0001-Section and password tag already exists.\"\n sys.exit(2)\n\n else:\n if operation == 'add': \n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password added\"\n else:\n print \"Error:0002-No matching tag found.\"\n sys.exit(2)\n else:\n print \"Error:0003-Missing file \", GlobalKeyVaultFile\n sys.exit(2)", "def secret_cognito_hash(\n username: str,\n cognito_client_id: str,\n cognito_secret: str\n) -> str:\n message = bytes(username + cognito_client_id, encoding='utf-8')\n key = bytes(cognito_secret, encoding='utf-8')\n\n secret_hash = base64.b64encode(\n hmac.new(\n key=key,\n msg=message,\n digestmod=hashlib.sha256\n ).digest()\n ).decode()\n\n return secret_hash", "def create_new_user():\n username = input('Vad är ditt användarID?: ')\n filename = 'user_db.json'\n \n with open(f'txt_files/{filename}', 'w') as f:\n json.dump(username, f)\n return username", "def _generateSSHKey(self, private_filepath, public_filepath):\n self.log.debug(\"Writing SSH keys to: \" + private_filepath + \" and \" + public_filepath)\n\n (ssh_dir, filename) = os.path.split(os.path.expanduser(private_filepath))\n if not os.path.exists(ssh_dir):\n self.log.debug(\"SSH Directory doesn't exist, creating \" + ssh_dir)\n os.makedirs(ssh_dir)\n\n key = paramiko.RSAKey.generate(1024)\n key.write_private_key_file(os.path.expanduser(private_filepath))\n \n with open(os.path.expanduser(public_filepath),\"w\") as public:\n public.write(\"%s %s\" % (key.get_name(), key.get_base64()))\n\n public.close()", "def create_sample_tag(user, name=\"spicy\"):\n return Tag.objects.create(custom_user=user, name=name)", "def create_user_credentials(storage_type, storage_id, space_name, client_ip,\n user_details):\n user_id = user_details[\"id\"]\n if user_id == \"0\":\n return PosixCredentials(0, 0)\n\n uid = gid = gen_storage_id(user_id)\n return PosixCredentials(uid, gid)", "async def write_secret(self, name: 
str, value: str, content_type: str, tags: dict):\n pass", "def create_temp_user(client, role_arn):\n try:\n response = client.assume_role(\n RoleArn=role_arn,\n RoleSessionName=\"Lambda-Start-Stop-functionality\"\n )\n ec2_user = boto3.client(\n 'ec2',\n aws_access_key_id=response['Credentials']['AccessKeyId'],\n aws_secret_access_key=response['Credentials']['SecretAccessKey'],\n aws_session_token=response['Credentials']['SessionToken']\n )\n return ec2_user\n except Exception as error:\n logger.info(\"Creating a temporary ec2 privileged user failed with the following error : {}\".format(error))", "def create_sample_recipe(user, **params):\n\n defaults = {\n 'title': 'Polish Soup',\n 'time_minutes': 45,\n 'price': 15.89\n }\n defaults.update(params)\n return Recipe.objects.create(user=user, **defaults)", "async def create_wallet(self, user_id: str) -> None:\n self._wallet_id = self.generate_wallet_id()\n\n transaction = Transaction(\n wallet_id=self.wallet_id,\n type=TransactionType.CREATE,\n data={\"amount\": self.DEFAULT_BALANCE},\n nonce=None,\n )\n\n # todo: create separate user storage\n user_pk = f\"{user_id}{self.USER_KEY_POSTFIX}\"\n\n try:\n await self.storage.transaction_write_items(\n items=[\n # create transaction record\n self.storage.item_factory.put_idempotency_item(\n pk=transaction.unique_id, data=transaction.as_dict()\n ),\n # create wallet\n self.storage.item_factory.put_idempotency_item(\n pk=self.unique_id,\n data={self.BALANCE_KEY: self.DEFAULT_BALANCE},\n ),\n # create link between wallet and user\n self.storage.item_factory.put_idempotency_item(\n pk=user_pk, data={self.USER_WALLET_KEY: self.wallet_id}\n ),\n ]\n )\n except storage.exceptions.TransactionMultipleError as e:\n if e.errors[0]:\n raise crud.exceptions.WalletTransactionAlreadyRegisteredError(\n str(e.errors[0])\n )\n\n raise crud.exceptions.WalletAlreadyExistsError(\n f\"Wallet already exists for the user {user_pk}\"\n )" ]
[ "0.6538113", "0.6313658", "0.6207733", "0.61237496", "0.60848606", "0.6013739", "0.6008015", "0.58083314", "0.5791427", "0.56150365", "0.5601213", "0.55981004", "0.5564056", "0.55623674", "0.5552964", "0.5467448", "0.54615223", "0.545093", "0.5446797", "0.5376637", "0.5359139", "0.5348455", "0.5324149", "0.53158396", "0.531524", "0.531401", "0.5294298", "0.5270801", "0.5247391", "0.5245174", "0.52391446", "0.52151966", "0.5178087", "0.5155728", "0.51480955", "0.5146912", "0.51419073", "0.51297593", "0.5124745", "0.5085309", "0.50827247", "0.50765675", "0.5072045", "0.5070251", "0.5054839", "0.5052295", "0.50494564", "0.50442725", "0.50433695", "0.50367606", "0.50289196", "0.50233203", "0.5017201", "0.5014731", "0.50039005", "0.50012356", "0.4997671", "0.49934122", "0.49903914", "0.4976262", "0.4971201", "0.49581695", "0.49501428", "0.49378198", "0.49354157", "0.49342546", "0.49251273", "0.4916669", "0.49146828", "0.4909562", "0.49087533", "0.49032003", "0.48913097", "0.4890683", "0.48904285", "0.4873788", "0.48715183", "0.48630196", "0.48625377", "0.48491824", "0.48441297", "0.48270258", "0.48240247", "0.48173767", "0.48119023", "0.48051327", "0.48039898", "0.48018038", "0.4801658", "0.47962716", "0.4795972", "0.47934136", "0.4792127", "0.47832453", "0.47824788", "0.4780954", "0.4780568", "0.47803503", "0.47779685", "0.47737724" ]
0.62441874
2
Creates a new dynamic group in your tenancy. You must specify your tenancy's OCID as the compartment ID in the request object (remember that the tenancy is simply the root compartment). Notice that IAM resources (users, groups, compartments, and some policies) reside within the tenancy itself, unlike cloud resources such as compute instances, which typically reside within compartments inside the tenancy. For information about OCIDs, see `Resource Identifiers`__. You must also specify a name for the dynamic group, which must be unique across all dynamic groups in your tenancy, and cannot be changed. Note that this name has to be also unique across all groups in your tenancy. You can use this name or the OCID when writing policies that apply to the dynamic group. For more information about policies, see `How Policies Work`__. You must also specify a description for the dynamic group (although it can be an empty string). It does not
def create_dynamic_group(self, create_dynamic_group_details, **kwargs): resource_path = "/dynamicGroups" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_dynamic_group got unknown kwargs: {!r}".format(extra_kwargs)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, header_params=header_params, body=create_dynamic_group_details, response_type="DynamicGroup") else: return self.base_client.call_api( resource_path=resource_path, method=method, header_params=header_params, body=create_dynamic_group_details, response_type="DynamicGroup")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response", "def create_group(group_id, group_name):\n\n kwargs = config.DEFAULT_REST_KWARGS\n kwargs[\"data\"] = {\"id\": group_id, \"name\": group_name}\n http_response = call_rest_api(\"/identities/groups/\", \"post\", **kwargs)\n if http_response.status_code != 201: # 201 = 'new group created'\n raise ValueError(http_response.text)\n logger.log(f\"New custom group, {group_name}, with ID: {group_id}, was created successfully.\")", "def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))", "def create_group(self, groupname):\n data = {\"groupname\": groupname}\n headers = {\"user-agent\": self.u_agent}\n req_url = self.normalize_admin_url(\"groups\")\n res = requests.post(\n req_url,\n headers=headers,\n auth=self.auth,\n data=json.dumps(data),\n verify=False,\n )\n if res.status_code == 201:\n return Response(0, u\"Group {} has been created\".format(groupname))\n else:\n return Response(res.status_code, res)", "def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def create_namespaced_group(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_group(self, group_name, group_type):\n grp_data = {\"name\": group_name, \"type\": group_type}\n return requests.post(self.groups_url, data=json.dumps(grp_data),\n headers=self.headers)", "def create_TestGroup(test_case, # 
type: AnyMagpieTestCaseType\n override_group_name=null, # type: Optional[Str]\n override_discoverable=null, # type: Optional[bool]\n override_data=null, # type: Optional[JSON]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) -> JSON\n app_or_url = get_app_or_url(test_case)\n data = override_data\n if override_data is null:\n data = {\"group_name\": override_group_name if override_group_name is not null else test_case.test_group_name}\n # only add 'discoverable' if explicitly provided here to preserve original behaviour of 'no value provided'\n if override_discoverable is not null:\n data[\"discoverable\"] = override_discoverable\n grp_name = (data or {}).get(\"group_name\")\n if grp_name:\n test_case.extra_group_names.add(grp_name) # indicate potential removal at a later point\n resp = test_request(app_or_url, \"POST\", \"/groups\", json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n return check_response_basic_info(resp, 201, expected_method=\"POST\")", "def __create_new_group(self, group_name) -> None:\n group = Group(name=group_name)\n group.save()\n\n self.__add_permission_to_group(group)", "def create_group(self, name):\n\t\tdata = {\"name\":name}\n\t\tresponse = self.client.post(self._endpoint + \"/group\", content=data)\n\t\treturn Group(\n\t\t\tresponse.json['group_id'],\n\t\t\tself.user_id,\n\t\t\tself.site_id,\n\t\t\tdata=response.json\n\t\t)", "def create_placement_group(self, name, strategy='cluster'):\r\n params = {'GroupName':name, 'Strategy':strategy}\r\n group = self.get_status('CreatePlacementGroup', params, verb='POST')\r\n return group", "def create_group(self, event):\n body = event['body']\n body = json.loads(body)\n\n # Required field in POST body\n if 'group_name' not in body:\n return self.get_bad_request('POST body missing group_name')\n\n group_name = body['group_name']\n user = self.mealShareUsers.get_user_cognito_data(event)\n user_id = user['user_id']\n \n # Add the creator to the group, as the initial member\n group_id = self.mealShareGroups.create_group(group_name)\n success = self.mealShareGroups.add_user_to_group(user_id, group_id)\n if success:\n return {\n 'statusCode': 200,\n 'statusMessage': 'Successfully created group {} with ID {}'.format(group_name, group_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }\n else:\n return {\n 'statusCode': 500,\n 'statusMessage': 'FAILED to create group {} by user {}'.format(group_name, user_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }", "def create(self, group_name):\n METHOD = 'POST'\n API_PATH = '/groups/create'\n\n data = {'group_name': group_name}\n\n # Make REST call\n resp = self._rest_call[METHOD](API_PATH, data=data)\n if resp.status_code == 200:\n return resp.json()\n\n elif resp.status_code == 403:\n raise AuthorizationError(\"User is not authorized or token is incorrect.\")\n\n else:\n if resp.json().get(\"error_code\") in ERROR_CODES:\n raise ERROR_CODES[resp.json().get('error_code')](resp.json().get('message'))\n else:\n raise APIError(\"Response code {0}: {1} {2}\".format(resp.status_code,\n resp.json().get('error_code'),\n resp.json().get('message')))", "def allocate_group(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_AllocateNewGroupID(objectid)\n remote.runCommand(cmd1)\n result_val = 
mmapi.any_result()\n cmd1.GetSceneCommandResult_AllocateNewGroupID(key1, result_val)\n return result_val.i", "def add_group():\n name = request.form['name']\n data, code, message = FIELD_SERVICE.add_group(name)\n return __result(data, code, message)", "def create_group(self, identifier: str, group_name: str) -> Group:\n\n # APM-137701 - Namespace for custom device calculation should not be set\n group_id = get_group_id(\"\", identifier)\n if group_id in self._groups:\n raise ValueError(\"Group \" + group_name + \" already exist, id: \" + str(group_id))\n else:\n group = Group(group_id, group_name, self._technologies, self._results_builder)\n\n self._groups[group_id] = group\n return group", "def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code == 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def create_group(self, create_group_details, **kwargs):\n resource_path = \"/groups\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_group_details,\n response_type=\"Group\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_group_details,\n response_type=\"Group\")", "def post_groups(\n data: PostGroupIn, tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.create_group\")\n grp = Group(\n description=data.description,\n members=[tkn.owner],\n group_name=data.group_name,\n owner=tkn.owner,\n ).save()\n logging.debug(\n \"Created group %s (%s) owned by %s\",\n data.group_name,\n str(grp.pk),\n tkn.owner.character_name,\n )\n return GetGroupOut.from_record(grp)", "def create_adgroup(self, account_id, name, campaign_id,\n creative_id, bid_type=None, bid_info=None, max_bid=None,\n tracking_specs=None, view_tags=None, objective=None,\n adgroup_status=None, targeting=None, conversion_specs=None, batch=False):\n path = 'act_%s/adgroups' % account_id\n args = {\n 'name': name,\n 'campaign_id': campaign_id,\n 'creative': json.dumps({'creative_id': creative_id}),\n }\n if bid_type:\n args['bid_type'] = bid_type\n 
if max_bid:\n # can only use max_bid with CPM bidding\n args['max_bid'] = max_bid\n elif bid_info:\n args['bid_info'] = json.dumps(bid_info)\n\n if tracking_specs:\n args['tracking_specs'] = json.dumps(tracking_specs)\n if view_tags:\n args['view_tags'] = json.dumps(view_tags)\n if objective:\n args['objective'] = objective\n if adgroup_status:\n args['adgroup_status'] = adgroup_status\n if targeting:\n args['targeting'] = json.dumps(targeting)\n if conversion_specs:\n args['conversion_specs'] = json.dumps(conversion_specs)\n return self.make_request(path, 'POST', args, batch=batch)", "def autoCreateGroup(cleaned_data, cookie_user, isAutoApproved=False, querystring_content=False):\n existingSites = Site.objects.filter(\n domain=cleaned_data['domain'],\n )\n if len(existingSites) > 0:\n\n try:\n site = existingSites[0]\n group = site.group\n except Exception, e:\n raise Exception(\"Site \"+cleaned_data['domain']+\" has no group.\")\n else:\n # make a group and site\n try:\n group = Group.objects.create(\n name=cleaned_data['name'],\n short_name=cleaned_data['short_name'],\n approved=False,\n temp_interact=0,\n requires_approval=False,\n )\n except Exception, e:\n print \"* * * ** * * * * * * * EXCEPTION \"\n print e\n logger.warn(e)\n groups = Group.objects.filter(\n short_name=cleaned_data['short_name']\n )\n if len(groups) == 1:\n group = groups[0]\n elif len(groups) > 1:\n raise Exception(\"More than one group with shortname found: \" + cleaned_data['short_name'])\n else:\n raise Exception(\"No groups found with shortname: \" + cleaned_data['short_name'])\n\n site = Site.objects.create(\n name=cleaned_data['domain'],\n domain=cleaned_data['domain'],\n group=group,\n # this is whether or not a querystring is counted in the url - we should rename this\n querystring_content=querystring_content,\n )\n\n blessed_tags = addDefaultsForNewGroup(group, cookie_user)\n autoApproveUserAsAdmin(group, cookie_user, isAutoApproved=isAutoApproved)\n\n return group, site, blessed_tags", "def create():\n name = request.json['name']\n level = request.json['level']\n manager = request.json['manager']\n if models.user.Group.get(name):\n raise Conflict('Group already exists.', creation=False)\n else:\n authorize(manager, level=level)\n group = models.user.Group(name=name, level=level, manager=manager)\n models.db.session.add(group)\n models.db.session.commit()\n return response(200, creation=True)", "def create_ad_group(client, customer_id, campaign_resource_name):\n ad_group_service = client.get_service(\"AdGroupService\")\n\n # Creates the ad group.\n # Note that the ad group type must not be set.\n # Since the advertising_channel_sub_type is APP_CAMPAIGN,\n # 1- you cannot override bid settings at the ad group level.\n # 2- you cannot add ad group criteria.\n ad_group_operation = client.get_type(\"AdGroupOperation\")\n ad_group = ad_group_operation.create\n ad_group.name = f\"Earth to Mars cruises {uuid4()}\"\n ad_group.status = client.enums.AdGroupStatusEnum.ENABLED\n ad_group.campaign = campaign_resource_name\n\n ad_group_response = ad_group_service.mutate_ad_groups(\n customer_id=customer_id, operations=[ad_group_operation]\n )\n\n ad_group_resource_name = ad_group_response.results[0].resource_name\n print(f'Ad Group created with resource name: \"{ad_group_resource_name}\".')\n return ad_group_resource_name", "def create(person_group_id, name=None, user_data=None):\n name = person_group_id if name is None else name\n url = 'persongroups/{}'.format(person_group_id)\n json = {\n 'name': name,\n 'userData': 
user_data,\n }\n\n return util.request('PUT', url, json=json)", "def create_new_group(self, group_id, poll_id, name):\n obj = self.table()\n obj.group_id = str(group_id)\n obj.poll_id = poll_id\n obj.name = name\n self.db.session.add(obj)\n self.db.session.commit()", "def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))", "def capacitygroup_create(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_create(cmd_ctx, cpc, options))", "def group_add_name(org_id, data):\n if data.has_key('groupname'):\n groupname = data['groupname']\n add_group(org_id, groupname, False)", "def request_group_create():\n return Response(render_template('admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/create\"),\n mimetype='text/html')", "def _make_group(self, _rk, _group_hint):\n\n if isinstance(_group_hint, dict):\n # _group_hint is a single key/value pair\n g = _group_hint[list(_group_hint)[0]]\n\n r_type = g.get(\"type\", \"none\")\n if r_type != \"OS::Nova::ServerGroup\":\n return \"support only ServerGroup resource\"\n\n properties = g.get(\"properties\", {})\n if len(properties) == 0:\n return \"no properties\"\n\n group_name = properties.get(\"name\", None)\n if group_name is None:\n return \"no group name\"\n group_name = group_name.strip()\n\n policies = properties.get(\"policies\", [])\n if len(policies) == 0:\n return \"no policy of the group\"\n\n if len(policies) > 1:\n return \"multiple policies\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if group_name in self.groups.keys():\n group = self.groups[group_name]\n else:\n group = Group(group_name)\n\n policy = policies[0].strip()\n if policy == \"anti-affinity\":\n group_type = \"diversity\"\n else:\n group_type = policy\n\n group.group_type = group_type\n group.factory = \"server-group\"\n group.level = \"host\"\n\n self.groups[group_name] = group\n else:\n # group hint is uuid string.\n rg = self.resource.get_group_by_uuid(_group_hint)\n if rg is None:\n return \"unknown group found while making group\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if rg.name in self.groups.keys():\n group = self.groups[rg.name]\n else:\n group = Group(rg.name)\n\n group.group_type = rg.group_type\n group.factory = rg.factory\n group.level = \"host\"\n\n self.groups[rg.name] = group\n\n if group is not None:\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"", "def create_group_scene(self, name, group):\n data = {\n \"name\": name,\n \"group\": group,\n \"recycle\": True,\n \"type\": \"GroupScene\"\n }\n return self.bridge.bridge.post('/scenes', data)", "def create(self, name, desc, tenant_id):\n data = {\"security_group\": {\"name\": name, \n \"description\": desc, \n \"tenant_id\":tenant_id}}\n\n path = '%s/security-groups' % self.ver\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create openstack security group: %s' % truncate(res))\n return res[0]['security_group']", "def post_security_group_create(self, resource_dict):\n pass", "async def async_create_group(\n hass: HomeAssistant,\n name: str,\n entity_ids: Collection[str] | None = None,\n user_defined: bool = True,\n icon: str | None = None,\n 
object_id: str | None = None,\n mode: bool | None = None,\n order: int | None = None,\n ) -> Group:\n group = Group.async_create_group_entity(\n hass, name, entity_ids, user_defined, icon, object_id, mode, order\n )\n\n # If called before the platform async_setup is called (test cases)\n await _async_get_component(hass).async_add_entities([group])\n return group", "def new_group(request):\n return edit_group(request, None)", "def createNewGroup():\n if request.method == 'POST':\n groupname = request.form['groupname1']\n internal = request.form['internal1']\n external = request.form['external1']\n userNo = request.form['usersNo1']\n if 'node1' in request.form:\n node = int(request.form['node1'])\n else:\n node = -1\n\n if int(userNo) == 0:\n if hl.createGroup(groupname, internal, external, node):\n return True\n elif int(userNo) > 0:\n if hl.createGroup(groupname, internal, external, node, genUsers=True, numUsers=int(userNo)):\n return True\n\n return False", "def _assert_create_group(self, personality, response=400):\n group_response = self.autoscale_behaviors.create_scaling_group_given(\n lc_personality=personality)\n self.assertEquals(group_response.status_code, response, msg='Create group '\n 'with invalid lc_personality returned {0} as against '\n '{1}'.format(group_response.status_code, response))\n if response is 200:\n group = group_response.entity\n self.resources.add(group, self.empty_scaling_group)\n return group", "def create_groups(**kwargs):\n for gname in SEC_GROUP_NAMES.itervalues():\n Group.objects.get_or_create(name=gname)", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "async def create_group(ctx, name: str, role: str, group_type: str=None, comp: str=None, rating: int=None, time: str=None):\n\n owner = ctx.message.author.name\n \n if comp:\n comp = [int(i) for i in comp.split()] # convert string input to array\n\n new_group = Group(owner, name, role, group_type, rating, time, comp)\n bg_bot.manager.add_group(owner, new_group)\n \n await ctx.send(f'Created new {group_type} group for leader {owner}!')", "def post(self):\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n logging.info(\"[UWEB] add group request: %s, cid: %s\",\n data, self.current_user.cid)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception(\"[UWEB] Invalid data format. body:%s, Exception: %s\",\n self.request.body, e.args)\n self.write_ret(status)\n return\n\n try: \n cid = data.cid\n name = data.name\n group = self.get_group_by_cid(cid, name)\n if group:\n status = ErrorCode.GROUP_EXIST\n self.write_ret(status)\n return\n\n group_info = dict(cid=cid,\n name=name,\n type=UWEB.GROUP_TYPE.NEW)\n gid = add_group(group_info, self.db, self.redis)\n # NOTE: wspush to client\n tid = self.current_user.tid\n if status == ErrorCode.SUCCESS:\n WSPushHelper.pushS3(tid, self.db, self.redis)\n\n self.write_ret(status,\n dict_=dict(gid=gid,\n cid=cid,\n name=name))\n\n except Exception as e:\n logging.exception(\"[UWEB] Create group failed. 
uid: %s, Exception: %s\",\n self.current_user.uid, e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def create(self, context=None):\n values = self.obj_get_changes()\n db_nodegroup = self.dbapi.create_nodegroup(values)\n self._from_db_object(self, db_nodegroup)", "def CreateGroup(self, name, alloc_policy=None, dry_run=False, reason=None):\n query = []\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n body = {\n \"name\": name,\n \"alloc_policy\": alloc_policy,\n }\n\n return self._SendRequest(HTTP_POST, \"/%s/groups\" % GANETI_RAPI_VERSION,\n query, body)", "def create_group(name, nodes, description=None):\n group, created = Group.get_or_create(name=name)\n if created:\n print('Group created with PK={} and name {}'.format(group.pk, group.name))\n else:\n print('Group with name {} and pk {} already exists. Do you want to add nodes?[y/n]'.format(group.name, group.pk))\n answer = raw_input()\n if answer.strip().lower() == 'y':\n pass\n else:\n return\n nodes2 = []\n nodes2_pks = []\n for node in nodes:\n try:\n node = int(node)\n except ValueError:\n pass\n nodes2_pks.append(node)\n try:\n nodes2.append(load_node(node))\n except:# NotExistentError:\n pass\n\n group.add_nodes(nodes2)\n print('added nodes: {} to group {} {}'.format(nodes2_pks, group.name, group.pk))\n\n if description:\n group.description = description\n\n return group", "async def create(\n self,\n resource_group_name: str,\n project_name: str,\n group_name: str,\n group: Optional[\"models.Group\"] = None,\n **kwargs\n ) -> \"models.Group\":\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.Group\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2018-06-01\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\\w\\._\\(\\)]+$'),\n 'projectName': self._serialize.url(\"project_name\", project_name, 'str'),\n 'groupName': self._serialize.url(\"group_name\", group_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n if group is not None:\n body_content = self._serialize.body(group, 'Group')\n else:\n body_content = None\n body_content_kwargs['content'] = body_content\n request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, 
error_format=ARMErrorFormat)\n\n response_headers = {}\n if response.status_code == 200:\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n deserialized = self._deserialize('Group', pipeline_response)\n\n if response.status_code == 201:\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n deserialized = self._deserialize('Group', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, response_headers)\n\n return deserialized", "def post(self):\n args = parser.parse_args()\n user_group = UserGroup()\n user_group.name = args['name']\n user_group.createdby = auth.username()\n db_session.add(user_group)\n db_session.commit()\n return user_group, 201", "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)", "def test_createGroup(self):\n\t\tself.client.force_authenticate(user=User.objects.get(id=1))\n\t\turl = \"/groups/\"\n\t\tdata = {\n\t\t\t'name' : 'testGroup3',\n\t\t\t'description' : 'This is another test group that just created.',\n\t\t\t'isPublic' : True\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data[\"id\"], 3)\n\t\tself.assertEqual(response.data[\"name\"], 'testGroup3')", "def create_group(self, tenant_id, group_id):\n maas_client = self._get_maas_client()\n d = maas_client.add_notification_and_plan()\n\n def create_group_in_db((notification, notification_plan)):\n return cass.create_group(\n self._db, tenant_id, group_id, notification, notification_plan)\n d.addCallback(create_group_in_db)\n\n return d", "def create_group(self, properties: dict[str, Any | None]) -> dict:\n group = self.ms_client.http_request(method='POST', url_suffix='groups', json_data=properties)\n return group", "def create_group(\n self,\n name,\n group,\n validate_only=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n # Wrap the transport method to add retry and timeout logic.\n if \"create_group\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"create_group\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.create_group,\n default_retry=self._method_configs[\"CreateGroup\"].retry,\n default_timeout=self._method_configs[\"CreateGroup\"].timeout,\n client_info=self._client_info,\n )\n\n request = group_service_pb2.CreateGroupRequest(\n name=name, group=group, validate_only=validate_only,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"name\", name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"create_group\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", "def create_group(self, group_name, user_ids=[], role_ids=[]):\n payload = {}\n payload['name'] = group_name\n payload['user_ids'] = user_ids\n payload['role_ids'] = role_ids\n return Client._post(self, payload)", "def create_group_with_http_info(self, bucket_id, group, **kwargs):\n\n all_params = ['bucket_id', 'group', 'if_match', 'if_none_match']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n 
all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_group\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'bucket_id' is set\n if ('bucket_id' not in params) or (params['bucket_id'] is None):\n raise ValueError(\"Missing the required parameter `bucket_id` when calling `create_group`\")\n # verify the required parameter 'group' is set\n if ('group' not in params) or (params['group'] is None):\n raise ValueError(\"Missing the required parameter `group` when calling `create_group`\")\n\n if 'if_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_match']):\n raise ValueError(\"Invalid value for parameter `if_match` when calling `create_group`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n if 'if_none_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_none_match']):\n raise ValueError(\"Invalid value for parameter `if_none_match` when calling `create_group`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n\n collection_formats = {}\n\n resource_path = '/buckets/{bucket_id}/groups'.replace('{format}', 'json')\n path_params = {}\n if 'bucket_id' in params:\n path_params['bucket_id'] = params['bucket_id']\n\n query_params = {}\n\n header_params = {}\n if 'if_match' in params:\n header_params['If-Match'] = params['if_match']\n if 'if_none_match' in params:\n header_params['If-None-Match'] = params['if_none_match']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'group' in params:\n body_params = params['group']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Group',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def create(isamAppliance, id, check_mode=False, force=False):\n if force is True or _check(isamAppliance, id=id) is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post(\"Creating group\", \"/sysaccount/groups/v1\",\n {\n 'id': id\n })\n\n return isamAppliance.create_return_object()", "def gen_group(group_name=None, group_vars={}):\n group = Group(name=group_name)\n for key, value in group_vars.iteritems():\n group.set_variable(key, value)\n return group", "def create_group(self, group):\n if self.dryrun:\n self.logger.info(\"Would create group %s\", group)\n return FakeGroupId()\n result = self.conn.usergroup.create(name=group)\n groupid = result['usrgrpids'][0]\n self.logger.info(\"Create group %s with id %s\", group, groupid)\n return groupid", "def create( self, trans, payload, **kwd ):\n group_dict = dict( message='', status='ok' )\n name = payload.get( 'name', '' 
)\n if name:\n description = payload.get( 'description', '' )\n if not description:\n description = ''\n else:\n # TODO add description field to the model\n group_dict = self.group_manager.create( trans, name=name ).to_dict( view='element', value_mapper=self.__get_value_mapper( trans ) )\n else:\n raise RequestParameterMissingException( 'Missing required parameter \"name\".' )\n return group_dict", "def create_group(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/Create/\"))", "def create_security_group(self, body=None):\r\n return self.post(self.security_groups_path, body=body)", "def handle(self, *args, **options):\n new_group, created = Group.objects.get_or_create(name=options.get('group_name')) \n self.stdout.write(f\"Group {options.get('group_name')} created\")", "def create_secgroup(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n sgid = args[\"Group-Name\"]\n desc = args[\"Description\"]\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n response = ec2.describe_vpcs()\n vpc_id = response.get('Vpcs', [{}])[0].get('VpcId', '')\n\n response = ec2.create_security_group(GroupName=sgid,\n Description=desc,\n VpcId=vpc_id)\n attachment = MessageAttachmentsClass()\n d = response[\"GroupId\"]\n attachment.title = d\n message.message_text = \"Security group created:\"\n message.attach(attachment)\n\n return message.to_json()", "def replace_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n 
response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_new_scaling_group(self, request, data):\n group_cfg = data['groupConfiguration']\n\n group_cfg.setdefault('maxEntities', MAX_ENTITIES)\n group_cfg.setdefault('metadata', {})\n\n if group_cfg['minEntities'] > group_cfg['maxEntities']:\n raise InvalidMinEntities(\n \"minEntities must be less than or equal to maxEntities\")\n\n if data['launchConfiguration']['type'] == 'launch_server':\n validate_launch_config_servicenet(data['launchConfiguration'])\n\n deferred = get_supervisor().validate_launch_config(\n self.log, self.tenant_id, data['launchConfiguration'])\n\n deferred.addCallback(\n lambda _: self.store.create_scaling_group(\n self.log, self.tenant_id,\n group_cfg,\n normalize_launch_config(data['launchConfiguration']),\n data.get('scalingPolicies', None)))\n\n def _do_obey_config_change(result):\n group_id = result['id']\n config = result['groupConfiguration']\n launch = result['launchConfiguration']\n group = self.store.get_scaling_group(\n self.log, self.tenant_id, group_id)\n log = self.log.bind(scaling_group_id=group_id)\n d = controller.modify_and_trigger(\n self.dispatcher,\n group,\n bound_log_kwargs(log),\n partial(\n controller.obey_config_change, log,\n transaction_id(request), config, launch_config=launch),\n modify_state_reason='create_new_scaling_group')\n return d.addCallback(lambda _: result)\n\n deferred.addCallback(_do_obey_config_change)\n\n def _add_to_bobby(result, client):\n d = client.create_group(self.tenant_id, result['id'])\n return d.addCallback(lambda _: result)\n\n bobby = get_bobby()\n if bobby is not None:\n deferred.addCallback(_add_to_bobby, bobby)\n\n def _format_output(result):\n uuid = result['id']\n result[\"state\"] = format_state_dict(result[\"state\"])\n request.setHeader(\n \"Location\",\n get_autoscale_links(self.tenant_id, uuid, format=None))\n result[\"links\"] = get_autoscale_links(self.tenant_id, uuid)\n linkify_policy_list(\n result['scalingPolicies'], self.tenant_id, uuid)\n result['scalingPolicies_links'] = get_policies_links(\n result['scalingPolicies'],\n self.tenant_id, uuid, rel='policies')\n return {\"group\": result}\n\n deferred.addCallback(_format_output)\n deferred.addCallback(json.dumps)\n return deferred", "def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()", "def create_rule_group(self: object,\n body: dict,\n cs_username: str = None # pylint: disable=W0613 # cs_username is deprecated\n ) -> dict:\n # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/create-rule-groupMixin0\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"create_rule_groupMixin0\",\n body=body\n )", "def create_group(user):\n if connexion.request.is_json:\n users_group = [User.from_dict(d) for d in connexion.request.get_json()]\n response = (\"success\", 201)\n if len(users_group) > 4:\n response = (\"Max number of player is 4\", 400)\n else:\n groupId = GroupStorageController.add_new_group(users_group)\n return response", "def create_group():\n incoming = request.get_json()\n chatroom = Chatroom(\n name = incoming['name'],\n tag = incoming['tag'],\n )\n db.session.add(chatroom)\n db.session.commit()\n participant = Participant(\n user_id = session['user_id'],\n room_id = chatroom.room_id,\n )\n db.session.add(participant)\n db.session.commit()\n return jsonify(results = chatroom.room_id)", "def creategroup(body):\n group = 
body.get(\"groupname\", None)\n pps = body.get(\"pilotpoints\", None)\n print('lol',group, pps)\n print(type(pps))\n\n # Does the person exist already?\n if group not in group_dict and group is not None:\n group_dict[group] = {\n \"groupname\": group,\n \"pilotpoints\": pps,\n }\n return group_dict[group], 201\n\n # Otherwise, they exist, that's an error\n else:\n abort(\n 406,\n \"Person with last name {group} already exists\".format(group=group),\n )", "def groups_create(self, mar, request):\n if not permissions.CanCreateGroup(mar.perms):\n raise permissions.PermissionException(\n 'The user is not allowed to create groups.')\n\n user_dict = self._services.user.LookupExistingUserIDs(\n mar.cnxn, [request.groupName])\n if request.groupName.lower() in user_dict:\n raise exceptions.GroupExistsException(\n 'group %s already exists' % request.groupName)\n\n if request.ext_group_type:\n ext_group_type = str(request.ext_group_type).lower()\n else:\n ext_group_type = None\n group_id = self._services.usergroup.CreateGroup(\n mar.cnxn, self._services, request.groupName,\n str(request.who_can_view_members).lower(),\n ext_group_type)\n\n return api_pb2_v1.GroupsCreateResponse(\n groupID=group_id)", "def newgroup(self, groupname, groupou=None, grouptype=None,\n description=None, mailaddress=None, notes=None, sd=None,\n gidnumber=None, nisdomain=None):\n\n group_dn = \"CN=%s,%s,%s\" % (groupname, (groupou or \"CN=Users\"), self.domain_dn())\n\n # The new user record. Note the reliance on the SAMLDB module which\n # fills in the default informations\n ldbmessage = {\"dn\": group_dn,\n \"sAMAccountName\": groupname,\n \"objectClass\": \"group\"}\n\n if grouptype is not None:\n ldbmessage[\"groupType\"] = normalise_int32(grouptype)\n\n if description is not None:\n ldbmessage[\"description\"] = description\n\n if mailaddress is not None:\n ldbmessage[\"mail\"] = mailaddress\n\n if notes is not None:\n ldbmessage[\"info\"] = notes\n\n if gidnumber is not None:\n ldbmessage[\"gidNumber\"] = normalise_int32(gidnumber)\n\n if nisdomain is not None:\n ldbmessage[\"msSFU30Name\"] = groupname\n ldbmessage[\"msSFU30NisDomain\"] = nisdomain\n\n if sd is not None:\n ldbmessage[\"nTSecurityDescriptor\"] = ndr_pack(sd)\n\n self.add(ldbmessage)", "def create(self, body: CloudSecurityGroup) -> Dict:\n\t\treturn self._post(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, body=body)", "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def post(self):\n args = platform_group_arguments.parse_args()\n\n platform_group = PlatformGroup(**args)\n self.session.add(platform_group)\n self.session.commit()\n\n return platform_group", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def create_group(self, groupname, filters=[], filter_options=[], **kwargs):\n pytan.utils.check_for_help(kwargs=kwargs)\n clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs)\n\n filter_defs = pytan.utils.dehumanize_question_filters(question_filters=filters)\n option_defs = pytan.utils.dehumanize_question_options(question_options=filter_options)\n\n h = (\n \"Issue a GetObject to get the full object of specified sensors for inclusion in a \"\n \"group\"\n )\n filter_defs = self._get_sensor_defs(defs=filter_defs, pytan_help=h, **clean_kwargs)\n\n add_group_obj = pytan.utils.build_group_obj(\n q_filter_defs=filter_defs, q_option_defs=option_defs,\n )\n add_group_obj.name = groupname\n\n h = \"Issue an AddObject to 
add a Group object\"\n group_obj = self._add(obj=add_group_obj, pytan_help=h, **clean_kwargs)\n\n m = \"New group {!r} created with ID {!r}, filter text: {!r}\".format\n self.mylog.info(m(group_obj.name, group_obj.id, group_obj.text))\n return group_obj", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n parent = self\n sdef, name, path = self.file.get_custom_node_info(qid, gslash, name, path, parent) \n grp = Group(self.file, sdef, name, path, attrs, parent)\n return grp", "async def command_create(self, context):\n # await self._create_new_role(context, name, target=GROUP_CATEGORY_NAME)\n print('main create')", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def placement_group(template, name):\n p = PlacementGroup(name, template=template)\n p.Strategy = 'cluster'\n return p", "def security_group_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_security_group(**kwargs)", "def __create_group(self):\n\n group = time.strftime(_GROUP_NAME_FORMAT, time.localtime())\n LOG.info(\"Creating backup group '%s'.\", group)\n\n group_path = self.group_path(group)\n\n try:\n os.mkdir(group_path)\n except EnvironmentError as e:\n if e.errno != errno.EEXIST:\n raise Error(\"Unable to create a new backup group '{}': {}.\",\n group_path, psys.e(e))\n\n self.__on_group_created(group)\n\n return group", "def create_app_policy_group(self, name, **kwargs):\n post_body = {'application_policy_group': {'name': name}}\n if kwargs.get('description'):\n post_body['description'] = kwargs.get('description')\n post_body = json.dumps(post_body)\n resp, body = self.post(self.get_uri(self.resource), post_body)\n body = json.loads(body)\n self.expected_success(http_client.CREATED, resp.status)\n return rest_client.ResponseBody(resp, body)", "def add_group_to_json(args):\n\n sanitised_group = args.group.replace('/', '-')\n new_group = {\n \"name\": sanitised_group,\n \"propogate_permissions\": False,\n \"allowed_attributes\": [\n\t\t\t{\n \t\"attribute_requirements\": {}, \n \"permissions\": \"l\"\n }\n\t\t],\n \"buckets\": []\n }\n\n try:\n with open(args.file, \"r\") as f:\n config = json.load(f)\n except FileNotFoundError:\n print(\"Error: could not find given auth JSON file\")\n return 1\n\n config[\"groups\"].append(new_group)\n\n with open(args.file, \"w\") as f:\n json.dump(config, f, indent=4)\n\n return 0", "def create_sg(vpc_id, description, group_name):\n client = boto3.client('ec2')\n security_group = str(group_name + \"_sg\")\n\n # get the security groups\n idle_sg = get_sg()\n\n print(idle_sg)\n print(security_group)\n\n # if security group doesnt exist, create it\n if security_group not in idle_sg:\n print(\"Creating SG\")\n return client.create_security_group(\n Description=description,\n GroupName=security_group,\n VpcId=vpc_id\n )\n return get_sg_id(security_group)", "def create_subgroup_global(request_ctx, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.post(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "def create_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, 
dict]:\n required_properties = {\n 'displayName': str(args.get('display_name')),\n 'mailNickname': str(args.get('mail_nickname')),\n 'mailEnabled': args.get('mail_enabled') == 'true',\n 'securityEnabled': args.get('security_enabled')\n }\n\n # create the group\n group = client.create_group(required_properties)\n\n # display the new group and it's properties\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=f\"{required_properties['displayName']} was created successfully:\",\n t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Mail Enabled'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}\n return human_readable, entry_context, group", "async def create_contact_group(dbcon: DBConnection, name: str, active: bool) -> str:\n q = \"\"\"insert into contact_groups (name, active) values (%s, %s)\"\"\"\n q_args = (name, active)\n contact_group_id = await dbcon.operation(q, q_args)\n return contact_group_id", "def test_create_group_409(self):\n request = {\n 'name': self.test_group1_groupid\n }\n # First create a group indirectly by making a user with a group\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now create a group that is already there\n resp = self.app.post('/groups', data=json.dumps(request))\n assert resp.status_code == 409", "def createGroup(self):\n return _libsbml.ListOfGroups_createGroup(self)", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n sdef, name, path = self.get_custom_node_info(qid, gslash, name, path) \n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent)\n return grp", "def make_group(self, qid, name='', path='', attrs={}, link='', abort=True):\n gqid = qid + \"/\"\n sdef = self.get_sdef(gqid, self.default_ns, \"referenced in make_group\")\n id = sdef['id']\n ns = sdef['ns']\n path = self.deduce_path(id, ns, path)\n if not abort:\n id_noslash = id.rstrip('/') # could be different from gqid if namespace present\n grp = self.get_existing_group(path, id_noslash, name)\n if grp:\n # found already existing group\n return grp \n link_info = self.extract_link_info(name, link, Group)\n # create the group\n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent, link_info)\n return grp", "def post(self, request, *args, **kwargs):\n\n task_log, fund_source = get_task_log_and_fund_source(kwargs['workspace_id'])\n\n async_create_expense_groups(kwargs['workspace_id'], fund_source, task_log)\n\n return Response(status=status.HTTP_200_OK)", "def add_group(self,groupname):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_add_group_query,{'groupname':groupname,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: add_group: %s\" % (query,))\n\n cursor.execute(query)\n if cursor.rowcount > 0:\n db.commit()\n return True\n return False", "def create_test_portgroup(**kw):\n portgroup = get_test_portgroup(**kw)\n # Let DB generate ID if it isn't specified explicitly\n if 'id' not in kw:\n del portgroup['id']\n dbapi = db_api.get_instance()\n return dbapi.create_portgroup(portgroup)", "def async_create_group_entity(\n hass: HomeAssistant,\n name: str,\n 
entity_ids: Collection[str] | None = None,\n user_defined: bool = True,\n icon: str | None = None,\n object_id: str | None = None,\n mode: bool | None = None,\n order: int | None = None,\n ) -> Group:\n if order is None:\n hass.data.setdefault(GROUP_ORDER, 0)\n order = hass.data[GROUP_ORDER]\n # Keep track of the group order without iterating\n # every state in the state machine every time\n # we setup a new group\n hass.data[GROUP_ORDER] += 1\n\n group = Group(\n hass,\n name,\n order=order,\n icon=icon,\n user_defined=user_defined,\n entity_ids=entity_ids,\n mode=mode,\n )\n\n group.entity_id = async_generate_entity_id(\n ENTITY_ID_FORMAT, object_id or name, hass=hass\n )\n\n return group", "def create_group(self, name) -> \"GroupBase\":\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_group(last_name)", "def create_dataset(request: CreateDatasetRequest) -> CreateDatasetResponse:\n assert isinstance(request, CreateDatasetRequest)\n\n create_privacy_group_if_not_exists(\n privacy_group_id=str(request.privacy_group_id),\n privacy_group_name=request.privacy_group_name,\n description=request.description,\n in_use=True,\n fetcher_active=request.fetcher_active,\n matcher_active=request.matcher_active,\n write_back=request.write_back,\n )\n\n return CreateDatasetResponse(\n response=f\"Created dataset {request.privacy_group_id}\"\n )", "def test_create_resource_group(self):\n pass" ]
[ "0.7186179", "0.71604204", "0.7018895", "0.679185", "0.6695333", "0.661491", "0.6580392", "0.6452021", "0.64109707", "0.64004433", "0.6369519", "0.63523793", "0.6294868", "0.628068", "0.6277388", "0.62702346", "0.6215326", "0.6186225", "0.61719334", "0.6139862", "0.61309004", "0.6128545", "0.6117724", "0.6116714", "0.61166877", "0.6114094", "0.61130893", "0.61098534", "0.6097828", "0.60967726", "0.60910565", "0.60856557", "0.60209066", "0.5993855", "0.5961947", "0.5955662", "0.59547627", "0.5951312", "0.5948155", "0.5934469", "0.5928412", "0.59261185", "0.59146506", "0.5907985", "0.590323", "0.5900247", "0.5897385", "0.5890283", "0.5878735", "0.5877304", "0.58734035", "0.58618337", "0.5841242", "0.5829357", "0.5818993", "0.58050513", "0.57855034", "0.5769989", "0.57687587", "0.57674813", "0.57602143", "0.5749873", "0.57495475", "0.5748384", "0.5738509", "0.57288253", "0.57278144", "0.572261", "0.5706104", "0.5689989", "0.5689505", "0.56874144", "0.56719726", "0.565817", "0.56463003", "0.56463003", "0.5642606", "0.5642302", "0.56399184", "0.5628184", "0.56101185", "0.5581777", "0.5573728", "0.55523866", "0.5550261", "0.55469614", "0.5533662", "0.5522931", "0.5514987", "0.55103326", "0.55030173", "0.55012363", "0.5501194", "0.5493946", "0.5490748", "0.5488968", "0.54805857", "0.5479473", "0.54778916", "0.5474399" ]
0.71613544
1
Creates a new group in your tenancy. You must specify your tenancy's OCID as the compartment ID in the request object (remember that the tenancy is simply the root compartment). Notice that IAM resources (users, groups, compartments, and some policies) reside within the tenancy itself, unlike cloud resources such as compute instances, which typically reside within compartments inside the tenancy. For information about OCIDs, see `Resource Identifiers`__. You must also specify a name for the group, which must be unique across all groups in your tenancy and cannot be changed. You can use this name or the OCID when writing policies that apply to the group. For more information about policies, see `How Policies Work`__. You must also specify a description for the group (although it can be an empty string). It does not
def create_group(self, create_group_details, **kwargs):
    resource_path = "/groups"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_group got unknown kwargs: {!r}".format(extra_kwargs))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_group_details,
            response_type="Group")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_group_details,
            response_type="Group")
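For context, a minimal usage sketch of the method above as called through the public OCI Python SDK identity client; the group name, description, and config profile here are illustrative assumptions, not values taken from this dataset row:

import oci

# Assumes a valid OCI config/profile at the default location (~/.oci/config).
config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Groups live in the tenancy (root compartment), so the tenancy OCID is the compartment_id.
details = oci.identity.models.CreateGroupDetails(
    compartment_id=config["tenancy"],
    name="NetworkAdmins",                            # illustrative name
    description="Users who manage cloud networks",   # illustrative description
)
group = identity.create_group(details).data
print(group.id, group.lifecycle_state)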
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_group(group_id, group_name):\n\n kwargs = config.DEFAULT_REST_KWARGS\n kwargs[\"data\"] = {\"id\": group_id, \"name\": group_name}\n http_response = call_rest_api(\"/identities/groups/\", \"post\", **kwargs)\n if http_response.status_code != 201: # 201 = 'new group created'\n raise ValueError(http_response.text)\n logger.log(f\"New custom group, {group_name}, with ID: {group_id}, was created successfully.\")", "def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response", "def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def create_group(self, groupname):\n data = {\"groupname\": groupname}\n headers = {\"user-agent\": self.u_agent}\n req_url = self.normalize_admin_url(\"groups\")\n res = requests.post(\n req_url,\n headers=headers,\n auth=self.auth,\n data=json.dumps(data),\n verify=False,\n )\n if res.status_code == 201:\n return Response(0, u\"Group {} has been created\".format(groupname))\n else:\n return Response(res.status_code, res)", "def create_group(self, group_name, group_type):\n grp_data = {\"name\": group_name, \"type\": group_type}\n return requests.post(self.groups_url, data=json.dumps(grp_data),\n headers=self.headers)", "def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))", "def create_group(self, event):\n body = event['body']\n body = json.loads(body)\n\n # Required field in POST body\n if 'group_name' not in body:\n return self.get_bad_request('POST body missing group_name')\n\n group_name = body['group_name']\n user = self.mealShareUsers.get_user_cognito_data(event)\n user_id = user['user_id']\n \n # Add the creator to the group, as the initial member\n group_id = self.mealShareGroups.create_group(group_name)\n success = self.mealShareGroups.add_user_to_group(user_id, group_id)\n if success:\n return {\n 'statusCode': 200,\n 'statusMessage': 'Successfully created group {} with ID {}'.format(group_name, group_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }\n else:\n return {\n 'statusCode': 500,\n 'statusMessage': 'FAILED to create group {} by user {}'.format(group_name, user_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }", "def create_group(self, name):\n\t\tdata = {\"name\":name}\n\t\tresponse = self.client.post(self._endpoint + \"/group\", content=data)\n\t\treturn Group(\n\t\t\tresponse.json['group_id'],\n\t\t\tself.user_id,\n\t\t\tself.site_id,\n\t\t\tdata=response.json\n\t\t)", "def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code == 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200", "def __create_new_group(self, group_name) -> None:\n group = Group(name=group_name)\n 
group.save()\n\n self.__add_permission_to_group(group)", "def CreateGroup(self, name, alloc_policy=None, dry_run=False, reason=None):\n query = []\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n body = {\n \"name\": name,\n \"alloc_policy\": alloc_policy,\n }\n\n return self._SendRequest(HTTP_POST, \"/%s/groups\" % GANETI_RAPI_VERSION,\n query, body)", "async def create(\n self,\n resource_group_name: str,\n project_name: str,\n group_name: str,\n group: Optional[\"models.Group\"] = None,\n **kwargs\n ) -> \"models.Group\":\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.Group\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2018-06-01\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\\w\\._\\(\\)]+$'),\n 'projectName': self._serialize.url(\"project_name\", project_name, 'str'),\n 'groupName': self._serialize.url(\"group_name\", group_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n if group is not None:\n body_content = self._serialize.body(group, 'Group')\n else:\n body_content = None\n body_content_kwargs['content'] = body_content\n request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n response_headers = {}\n if response.status_code == 200:\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n deserialized = self._deserialize('Group', pipeline_response)\n\n if response.status_code == 201:\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n deserialized = self._deserialize('Group', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, response_headers)\n\n return deserialized", "def create_security_group(self, body=None):\r\n return self.post(self.security_groups_path, body=body)", "def create():\n name = request.json['name']\n level = request.json['level']\n manager = request.json['manager']\n if models.user.Group.get(name):\n raise Conflict('Group already exists.', creation=False)\n else:\n authorize(manager, level=level)\n group = models.user.Group(name=name, level=level, manager=manager)\n models.db.session.add(group)\n 
models.db.session.commit()\n return response(200, creation=True)", "def create_group(\n self,\n name,\n group,\n validate_only=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n # Wrap the transport method to add retry and timeout logic.\n if \"create_group\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"create_group\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.create_group,\n default_retry=self._method_configs[\"CreateGroup\"].retry,\n default_timeout=self._method_configs[\"CreateGroup\"].timeout,\n client_info=self._client_info,\n )\n\n request = group_service_pb2.CreateGroupRequest(\n name=name, group=group, validate_only=validate_only,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"name\", name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"create_group\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", "def create(self, group_name):\n METHOD = 'POST'\n API_PATH = '/groups/create'\n\n data = {'group_name': group_name}\n\n # Make REST call\n resp = self._rest_call[METHOD](API_PATH, data=data)\n if resp.status_code == 200:\n return resp.json()\n\n elif resp.status_code == 403:\n raise AuthorizationError(\"User is not authorized or token is incorrect.\")\n\n else:\n if resp.json().get(\"error_code\") in ERROR_CODES:\n raise ERROR_CODES[resp.json().get('error_code')](resp.json().get('message'))\n else:\n raise APIError(\"Response code {0}: {1} {2}\".format(resp.status_code,\n resp.json().get('error_code'),\n resp.json().get('message')))", "def create_namespaced_group(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def 
test_createGroup(self):\n\t\tself.client.force_authenticate(user=User.objects.get(id=1))\n\t\turl = \"/groups/\"\n\t\tdata = {\n\t\t\t'name' : 'testGroup3',\n\t\t\t'description' : 'This is another test group that just created.',\n\t\t\t'isPublic' : True\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data[\"id\"], 3)\n\t\tself.assertEqual(response.data[\"name\"], 'testGroup3')", "async def create_group(ctx, name: str, role: str, group_type: str=None, comp: str=None, rating: int=None, time: str=None):\n\n owner = ctx.message.author.name\n \n if comp:\n comp = [int(i) for i in comp.split()] # convert string input to array\n\n new_group = Group(owner, name, role, group_type, rating, time, comp)\n bg_bot.manager.add_group(owner, new_group)\n \n await ctx.send(f'Created new {group_type} group for leader {owner}!')", "def create(self, name, desc, tenant_id):\n data = {\"security_group\": {\"name\": name, \n \"description\": desc, \n \"tenant_id\":tenant_id}}\n\n path = '%s/security-groups' % self.ver\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create openstack security group: %s' % truncate(res))\n return res[0]['security_group']", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def create_group(self, identifier: str, group_name: str) -> Group:\n\n # APM-137701 - Namespace for custom device calculation should not be set\n group_id = get_group_id(\"\", identifier)\n if group_id in self._groups:\n raise ValueError(\"Group \" + group_name + \" already exist, id: \" + str(group_id))\n else:\n group = Group(group_id, group_name, self._technologies, self._results_builder)\n\n self._groups[group_id] = group\n return group", "def add_group():\n name = request.form['name']\n data, code, message = FIELD_SERVICE.add_group(name)\n return __result(data, code, message)", "def create_placement_group(self, name, strategy='cluster'):\r\n params = {'GroupName':name, 'Strategy':strategy}\r\n group = self.get_status('CreatePlacementGroup', params, verb='POST')\r\n return group", "def security_group_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_security_group(**kwargs)", "def create_new_group(self, group_id, poll_id, name):\n obj = self.table()\n obj.group_id = str(group_id)\n obj.poll_id = poll_id\n obj.name = name\n self.db.session.add(obj)\n self.db.session.commit()", "def create_group(self, group):\n if self.dryrun:\n self.logger.info(\"Would create group %s\", group)\n return FakeGroupId()\n result = self.conn.usergroup.create(name=group)\n groupid = result['usrgrpids'][0]\n self.logger.info(\"Create group %s with id %s\", group, groupid)\n return groupid", "def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))", "def create_group(self, group_name, user_ids=[], role_ids=[]):\n payload = {}\n payload['name'] = group_name\n 
payload['user_ids'] = user_ids\n payload['role_ids'] = role_ids\n return Client._post(self, payload)", "def groups_create(self, mar, request):\n if not permissions.CanCreateGroup(mar.perms):\n raise permissions.PermissionException(\n 'The user is not allowed to create groups.')\n\n user_dict = self._services.user.LookupExistingUserIDs(\n mar.cnxn, [request.groupName])\n if request.groupName.lower() in user_dict:\n raise exceptions.GroupExistsException(\n 'group %s already exists' % request.groupName)\n\n if request.ext_group_type:\n ext_group_type = str(request.ext_group_type).lower()\n else:\n ext_group_type = None\n group_id = self._services.usergroup.CreateGroup(\n mar.cnxn, self._services, request.groupName,\n str(request.who_can_view_members).lower(),\n ext_group_type)\n\n return api_pb2_v1.GroupsCreateResponse(\n groupID=group_id)", "def create(self, body: CloudSecurityGroup) -> Dict:\n\t\treturn self._post(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, body=body)", "def request_group_create():\n return Response(render_template('admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/create\"),\n mimetype='text/html')", "def capacitygroup_create(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_create(cmd_ctx, cpc, options))", "def post(self):\n args = parser.parse_args()\n user_group = UserGroup()\n user_group.name = args['name']\n user_group.createdby = auth.username()\n db_session.add(user_group)\n db_session.commit()\n return user_group, 201", "def newgroup(self, groupname, groupou=None, grouptype=None,\n description=None, mailaddress=None, notes=None, sd=None,\n gidnumber=None, nisdomain=None):\n\n group_dn = \"CN=%s,%s,%s\" % (groupname, (groupou or \"CN=Users\"), self.domain_dn())\n\n # The new user record. 
Note the reliance on the SAMLDB module which\n # fills in the default informations\n ldbmessage = {\"dn\": group_dn,\n \"sAMAccountName\": groupname,\n \"objectClass\": \"group\"}\n\n if grouptype is not None:\n ldbmessage[\"groupType\"] = normalise_int32(grouptype)\n\n if description is not None:\n ldbmessage[\"description\"] = description\n\n if mailaddress is not None:\n ldbmessage[\"mail\"] = mailaddress\n\n if notes is not None:\n ldbmessage[\"info\"] = notes\n\n if gidnumber is not None:\n ldbmessage[\"gidNumber\"] = normalise_int32(gidnumber)\n\n if nisdomain is not None:\n ldbmessage[\"msSFU30Name\"] = groupname\n ldbmessage[\"msSFU30NisDomain\"] = nisdomain\n\n if sd is not None:\n ldbmessage[\"nTSecurityDescriptor\"] = ndr_pack(sd)\n\n self.add(ldbmessage)", "def post_groups(\n data: PostGroupIn, tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.create_group\")\n grp = Group(\n description=data.description,\n members=[tkn.owner],\n group_name=data.group_name,\n owner=tkn.owner,\n ).save()\n logging.debug(\n \"Created group %s (%s) owned by %s\",\n data.group_name,\n str(grp.pk),\n tkn.owner.character_name,\n )\n return GetGroupOut.from_record(grp)", "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)", "def create_secgroup(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n sgid = args[\"Group-Name\"]\n desc = args[\"Description\"]\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n response = ec2.describe_vpcs()\n vpc_id = response.get('Vpcs', [{}])[0].get('VpcId', '')\n\n response = ec2.create_security_group(GroupName=sgid,\n Description=desc,\n VpcId=vpc_id)\n attachment = MessageAttachmentsClass()\n d = response[\"GroupId\"]\n attachment.title = d\n message.message_text = \"Security group created:\"\n message.attach(attachment)\n\n return message.to_json()", "def create_adgroup(self, account_id, name, campaign_id,\n creative_id, bid_type=None, bid_info=None, max_bid=None,\n tracking_specs=None, view_tags=None, objective=None,\n adgroup_status=None, targeting=None, conversion_specs=None, batch=False):\n path = 'act_%s/adgroups' % account_id\n args = {\n 'name': name,\n 'campaign_id': campaign_id,\n 'creative': json.dumps({'creative_id': creative_id}),\n }\n if bid_type:\n args['bid_type'] = bid_type\n if max_bid:\n # can only use max_bid with CPM bidding\n args['max_bid'] = max_bid\n elif bid_info:\n args['bid_info'] = json.dumps(bid_info)\n\n if tracking_specs:\n args['tracking_specs'] = json.dumps(tracking_specs)\n if view_tags:\n args['view_tags'] = json.dumps(view_tags)\n if objective:\n args['objective'] = objective\n if adgroup_status:\n args['adgroup_status'] = adgroup_status\n if targeting:\n args['targeting'] = json.dumps(targeting)\n if conversion_specs:\n args['conversion_specs'] = json.dumps(conversion_specs)\n return self.make_request(path, 'POST', args, batch=batch)", "def create_group_with_http_info(self, bucket_id, group, **kwargs):\n\n all_params = ['bucket_id', 'group', 'if_match', 'if_none_match']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in 
iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_group\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'bucket_id' is set\n if ('bucket_id' not in params) or (params['bucket_id'] is None):\n raise ValueError(\"Missing the required parameter `bucket_id` when calling `create_group`\")\n # verify the required parameter 'group' is set\n if ('group' not in params) or (params['group'] is None):\n raise ValueError(\"Missing the required parameter `group` when calling `create_group`\")\n\n if 'if_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_match']):\n raise ValueError(\"Invalid value for parameter `if_match` when calling `create_group`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n if 'if_none_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_none_match']):\n raise ValueError(\"Invalid value for parameter `if_none_match` when calling `create_group`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n\n collection_formats = {}\n\n resource_path = '/buckets/{bucket_id}/groups'.replace('{format}', 'json')\n path_params = {}\n if 'bucket_id' in params:\n path_params['bucket_id'] = params['bucket_id']\n\n query_params = {}\n\n header_params = {}\n if 'if_match' in params:\n header_params['If-Match'] = params['if_match']\n if 'if_none_match' in params:\n header_params['If-None-Match'] = params['if_none_match']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'group' in params:\n body_params = params['group']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Group',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def new_group(request):\n return edit_group(request, None)", "def create(person_group_id, name=None, user_data=None):\n name = person_group_id if name is None else name\n url = 'persongroups/{}'.format(person_group_id)\n json = {\n 'name': name,\n 'userData': user_data,\n }\n\n return util.request('PUT', url, json=json)", "def create_ad_group(client, customer_id, campaign_resource_name):\n ad_group_service = client.get_service(\"AdGroupService\")\n\n # Creates the ad group.\n # Note that the ad group type must not be set.\n # Since the advertising_channel_sub_type is APP_CAMPAIGN,\n # 1- you cannot override bid settings at the ad group level.\n # 2- you cannot add ad group criteria.\n ad_group_operation = client.get_type(\"AdGroupOperation\")\n ad_group = ad_group_operation.create\n ad_group.name = f\"Earth to Mars cruises {uuid4()}\"\n ad_group.status = client.enums.AdGroupStatusEnum.ENABLED\n ad_group.campaign = campaign_resource_name\n\n 
ad_group_response = ad_group_service.mutate_ad_groups(\n customer_id=customer_id, operations=[ad_group_operation]\n )\n\n ad_group_resource_name = ad_group_response.results[0].resource_name\n print(f'Ad Group created with resource name: \"{ad_group_resource_name}\".')\n return ad_group_resource_name", "def _assert_create_group(self, personality, response=400):\n group_response = self.autoscale_behaviors.create_scaling_group_given(\n lc_personality=personality)\n self.assertEquals(group_response.status_code, response, msg='Create group '\n 'with invalid lc_personality returned {0} as against '\n '{1}'.format(group_response.status_code, response))\n if response is 200:\n group = group_response.entity\n self.resources.add(group, self.empty_scaling_group)\n return group", "def create_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n required_properties = {\n 'displayName': str(args.get('display_name')),\n 'mailNickname': str(args.get('mail_nickname')),\n 'mailEnabled': args.get('mail_enabled') == 'true',\n 'securityEnabled': args.get('security_enabled')\n }\n\n # create the group\n group = client.create_group(required_properties)\n\n # display the new group and it's properties\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=f\"{required_properties['displayName']} was created successfully:\",\n t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Mail Enabled'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}\n return human_readable, entry_context, group", "def create_TestGroup(test_case, # type: AnyMagpieTestCaseType\n override_group_name=null, # type: Optional[Str]\n override_discoverable=null, # type: Optional[bool]\n override_data=null, # type: Optional[JSON]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) 
-> JSON\n app_or_url = get_app_or_url(test_case)\n data = override_data\n if override_data is null:\n data = {\"group_name\": override_group_name if override_group_name is not null else test_case.test_group_name}\n # only add 'discoverable' if explicitly provided here to preserve original behaviour of 'no value provided'\n if override_discoverable is not null:\n data[\"discoverable\"] = override_discoverable\n grp_name = (data or {}).get(\"group_name\")\n if grp_name:\n test_case.extra_group_names.add(grp_name) # indicate potential removal at a later point\n resp = test_request(app_or_url, \"POST\", \"/groups\", json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n return check_response_basic_info(resp, 201, expected_method=\"POST\")", "def create_group():\n incoming = request.get_json()\n chatroom = Chatroom(\n name = incoming['name'],\n tag = incoming['tag'],\n )\n db.session.add(chatroom)\n db.session.commit()\n participant = Participant(\n user_id = session['user_id'],\n room_id = chatroom.room_id,\n )\n db.session.add(participant)\n db.session.commit()\n return jsonify(results = chatroom.room_id)", "def create_group(self, properties: dict[str, Any | None]) -> dict:\n group = self.ms_client.http_request(method='POST', url_suffix='groups', json_data=properties)\n return group", "def allocate_group(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_AllocateNewGroupID(objectid)\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_AllocateNewGroupID(key1, result_val)\n return result_val.i", "def create(\n self, draft: CustomerGroupDraft, *, expand: OptionalListStr = None\n ) -> CustomerGroup:\n params = self._serialize_params({\"expand\": expand}, traits.ExpandableSchema)\n return self._client._post(\n endpoint=\"customer-groups\",\n params=params,\n data_object=draft,\n response_class=CustomerGroup,\n )", "def create_security_group(self, name, description, vpc_id=None):\r\n params = {\r\n 'GroupName': name,\r\n 'GroupDescription': description\r\n }\r\n\r\n if vpc_id is not None:\r\n params['VpcId'] = vpc_id\r\n\r\n group = self.get_object('CreateSecurityGroup', params,\r\n SecurityGroup, verb='POST')\r\n group.name = name\r\n group.description = description\r\n return group", "def create_security_group(group_name):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n for g in ec2.get_all_security_groups():\n if g.name == group_name:\n return # We already have this group setup\n group = ec2.create_security_group(group_name,\n \"%s SSH access group\" % group_name)\n group.authorize(\"tcp\", 22, 22, \"0.0.0.0/0\") # SSH is on port 22, all IPs\n group.authorize(\"tcp\", 80, 80, \"0.0.0.0/0\")\n group.authorize(\"tcp\", 61000, 65000, \"0.0.0.0/0\")\n print \"Created new security group\"", "def create_group(name, nodes, description=None):\n group, created = Group.get_or_create(name=name)\n if created:\n print('Group created with PK={} and name {}'.format(group.pk, group.name))\n else:\n print('Group with name {} and pk {} already exists. 
Do you want to add nodes?[y/n]'.format(group.name, group.pk))\n answer = raw_input()\n if answer.strip().lower() == 'y':\n pass\n else:\n return\n nodes2 = []\n nodes2_pks = []\n for node in nodes:\n try:\n node = int(node)\n except ValueError:\n pass\n nodes2_pks.append(node)\n try:\n nodes2.append(load_node(node))\n except:# NotExistentError:\n pass\n\n group.add_nodes(nodes2)\n print('added nodes: {} to group {} {}'.format(nodes2_pks, group.name, group.pk))\n\n if description:\n group.description = description\n\n return group", "def create(self, context=None):\n values = self.obj_get_changes()\n db_nodegroup = self.dbapi.create_nodegroup(values)\n self._from_db_object(self, db_nodegroup)", "async def async_create_group(\n hass: HomeAssistant,\n name: str,\n entity_ids: Collection[str] | None = None,\n user_defined: bool = True,\n icon: str | None = None,\n object_id: str | None = None,\n mode: bool | None = None,\n order: int | None = None,\n ) -> Group:\n group = Group.async_create_group_entity(\n hass, name, entity_ids, user_defined, icon, object_id, mode, order\n )\n\n # If called before the platform async_setup is called (test cases)\n await _async_get_component(hass).async_add_entities([group])\n return group", "def create_groups(**kwargs):\n for gname in SEC_GROUP_NAMES.itervalues():\n Group.objects.get_or_create(name=gname)", "def create(self, name, desc):\n body = {'security_group': {'name': name,\n 'description': desc,\n 'tenant_id': self.request.user.project_id}}\n secgroup = self.client.create_security_group(body)\n return SecurityGroup(secgroup.get('security_group'))", "def create_rule_group(self: object,\n body: dict,\n cs_username: str = None # pylint: disable=W0613 # cs_username is deprecated\n ) -> dict:\n # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/create-rule-groupMixin0\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"create_rule_groupMixin0\",\n body=body\n )", "def test_create_resource_group(self):\n pass", "def create_sec_group(self, conn, name, project):\n sec_group = conn.create_security_group(\n name=name, description=\"Security Group\",\n project_id=project.id)\n conn.create_security_group_rule(sec_group)\n return sec_group", "def post(self):\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n logging.info(\"[UWEB] add group request: %s, cid: %s\",\n data, self.current_user.cid)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception(\"[UWEB] Invalid data format. body:%s, Exception: %s\",\n self.request.body, e.args)\n self.write_ret(status)\n return\n\n try: \n cid = data.cid\n name = data.name\n group = self.get_group_by_cid(cid, name)\n if group:\n status = ErrorCode.GROUP_EXIST\n self.write_ret(status)\n return\n\n group_info = dict(cid=cid,\n name=name,\n type=UWEB.GROUP_TYPE.NEW)\n gid = add_group(group_info, self.db, self.redis)\n # NOTE: wspush to client\n tid = self.current_user.tid\n if status == ErrorCode.SUCCESS:\n WSPushHelper.pushS3(tid, self.db, self.redis)\n\n self.write_ret(status,\n dict_=dict(gid=gid,\n cid=cid,\n name=name))\n\n except Exception as e:\n logging.exception(\"[UWEB] Create group failed. 
uid: %s, Exception: %s\",\n self.current_user.uid, e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def test_create_team_user_group(client):\n group = client.create_team_user_group(TEAM_ID, {\n \"name\": \"Python group\",\n \"is_reviewer\": True,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == NEW_GROUP_ID\n assert group.name == \"Python group\"\n assert group.permissions['is_admin']\n assert group.permissions['is_reviewer']\n assert group.permissions['admin_rights'] == [\"upload\"]", "def create_group(self, tenant_id, group_id):\n maas_client = self._get_maas_client()\n d = maas_client.add_notification_and_plan()\n\n def create_group_in_db((notification, notification_plan)):\n return cass.create_group(\n self._db, tenant_id, group_id, notification, notification_plan)\n d.addCallback(create_group_in_db)\n\n return d", "def createNewGroup():\n if request.method == 'POST':\n groupname = request.form['groupname1']\n internal = request.form['internal1']\n external = request.form['external1']\n userNo = request.form['usersNo1']\n if 'node1' in request.form:\n node = int(request.form['node1'])\n else:\n node = -1\n\n if int(userNo) == 0:\n if hl.createGroup(groupname, internal, external, node):\n return True\n elif int(userNo) > 0:\n if hl.createGroup(groupname, internal, external, node, genUsers=True, numUsers=int(userNo)):\n return True\n\n return False", "def create_group(self, bucket_id, group, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_group_with_http_info(bucket_id, group, **kwargs)\n else:\n (data) = self.create_group_with_http_info(bucket_id, group, **kwargs)\n return data", "def create( self, trans, payload, **kwd ):\n group_dict = dict( message='', status='ok' )\n name = payload.get( 'name', '' )\n if name:\n description = payload.get( 'description', '' )\n if not description:\n description = ''\n else:\n # TODO add description field to the model\n group_dict = self.group_manager.create( trans, name=name ).to_dict( view='element', value_mapper=self.__get_value_mapper( trans ) )\n else:\n raise RequestParameterMissingException( 'Missing required parameter \"name\".' 
)\n return group_dict", "def create_group(username: str, gid: int=None, system: bool=False) -> Result[Group]:\n try:\n group = get_group(username)\n except KeyError:\n return add_group(username, gid, system)\n else:\n if group.gr_gid != gid:\n raise ValueError(\"Group {!r} has GID {}, expected {}\"\n .format(username, group.gr_gid, gid))\n return Result(State.unchanged, group)", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def __create_group(self):\n\n group = time.strftime(_GROUP_NAME_FORMAT, time.localtime())\n LOG.info(\"Creating backup group '%s'.\", group)\n\n group_path = self.group_path(group)\n\n try:\n os.mkdir(group_path)\n except EnvironmentError as e:\n if e.errno != errno.EEXIST:\n raise Error(\"Unable to create a new backup group '{}': {}.\",\n group_path, psys.e(e))\n\n self.__on_group_created(group)\n\n return group", "def create_group(user):\n if connexion.request.is_json:\n users_group = [User.from_dict(d) for d in connexion.request.get_json()]\n response = (\"success\", 201)\n if len(users_group) > 4:\n response = (\"Max number of player is 4\", 400)\n else:\n groupId = GroupStorageController.add_new_group(users_group)\n return response", "def create_group(self, groupname, filters=[], filter_options=[], **kwargs):\n pytan.utils.check_for_help(kwargs=kwargs)\n clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs)\n\n filter_defs = pytan.utils.dehumanize_question_filters(question_filters=filters)\n option_defs = pytan.utils.dehumanize_question_options(question_options=filter_options)\n\n h = (\n \"Issue a GetObject to get the full object of specified sensors for inclusion in a \"\n \"group\"\n )\n filter_defs = self._get_sensor_defs(defs=filter_defs, pytan_help=h, **clean_kwargs)\n\n add_group_obj = pytan.utils.build_group_obj(\n q_filter_defs=filter_defs, q_option_defs=option_defs,\n )\n add_group_obj.name = groupname\n\n h = \"Issue an AddObject to add a Group object\"\n group_obj = self._add(obj=add_group_obj, pytan_help=h, **clean_kwargs)\n\n m = \"New group {!r} created with ID {!r}, filter text: {!r}\".format\n self.mylog.info(m(group_obj.name, group_obj.id, group_obj.text))\n return group_obj", "def _make_group(self, _rk, _group_hint):\n\n if isinstance(_group_hint, dict):\n # _group_hint is a single key/value pair\n g = _group_hint[list(_group_hint)[0]]\n\n r_type = g.get(\"type\", \"none\")\n if r_type != \"OS::Nova::ServerGroup\":\n return \"support only ServerGroup resource\"\n\n properties = g.get(\"properties\", {})\n if len(properties) == 0:\n return \"no properties\"\n\n group_name = properties.get(\"name\", None)\n if group_name is None:\n return \"no group name\"\n group_name = group_name.strip()\n\n policies = properties.get(\"policies\", [])\n if len(policies) == 0:\n return \"no policy of the group\"\n\n if len(policies) > 1:\n return \"multiple policies\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if group_name in self.groups.keys():\n group = self.groups[group_name]\n else:\n group = Group(group_name)\n\n policy = policies[0].strip()\n if policy == \"anti-affinity\":\n group_type = \"diversity\"\n else:\n group_type = policy\n\n group.group_type = group_type\n group.factory = \"server-group\"\n group.level = \"host\"\n\n self.groups[group_name] = group\n else:\n # group hint is uuid string.\n rg = self.resource.get_group_by_uuid(_group_hint)\n if rg is None:\n return \"unknown group found while 
making group\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if rg.name in self.groups.keys():\n group = self.groups[rg.name]\n else:\n group = Group(rg.name)\n\n group.group_type = rg.group_type\n group.factory = rg.factory\n group.level = \"host\"\n\n self.groups[rg.name] = group\n\n if group is not None:\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"", "def create(isamAppliance, id, check_mode=False, force=False):\n if force is True or _check(isamAppliance, id=id) is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post(\"Creating group\", \"/sysaccount/groups/v1\",\n {\n 'id': id\n })\n\n return isamAppliance.create_return_object()", "def group(self, request, group_id):\n return OtterGroup(self.store, self.tenant_id,\n group_id, self.dispatcher).app.resource()", "def group_add_name(org_id, data):\n if data.has_key('groupname'):\n groupname = data['groupname']\n add_group(org_id, groupname, False)", "def test_create_group_409(self):\n request = {\n 'name': self.test_group1_groupid\n }\n # First create a group indirectly by making a user with a group\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now create a group that is already there\n resp = self.app.post('/groups', data=json.dumps(request))\n assert resp.status_code == 409", "def create_group(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/Create/\"))", "async def create_contact_group(dbcon: DBConnection, name: str, active: bool) -> str:\n q = \"\"\"insert into contact_groups (name, active) values (%s, %s)\"\"\"\n q_args = (name, active)\n contact_group_id = await dbcon.operation(q, q_args)\n return contact_group_id", "def createGroup(self, group, members):\n connection = self.sock\n\n connection.send(\"create_group\".encode())\n\n status_code = connection.recv(2)\n\n if status_code != SUCCESS:\n print(\"Error\")\n return -1\n message = []\n message.append(\"gname:\")\n message.append(group)\n message.append(\";\")\n message.append(\"members:\")\n for i in members:\n message.append(i)\n message.append(\",\")\n if members:\n message.pop()\n message = ''.join(message)\n message = message.encode()\n connection.send(message)\n result = connection.recv(2)\n if result != SUCCESS:\n return -1\n\n packed_gid = connection.recv(4)\n gid = struct.unpack(\"<L\", packed_gid)\n repoids.append(gid)\n return 1", "def create_sg(vpc_id, description, group_name):\n client = boto3.client('ec2')\n security_group = str(group_name + \"_sg\")\n\n # get the security groups\n idle_sg = get_sg()\n\n print(idle_sg)\n print(security_group)\n\n # if security group doesnt exist, create it\n if security_group not in idle_sg:\n print(\"Creating SG\")\n return client.create_security_group(\n Description=description,\n GroupName=security_group,\n VpcId=vpc_id\n )\n return get_sg_id(security_group)", "def ex_create_security_group(self, name, description):\n params = {'Action': 'CreateSecurityGroup',\n 'GroupName': name,\n 'GroupDescription': description}\n return self.connection.request(self.path, params=params).object", "def creategroup(body):\n group = body.get(\"groupname\", None)\n pps = body.get(\"pilotpoints\", None)\n print('lol',group, pps)\n print(type(pps))\n\n # Does the person exist already?\n if group not in group_dict and group is not 
None:\n group_dict[group] = {\n \"groupname\": group,\n \"pilotpoints\": pps,\n }\n return group_dict[group], 201\n\n # Otherwise, they exist, that's an error\n else:\n abort(\n 406,\n \"Person with last name {group} already exists\".format(group=group),\n )", "def create_project(self, conn, name, description=\"\"):\n group = conn.group.allocate(name, description)\n # returns Project object\n return group", "def create_group_scene(self, name, group):\n data = {\n \"name\": name,\n \"group\": group,\n \"recycle\": True,\n \"type\": \"GroupScene\"\n }\n return self.bridge.bridge.post('/scenes', data)", "def add_secgroup(self, name=None, description=None):\n # print (\"UUUU\")\n if self.cloudman:\n if description is None:\n description = name\n try:\n self.cloudman.network.create_security_group(\n name=name,\n description=description)\n except:\n Console.warning(f\"secgroup {name} already exists in cloud. \"\n f\"skipping.\")\n else:\n raise ValueError(\"cloud not initialized\")", "def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1", "def create_research_group(self, name, code=None, description=None):\n ResearchGroupRepository = get_repository('ResearchGroupRepository')\n\n research_group = ResearchGroup(unit_id=self.id, code=code)\n\n research_group.name = name\n\n research_group.description=description\n research_group.user_id= self.user_id\n research_group.startdate = None\n research_group.enddate = None\n research_group.license = None\n research_group.ids = None\n ResearchGroupRepository.save(research_group)\n\n return research_group", "def createGroup(self):\n return _libsbml.ListOfGroups_createGroup(self)", "def register_group(self, **fields):\n if 'group_key' not in fields.keys():\n raise KeyError('Primary key is missing')\n existing_fields = [i.name for i in self._db.get_columns('groups')]\n needed_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n needed_fields[key] = value\n check = Groups.get_or_none(group_key=needed_fields['group_key'])\n if check is not None:\n return check\n dummy_teacher = Teachers.get(teacher_key=fields['teacher']) if 'teacher' in fields else Teachers.get(teacher_key='0')\n dummy_tutor = Tutors.get(tutor_key=fields['tutor']) if 'tutor' in fields else Tutors.get(tutor_key='0')\n dummy_course = Courses.get(course_key=fields['course']) if 'course' in fields else Courses.get(course_key='0')\n new_group = Groups.get_or_create(teacher=dummy_teacher, tutor=dummy_tutor, course=dummy_course, **needed_fields)\n return new_group", "async def command_create(self, context):\n # await self._create_new_role(context, name, target=GROUP_CATEGORY_NAME)\n print('main create')", "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def handle(self, *args, **options):\n new_group, created = Group.objects.get_or_create(name=options.get('group_name')) \n self.stdout.write(f\"Group {options.get('group_name')} created\")", "def create_group(self, name) -> \"GroupBase\":\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_group(last_name)", "def 
createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def post_security_group_create(self, resource_dict):\n pass", "def create(self, validated_data):\n admin_id = validated_data.pop('admin_id', None)\n users = validated_data.pop('users', None)\n group = Group.objects.create(\n admin=admin_id, **validated_data\n )\n if users is not None:\n group.users.set(users)\n group.save()\n return group", "def add_group(self, resolvable):\n group = self._resolve_group(resolvable)\n return self._client.group_memberships.create({\n 'account': self,\n 'group': group,\n })", "async def add_country_group_async(\n body: Optional[AddCountryGroupRequest] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = AddCountryGroup.create(\n body=body,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )" ]
[ "0.7795205", "0.76985514", "0.75594074", "0.7485939", "0.7361224", "0.7275385", "0.7226914", "0.7171012", "0.7022135", "0.7012331", "0.70026654", "0.6958713", "0.6953057", "0.6946662", "0.6930038", "0.68761617", "0.68680024", "0.6829689", "0.6791976", "0.67673075", "0.67458194", "0.6739689", "0.67273766", "0.6694578", "0.6683829", "0.66818786", "0.6666198", "0.66433865", "0.66410846", "0.663097", "0.6591401", "0.65880996", "0.658466", "0.657879", "0.65678144", "0.6565356", "0.65637505", "0.6554267", "0.6546635", "0.6514908", "0.6507296", "0.6507296", "0.6469481", "0.6465262", "0.64547205", "0.6448505", "0.64376134", "0.64343834", "0.64154005", "0.6414627", "0.6396364", "0.6376567", "0.6365447", "0.63453394", "0.6341325", "0.6341231", "0.633921", "0.6331937", "0.6328687", "0.62847745", "0.6277781", "0.6267766", "0.6262555", "0.625486", "0.6246472", "0.62288123", "0.6228652", "0.6227351", "0.6223595", "0.62165976", "0.62141347", "0.6211684", "0.62101644", "0.6208205", "0.61926514", "0.6189376", "0.6182613", "0.6181146", "0.61696476", "0.6164491", "0.61601025", "0.6146641", "0.61334515", "0.61317176", "0.6128281", "0.6102575", "0.6084897", "0.6079304", "0.60532016", "0.60482174", "0.6046931", "0.60465133", "0.6035445", "0.60300994", "0.6008034", "0.6000354", "0.59863174", "0.5980952", "0.5971276", "0.5953805" ]
0.6698751
23
Creates a new identity provider in your tenancy. For more information, see `Identity Providers and Federation`__. You must specify your tenancy's OCID as the compartment ID in the request object. Remember that the tenancy is simply the root compartment. For information about OCIDs, see `Resource Identifiers`__. You must also specify a name for the `IdentityProvider`, which must be unique across all `IdentityProvider` objects in your tenancy and cannot be changed. You must also specify a description for the `IdentityProvider` (although it can be an empty string). It does not have to be unique, and you can change it anytime with
def create_identity_provider(self, create_identity_provider_details, **kwargs):
    resource_path = "/identityProviders"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_identity_provider got unknown kwargs: {!r}".format(extra_kwargs))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_identity_provider_details,
            response_type="IdentityProvider")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_identity_provider_details,
            response_type="IdentityProvider")
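A minimal usage sketch for the method above, assuming it is the generated IdentityClient.create_identity_provider from the OCI Python SDK. The model class, its field names, and every value below are illustrative assumptions added for clarity, not part of the original record; the opc_retry_token keyword is taken from the method's own expected_kwargs.

import oci

# Assumes a standard ~/.oci/config profile that includes a "tenancy" OCID.
config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Hypothetical SAML2 provider details; all values are placeholders.
details = oci.identity.models.CreateSaml2IdentityProviderDetails(
    compartment_id=config["tenancy"],   # must be the tenancy (root compartment) OCID
    name="ExampleIdP",                  # unique across IdentityProviders, cannot be changed later
    description="Example SAML2 identity provider",
    product_type="IDCS",
    metadata_url="https://idp.example.com/metadata",
    metadata="<EntityDescriptor>...</EntityDescriptor>",
)

response = identity.create_identity_provider(
    details,
    opc_retry_token="example-retry-token",  # optional idempotency token for safe retries
)
print(response.data.id, response.data.lifecycle_state)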
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_identity_provider(module, sdk, cloud, name):\n\n if module.check_mode:\n return True, None\n\n description = module.params.get('description')\n enabled = module.params.get('enabled')\n domain_id = module.params.get('domain_id')\n remote_ids = module.params.get('remote_ids')\n\n if enabled is None:\n enabled = True\n if remote_ids is None:\n remote_ids = []\n\n attributes = {\n 'domain_id': domain_id,\n 'enabled': enabled,\n 'remote_ids': remote_ids,\n }\n if description is not None:\n attributes['description'] = description\n\n try:\n idp = cloud.identity.create_identity_provider(id=name, **attributes)\n except sdk.exceptions.OpenStackCloudException as ex:\n module.fail_json(msg='Failed to create identity provider: {0}'.format(str(ex)))\n return (True, idp)", "def create_provider(\n provider_id:UUID = Form(...),\n name:str = Form(...),\n qualification:str = Form(...),\n speciality:str = Form(...),\n phone:str = Form(...),\n department:Optional[str] = Form(\"N/A\"),\n organization:str = Form(...),\n location:Optional[str] = Form(\"N/A\"),\n address:str = Form(...),\n active:bool = Form(...)\n ):\n\n post_data = {\n \"name\": name,\n \"qualification\": qualification,\n \"speciality\": speciality,\n \"phone\": phone,\n \"department\": department,\n \"organization\": organization,\n \"location\": location,\n \"address\": address,\n \"active\": active\n }\n provider_data = open_for_reading()\n if str(provider_id) in provider_data.keys():\n response = {\"message\": \"ID already exists\"}\n else:\n provider_data[str(provider_id)] = post_data\n open_for_writing(data=provider_data)\n response = {\"message\": \"provider created\"}\n\n return response", "def create_cloud_provider(providername):\n backend_name = request.get_json().get(\"backend\")\n service_name = request.get_json().get(\"service\")\n response = jsonify(\n admin.create_provider(\n current_app.scoped_session(),\n providername,\n backend=backend_name,\n service=service_name,\n )\n )\n return response", "def create(self, identity, record=None, data=None, **kwargs):\n data['id'] = data['id'].lower()\n self._validate(data['id'])\n record['id'] = data['id']\n try:\n provider = record.__class__.pid.field._provider.create(record=record)\n except PIDAlreadyExists:\n raise ValidationError(\n 'A community with this identifier already exists.',\n field_name='id',\n )\n setattr(record, 'pid', provider.pid)", "def m_create_identity(DID, domain_name, website, commercial_name, parent_node_account, password, overwrite):\n\n error, didDoc = create_identity(\n DID, domain_name, website, commercial_name, parent_node_account, password, overwrite)\n if error is not None:\n print(error)\n\n print(f\"Created\")", "def add_new_provider(self, provider_name, provider_type, endpoints, zone_id, provider_region):\n try:\n result = self.client.post(self.providers_url, name=provider_name,\n type=ManageIQProvider.PROVIDER_TYPES[provider_type],\n zone={'id': zone_id},\n connection_configurations=endpoints,\n provider_region=provider_region)\n provider_id = result['results'][0]['id']\n self.changed = True\n except Exception as e:\n self.module.fail_json(msg=\"Failed to add provider. 
Error: {!r}\".format(e))\n return provider_id", "def create(self, identity, data=None, record=None, **kwargs):\n self._populate_access_and_validate(identity, data, record, **kwargs)\n self._init_owners(identity, record, **kwargs)", "def create(self, identity, data=None, record=None, **kwargs):\n if system_process in identity.provides:\n return\n\n member = {\n \"type\": \"user\",\n \"id\": str(identity.id),\n }\n self.service.members.add(\n # the user is not yet owner of the community (is being added)\n # therefore we cannot use `identity`\n system_identity,\n record.id,\n {\"members\": [member], \"role\": current_roles.owner_role.name},\n uow=self.uow,\n )\n\n # Invalidate the membership cache\n on_user_membership_change(identity=identity)", "def __init__(__self__, *,\n identity_pool_id: pulumi.Input[str],\n identity_provider_name: pulumi.Input[str],\n principal_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n use_defaults: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"identity_pool_id\", identity_pool_id)\n pulumi.set(__self__, \"identity_provider_name\", identity_provider_name)\n if principal_tags is not None:\n pulumi.set(__self__, \"principal_tags\", principal_tags)\n if use_defaults is not None:\n pulumi.set(__self__, \"use_defaults\", use_defaults)", "def test_create_identity(self):\n pass", "def __init__(__self__, *,\n principal_id: Optional[pulumi.Input[str]] = None,\n tenant_id: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[Union[str, 'ManagedIdentityType']]] = None,\n user_assigned_identities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if principal_id is not None:\n pulumi.set(__self__, \"principal_id\", principal_id)\n if tenant_id is not None:\n pulumi.set(__self__, \"tenant_id\", tenant_id)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if user_assigned_identities is not None:\n pulumi.set(__self__, \"user_assigned_identities\", user_assigned_identities)", "def _create_resource_provider(context, uuid, name,\n parent_provider_uuid=None):\n return {\n 'uuid': uuid,\n 'name': name,\n 'generation': 0,\n 'parent_provider_uuid': parent_provider_uuid\n }", "def __init__(__self__, *,\n identity_pool_id: Optional[pulumi.Input[str]] = None,\n identity_provider_name: Optional[pulumi.Input[str]] = None,\n principal_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n use_defaults: Optional[pulumi.Input[bool]] = None):\n if identity_pool_id is not None:\n pulumi.set(__self__, \"identity_pool_id\", identity_pool_id)\n if identity_provider_name is not None:\n pulumi.set(__self__, \"identity_provider_name\", identity_provider_name)\n if principal_tags is not None:\n pulumi.set(__self__, \"principal_tags\", principal_tags)\n if use_defaults is not None:\n pulumi.set(__self__, \"use_defaults\", use_defaults)", "def _create_resource_provider(self, uuid, name):\n url = \"/resource_providers\"\n payload = {\n 'uuid': uuid,\n 'name': name,\n }\n resp = self.post(url, payload)\n if resp.status_code == 201:\n msg = _LI(\"Created resource provider record via placement API \"\n \"for resource provider with UUID {0} and name {1}.\")\n msg = msg.format(uuid, name)\n LOG.info(msg)\n return objects.ResourceProvider(\n uuid=uuid,\n name=name,\n generation=1,\n )\n elif resp.status_code == 409:\n # Another thread concurrently created a resource provider with the\n # same UUID. 
Log a warning and then just return the resource\n # provider object from _get_resource_provider()\n msg = _LI(\"Another thread already created a resource provider \"\n \"with the UUID {0}. Grabbing that record from \"\n \"the placement API.\")\n msg = msg.format(uuid)\n LOG.info(msg)\n return self._get_resource_provider(uuid)\n else:\n msg = _LE(\"Failed to create resource provider record in \"\n \"placement API for UUID %(uuid)s. \"\n \"Got %(status_code)d: %(err_text)s.\")\n args = {\n 'uuid': uuid,\n 'status_code': resp.status_code,\n 'err_text': resp.text,\n }\n LOG.error(msg, args)", "async def create_issuer(self, issuer_name: str, provider: str, **kwargs) -> CertificateIssuer:\n\n enabled = kwargs.pop(\"enabled\", None)\n account_id = kwargs.pop(\"account_id\", None)\n password = kwargs.pop(\"password\", None)\n organization_id = kwargs.pop(\"organization_id\", None)\n admin_contacts = kwargs.pop(\"admin_contacts\", None)\n\n if account_id or password:\n issuer_credentials = self._models.IssuerCredentials(account_id=account_id, password=password)\n else:\n issuer_credentials = None\n if admin_contacts:\n admin_details: Optional[List[Any]] = [\n self._models.AdministratorDetails(\n first_name=contact.first_name,\n last_name=contact.last_name,\n email_address=contact.email,\n phone=contact.phone,\n )\n for contact in admin_contacts\n ]\n else:\n admin_details = None\n if organization_id or admin_details:\n organization_details = self._models.OrganizationDetails(id=organization_id, admin_details=admin_details)\n else:\n organization_details = None\n if enabled is not None:\n issuer_attributes = self._models.IssuerAttributes(enabled=enabled)\n else:\n issuer_attributes = None\n\n parameters = self._models.CertificateIssuerSetParameters(\n provider=provider,\n credentials=issuer_credentials,\n organization_details=organization_details,\n attributes=issuer_attributes,\n )\n\n issuer_bundle = await self._client.set_certificate_issuer(\n vault_base_url=self.vault_url, issuer_name=issuer_name, parameter=parameters, **kwargs\n )\n return CertificateIssuer._from_issuer_bundle(issuer_bundle=issuer_bundle)", "def create_namespaced_identity(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_identity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_identity`\")\n\n resource_path = '/oapi/v1/identities'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n 
body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Identity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def __init__(__self__,\n resource_name: str,\n args: OpenIdConnectProviderArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def create(self, identity, data=None, record=None, **kwargs):\n if record.access.visibility != \"public\":\n return\n\n community_set = self._create_set_from_community(record)\n # NOTE: will be indexed via a listener in oaiserver module\n db.session.add(community_set)", "def create(self, identity, data=None, record=None, **kwargs):\n self._populate_access_and_validate(identity, data, record, **kwargs)", "def _create_entity_in_domain(entity_type, domain_id):\n if entity_type == 'users':\n new_entity = unit.new_user_ref(domain_id=domain_id)\n new_entity = self.identity_api.create_user(new_entity)\n elif entity_type == 'groups':\n new_entity = unit.new_group_ref(domain_id=domain_id)\n new_entity = self.identity_api.create_group(new_entity)\n elif entity_type == 'roles':\n new_entity = self._create_role(domain_id=domain_id)\n else:\n # Must be a bad test plan\n raise exception.NotImplemented()\n return new_entity", "def create_with_instance_principal(iam_auth_uri=None):\n if iam_auth_uri is None:\n return SignatureProvider(\n oci.auth.signers.InstancePrincipalsSecurityTokenSigner())\n else:\n return SignatureProvider(\n oci.auth.signers.InstancePrincipalsSecurityTokenSigner(\n federation_endpoint=iam_auth_uri))", "def __init__(__self__,\n resource_name: str,\n args: IdentityPoolProviderPrincipalTagArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, region, user_pool_id, app_client_id):\n self.region = region\n self.user_pool_id = user_pool_id\n self.client_id = app_client_id\n self.client = boto3.client('cognito-idp', region_name=self.region)", "def register_provider(args):\n if len(args) == 0:\n click.echo(\"Usage: mephisto register <provider_type> --arg1:value --arg2:value\")\n return\n\n from mephisto.core.local_database import LocalMephistoDB\n from mephisto.core.registry import get_crowd_provider_from_type\n from mephisto.core.argparse_parser import parse_arg_dict, get_extra_argument_dicts\n\n provider_type, requester_args = args[0], args[1:]\n args_dict = dict(arg.split(\":\") for arg in requester_args)\n transformed = dict(\n (key, {\"option_string\": key, \"value\": value})\n for (key, value) in args_dict.items()\n )\n\n crowd_provider = get_crowd_provider_from_type(provider_type)\n RequesterClass = crowd_provider.RequesterClass\n\n if len(requester_args) == 0:\n from tabulate import tabulate\n\n params = get_extra_argument_dicts(RequesterClass)\n for param in params:\n click.echo(param[\"desc\"])\n click.echo(tabulate(param[\"args\"].values(), headers=\"keys\"))\n return\n\n try:\n parsed_options = parse_arg_dict(RequesterClass, transformed)\n except Exception as e:\n click.echo(str(e))\n\n if \"name\" not in parsed_options:\n click.echo(\"No name was specified for the requester.\")\n\n db = LocalMephistoDB()\n requesters = db.find_requesters(requester_name=parsed_options[\"name\"])\n if len(requesters) == 0:\n requester = RequesterClass.new(db, parsed_options[\"name\"])\n else:\n requester = requesters[0]\n try:\n requester.register(parsed_options)\n click.echo(\"Registered successfully.\")\n except Exception as e:\n click.echo(str(e))", "def add_tomcat7_idp():\n pass", "def sso_test_create_user(request, idp_slug):\n if 
settings.SERVER_ENVIRONMENT not in ['staging']:\n raise Http404()\n\n username = request.GET.get('username')\n if username:\n prepare_session_with_sso_username(request, username)\n\n invitation_uuid = request.GET.get('invitation')\n invitation = Invitation.objects.get(uuid=invitation_uuid)\n if invitation:\n prepare_session_for_sso_invitation(request, invitation)\n\n return HttpResponseRedirect(reverse(\"sso_saml_login\", args=(idp_slug,)))", "def test_create_resource_provider(self):\n uuid = uuids.compute_node\n name = 'computehost'\n resp_mock = mock.Mock(status_code=200)\n self.ks_adap_mock.post.return_value = resp_mock\n\n self.assertEqual(\n resp_mock.json.return_value,\n self.client._create_resource_provider(self.context, uuid, name))\n\n expected_payload = {\n 'uuid': uuid,\n 'name': name,\n }\n\n expected_url = '/resource_providers'\n self.ks_adap_mock.post.assert_called_once_with(\n expected_url, json=expected_payload, microversion='1.20',\n global_request_id=self.context.global_id)", "def __init__(__self__, *,\n identity_namespace: Optional[pulumi.Input[str]] = None,\n identity_provider: Optional[pulumi.Input[str]] = None,\n workload_pool: Optional[pulumi.Input[str]] = None):\n if identity_namespace is not None:\n pulumi.set(__self__, \"identity_namespace\", identity_namespace)\n if identity_provider is not None:\n pulumi.set(__self__, \"identity_provider\", identity_provider)\n if workload_pool is not None:\n pulumi.set(__self__, \"workload_pool\", workload_pool)", "def create(self, request, *args, **kwargs):\n \n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n provider = serializer.data.get('provider')\n\n access_token = serializer.data.get('access_token')\n user = None if request.user.is_anonymous else request.user\n\n # strategy sets up the required custom configuration for working with Django\n strategy = load_strategy(request)\n try:\n # Loads backends defined on SOCIAL_AUTH_AUTHENTICATION_BACKENDS,\n # checks the appropriate one by using the provider given\n\n backend = load_backend(strategy=strategy, name=provider, redirect_uri=None)\n access_token = self.update_access_token(backend, request, access_token)\n\n except MissingBackend:\n return Response({\n \"errors\": {\n \"provider\": [\"Invalid provider\"]\n }\n }, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n # creates a user in our user model \n # If the user exists, we just authenticate the user.\n user = backend.do_auth(access_token, user=user)\n\n except BaseException as error:\n return Response({\"error\": str(error)}, status=status.HTTP_400_BAD_REQUEST)\n\n # Since the user is using social authentication, there is no need for email verification.\n # We therefore set the user to active here.\n # And also subscribe them for notifications\n\n user.is_active = True\n user.save()\n\n subscribe_user(user, self.subscription_class)\n\n serializer = UserSerializer(user)\n \n return Response(serializer.data, status=status.HTTP_200_OK)", "def createTenant(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def create_epic():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/epics\".format(STORED_ID['project_id']))\n name = \"\".join(choices(string.ascii_letters, k=6))\n body = {\"name\": name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n STORED_ID['epic_id'] = 
response.json()['id']", "def application_create(request, name, redirect_uris, scopes=['all_info'],\n client_type='confidential', description=None,\n grant_type='authorization_code', **kwargs):\n manager = api.keystone.keystoneclient(request, admin=True).oauth2.consumers\n return manager.create(name=name,\n redirect_uris=redirect_uris,\n description=description,\n scopes=scopes,\n client_type=client_type,\n grant_type=grant_type,\n **kwargs)", "def __init__(__self__, *,\n auth_type: pulumi.Input[str],\n client_id: pulumi.Input[str],\n subscription_id: pulumi.Input[str]):\n pulumi.set(__self__, \"auth_type\", 'userAssignedIdentity')\n pulumi.set(__self__, \"client_id\", client_id)\n pulumi.set(__self__, \"subscription_id\", subscription_id)", "def create(self, request):\n\n return ObtainAuthToken().post(request)", "def register_api_provider(api_provider, contact_info):\n\n req = requests.post(domain_name + \"register_api_provider?\",\n data=json.dumps({'api_provider': api_provider,\n 'contact_info': contact_info}),\n headers={'Content-type': 'application/json',\n 'Accept': 'application/json'})\n response = req.json()\n return response", "def create(self, request):\n return ObtainAuthToken().post(request)", "def __init__(__self__, *,\n auth_type: pulumi.Input[str]):\n pulumi.set(__self__, \"auth_type\", 'systemAssignedIdentity')", "def create_customer(email=None, name=None, user_type='customer'):\n if user_type == 'charity':\n stripe.api_key = Config.STRIPE_SECRET_KEY_FOR_PLAN\n else:\n stripe.api_key = Config.STRIPE_SECRET_KEY\n if email and name:\n customer = stripe.Customer.create(email=email, name=name)\n else:\n customer = stripe.Customer.create()\n return customer.id", "def __init__(__self__, *,\n cognitive_service_region: Optional[pulumi.Input[str]] = None,\n cognitive_service_resource_id: Optional[pulumi.Input[str]] = None,\n cognitive_service_subscription_key: Optional[pulumi.Input[str]] = None,\n default_locale: Optional[pulumi.Input[str]] = None,\n id: Optional[pulumi.Input[str]] = None,\n provider_name: Optional[pulumi.Input[str]] = None):\n if cognitive_service_region is not None:\n pulumi.set(__self__, \"cognitive_service_region\", cognitive_service_region)\n if cognitive_service_resource_id is not None:\n pulumi.set(__self__, \"cognitive_service_resource_id\", cognitive_service_resource_id)\n if cognitive_service_subscription_key is not None:\n pulumi.set(__self__, \"cognitive_service_subscription_key\", cognitive_service_subscription_key)\n if default_locale is not None:\n pulumi.set(__self__, \"default_locale\", default_locale)\n if id is not None:\n pulumi.set(__self__, \"id\", id)\n if provider_name is not None:\n pulumi.set(__self__, \"provider_name\", provider_name)", "def identity_provider(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_provider\")", "def get_identity_provider(self, identity_provider_id, **kwargs):\n resource_path = \"/identityProviders/{identityProviderId}\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"get_identity_provider got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"identityProviderId\": identity_provider_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} 
cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"IdentityProvider\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"IdentityProvider\")", "def test_create_resource_provider_with_parent(self):\n parent_uuid = uuids.parent\n uuid = uuids.compute_node\n name = 'computehost'\n resp_mock = mock.Mock(status_code=200)\n self.ks_adap_mock.post.return_value = resp_mock\n\n self.assertEqual(\n resp_mock.json.return_value,\n self.client._create_resource_provider(\n self.context,\n uuid,\n name,\n parent_provider_uuid=parent_uuid,\n )\n )\n\n expected_payload = {\n 'uuid': uuid,\n 'name': name,\n 'parent_provider_uuid': parent_uuid,\n }\n expected_url = '/resource_providers'\n self.ks_adap_mock.post.assert_called_once_with(\n expected_url, json=expected_payload, microversion='1.20',\n global_request_id=self.context.global_id)", "def create_identity(self, realm=None, type=\"users\", user_data=None):\n if not user_data:\n raise ValueError(\"Please provide correct user information.\")\n\n user_data = self._to_string(data=user_data)\n type = self._type_validator(type=type)\n uri = self._uri_realm_creator(realm=realm, uri=type + '/?_action=create')\n data = self._post(uri=uri, data=user_data, headers=self.headers)\n return data.json()", "def create_tenant(self, tenant_info):\n LOG_OBJ.debug(\"Creating Tenant:%s\" % tenant_info['project_name'])\n _tenant_name = tenant_info['project_name']\n _user_name = tenant_info.get('user_name', _tenant_name + \"_user\")\n _password = tenant_info.get('password', _tenant_name + \"_pass\")\n\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n _tenant_data = {\"tenant\": {\"enabled\": True, \"name\": _tenant_name,\n \"description\": \"Testing API 3\"}}\n\n _body = json.dumps(_tenant_data)\n\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating tenant: %s\"\n % _tenant_name)\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Create tenant Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Created tenant: %s successfully.\" % _tenant_name)\n\n _tenant_id = output['tenant']['id']\n # If user id is passed then, directly add that user to the tenant.\n # otherwise Create a new user.\n _user_id = tenant_info.get('user_id', None)\n if not _user_id:\n _user_data = {\"user\": {\"email\": None,\n \"password\": _password,\n \"enabled\": True,\n \"name\": _user_name,\n \"tenantId\": _tenant_id}}\n _user_id = self.create_user(_user_data)\n if not isinstance(_user_id, unicode):\n return\n tenant_info['userID'] = _user_id\n\n # Add the user roles.\n for role_name in tenant_info['roles']:\n role_id = self.get_role_id(role_name)\n if not isinstance(role_id, unicode):\n return\n # Add user role.\n if not 
self.add_user_role(_tenant_id, _user_id, role_id):\n return\n # Get the token.\n token_id = self.get_token(_tenant_name, _user_name, _password)\n if not isinstance(token_id, unicode):\n return\n # Set the new context. note: This is v2 token, so only project scope.\n self.set_tenant_info(_tenant_name, token_id, token_id, _tenant_id)\n\n # Adding Security Group Rules\n # Add the ICMP rule.\n # if not isinstance(self.add_security_group_rules(\"icmp\"), bool):\n # return\n # Add the rule for ssh\n # if not isinstance(self.add_security_group_rules(\n # \"tcp\", from_port='22', to_port='22'), bool):\n # return\n # Add the rule for all udp\n # if not isinstance(self.add_security_group_rules(\n # \"udp\", from_port='1', to_port='65535'), bool):\n # return\n\n # Modify the tenant quota.\n # if not isinstance(self.set_quota(_tenant_id), bool):\n # return\n # Update the quota\n # fields = {\"network\": 50, \"subnet\": 50, \"port\": 100, \"floatingip\": 50}\n # quotas = self.quota_update(_tenant_id, fields)\n # if not isinstance(quotas, dict):\n # return\n # LOG_OBJ.info(\"Quota for tenant[%s] is:%s\" % (_tenant_id,\n # str(quotas)))\n return _tenant_id", "def testCreateOrg(self):\n self.timeline.orgSignup()\n self.data.createProfile()\n self.record.createOrgApp('new_org', self.data.user)\n\n url = '/gci/profile/organization/' + self.gci.key().name()\n create_url = url + '?org_id=new_org'\n response = self.get(create_url)\n self.assertResponseOK(response)\n self.assertOrgProfilePageTemplatesUsed(response)\n \n postdata = {\n 'founder': self.data.user, 'home': self.createDocument().key(),\n 'scope': self.gci, 'irc_channel': 'irc://example.com',\n 'pub_mailing_list': 'http://example.com',\n }\n response, properties = self.modelPost(create_url, GCIOrganization, postdata)\n self.assertResponseRedirect(response, url + '/new_org?validated')\n profile = db.get(self.data.profile.key())\n self.assertEqual(1, len(profile.org_admin_for))", "def create_tenant(tenant):\n exists = identity.Tenant.query.filter_by(name=tenant.name).first()\n if exists:\n abort(409, \"Tenant Already Exists\")\n db.session.add(tenant)\n db.session.commit()\n return tenant.id", "def __init__(self, *args, **kwargs):\n super(EnterpriseCustomerIdentityProviderAdminForm, self).__init__(*args, **kwargs)\n idp_choices = utils.get_idp_choices()\n if idp_choices is not None:\n self.fields['provider_id'] = forms.TypedChoiceField(choices=idp_choices)", "def create_org(self, provider='qbo', status=CONNECTED, set_provider_config=True):\n\n if set_provider_config:\n provider_config = self.provider_configs[provider]\n else:\n provider_config = None\n\n org = Org(provider=provider, id='test', status=status, provider_config=provider_config).put()\n\n OrgCredentials(id='test', parent=org, token={'expires_at': 0}).put()", "async def register_investigator(request):\n required_fields = ['name']\n general.validate_fields(required_fields, request.json)\n\n name = request.json.get('name')\n\n clinic_signer = request.app.config.SIGNER_INVESTIGATOR # .get_public_key().as_hex()\n\n client_txn = consent_transaction.create_investigator_client(\n txn_signer=clinic_signer,\n batch_signer=clinic_signer\n )\n clinic_txn = ehr_transaction.create_investigator(\n txn_signer=clinic_signer,\n batch_signer=clinic_signer,\n name=name\n )\n batch, batch_id = ehr_transaction.make_batch_and_id([client_txn, clinic_txn], clinic_signer)\n\n await security_messaging.add_investigator(\n request.app.config.VAL_CONN,\n request.app.config.TIMEOUT,\n [batch])\n\n try:\n await 
security_messaging.check_batch_status(\n request.app.config.VAL_CONN, [batch_id])\n except (ApiBadRequest, ApiInternalError) as err:\n # await auth_query.remove_auth_entry(\n # request.app.config.DB_CONN, request.json.get('email'))\n raise err\n\n return response.json(body={'status': general.DONE},\n headers=general.get_response_headers())", "def create_manager(self, username, tenancy):\n raise NotImplementedError", "def makeIdentity(self) -> None:\n ...", "def create_tenant(tenant_name, description, enabled, auth_admin_url, admin_token):\n keystone = get_client(auth_admin_url, admin_token)\n tenant = keystone.tenants.create(tenant_name=tenant_name, description=description, enabled=enabled)\n print tenant\n return tenant.to_dict()", "async def async_oauth_create_entry(self, data):\n\n await self.async_set_unique_id(unique_id=f\"{DOMAIN}Cloud\")\n return self.async_create_entry(title=f\"{DOMAIN}Cloud\", data=data)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n identity_pool_id: Optional[pulumi.Input[str]] = None,\n identity_provider_name: Optional[pulumi.Input[str]] = None,\n principal_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n use_defaults: Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n identity_type: Optional[pulumi.Input[Union[str, 'CmkIdentityType']]] = None,\n user_assigned_identity_resource_id: Optional[pulumi.Input[str]] = None):\n if identity_type is not None:\n pulumi.set(__self__, \"identity_type\", identity_type)\n if user_assigned_identity_resource_id is not None:\n pulumi.set(__self__, \"user_assigned_identity_resource_id\", user_assigned_identity_resource_id)", "def create_partner(name):\n\n return Partner.objects.create(name=name)", "def post(self):\n data = request.json\n return save_new_provider(data=data)", "def identity_provider_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"identity_provider_name\")", "def __init__(__self__, *,\n principal_id: str,\n tenant_id: str,\n type: Optional[str] = None,\n user_assigned_identities: Optional[Mapping[str, 'outputs.UserAssignedIdentityResponse']] = None):\n pulumi.set(__self__, \"principal_id\", principal_id)\n pulumi.set(__self__, \"tenant_id\", tenant_id)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if user_assigned_identities is not None:\n pulumi.set(__self__, \"user_assigned_identities\", user_assigned_identities)", "def __init__(__self__, *,\n principal_id: str,\n tenant_id: str,\n type: Optional[str] = None,\n user_assigned_identities: Optional[Mapping[str, 'outputs.UserAssignedIdentityResponse']] = None):\n pulumi.set(__self__, \"principal_id\", principal_id)\n pulumi.set(__self__, \"tenant_id\", tenant_id)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if user_assigned_identities is not None:\n pulumi.set(__self__, \"user_assigned_identities\", user_assigned_identities)", "def test_create_provider(self):\n url = reverse('provider-list')\n data = {'name': 'foo'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Provider.objects.count(), 1)", "def CreateOIDCClient(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def create(self, validated_data, request: HttpRequest = None):\n # Writing of .get(\"xy\", None) or None makes sure that 
empty strings will be mapped to None\n user = user_helper.get_user(request=request)\n get_capabilities_uri = validated_data.get(\"uri\", None) or None\n registering_with_group = validated_data.get(\"group\", None) or None\n registering_for_org = validated_data.get(\"for-org\", None) or None\n has_ext_auth = validated_data.get(\"ext-auth\", False) or False\n ext_auth_username = validated_data.get(\"ext-username\", None) or None\n ext_auth_password = validated_data.get(\"ext-password\", None) or None\n ext_auth_type = validated_data.get(\"ext-auth-type\", None) or None\n\n # Split uri in components as it is done with RegisterNewServiceWizardPage1\n url_dict = service_helper.split_service_uri(get_capabilities_uri)\n ogc_request = url_dict[\"request\"]\n ogc_service = url_dict[\"service\"].value\n ogc_version = url_dict[\"version\"]\n uri = url_dict[\"base_uri\"]\n\n init_data = {\n \"ogc_request\": ogc_request,\n \"ogc_service\": ogc_service,\n \"ogc_version\": ogc_version,\n \"uri\": uri,\n \"registering_with_group\": registering_with_group,\n \"registering_for_other_organization\": registering_for_org,\n \"service_needs_authentication\": has_ext_auth,\n \"username\": ext_auth_username,\n \"password\": ext_auth_password,\n \"authentication_type\": ext_auth_type,\n }\n\n # Use RegisterNewResourceWizardPage2 workflow as for frontend registration\n form = RegisterNewResourceWizardPage2(\n data=init_data,\n request=request\n )\n if form.is_valid():\n pending_task = service_helper.create_new_service(form, user)\n return pending_task\n return form", "def create(self,request):\n return CustomAuthToken().post(request)", "def __init__(__self__, *,\n authorizations: Sequence['outputs.AuthorizationResponse'],\n managed_by_tenant_id: str,\n managed_by_tenant_name: str,\n managee_tenant_id: str,\n managee_tenant_name: str,\n provisioning_state: str,\n description: Optional[str] = None,\n eligible_authorizations: Optional[Sequence['outputs.EligibleAuthorizationResponse']] = None,\n registration_definition_name: Optional[str] = None):\n pulumi.set(__self__, \"authorizations\", authorizations)\n pulumi.set(__self__, \"managed_by_tenant_id\", managed_by_tenant_id)\n pulumi.set(__self__, \"managed_by_tenant_name\", managed_by_tenant_name)\n pulumi.set(__self__, \"managee_tenant_id\", managee_tenant_id)\n pulumi.set(__self__, \"managee_tenant_name\", managee_tenant_name)\n pulumi.set(__self__, \"provisioning_state\", provisioning_state)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if eligible_authorizations is not None:\n pulumi.set(__self__, \"eligible_authorizations\", eligible_authorizations)\n if registration_definition_name is not None:\n pulumi.set(__self__, \"registration_definition_name\", registration_definition_name)", "def _create_citizen(self, citizen_id, relatives):\n return models.Citizen(\n citizen_id=citizen_id,\n town='a', street='b', building='c', apartment=1, name='e', birth_date='01.01.1970',\n gender='male', relatives=relatives\n )", "def setup_test_tenant(self):\n self.test_tenant = rand_name('test_tenant_')\n self.test_description = rand_name('desc_')\n resp, self.tenant = self.client.create_tenant(\n name=self.test_tenant,\n description=self.test_description)\n self.tenants.append(self.tenant)", "def __init__(__self__,\n resource_name: str,\n args: ApplicationFederatedIdentityCredentialArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__, *,\n authorizations: Optional[Sequence['outputs.AuthorizationResponse']] = 
None,\n description: Optional[str] = None,\n eligible_authorizations: Optional[Sequence['outputs.EligibleAuthorizationResponse']] = None,\n managed_by_tenant_id: Optional[str] = None,\n managed_by_tenant_name: Optional[str] = None,\n managee_tenant_id: Optional[str] = None,\n managee_tenant_name: Optional[str] = None,\n provisioning_state: Optional[str] = None,\n registration_definition_name: Optional[str] = None):\n if authorizations is not None:\n pulumi.set(__self__, \"authorizations\", authorizations)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if eligible_authorizations is not None:\n pulumi.set(__self__, \"eligible_authorizations\", eligible_authorizations)\n if managed_by_tenant_id is not None:\n pulumi.set(__self__, \"managed_by_tenant_id\", managed_by_tenant_id)\n if managed_by_tenant_name is not None:\n pulumi.set(__self__, \"managed_by_tenant_name\", managed_by_tenant_name)\n if managee_tenant_id is not None:\n pulumi.set(__self__, \"managee_tenant_id\", managee_tenant_id)\n if managee_tenant_name is not None:\n pulumi.set(__self__, \"managee_tenant_name\", managee_tenant_name)\n if provisioning_state is not None:\n pulumi.set(__self__, \"provisioning_state\", provisioning_state)\n if registration_definition_name is not None:\n pulumi.set(__self__, \"registration_definition_name\", registration_definition_name)", "def create_identity(msg: CreateIdentity_request):\n \n # Check if we have received some data in the POST\n if len(msg.DID) == 0:\n log.error(\"No data received\")\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=\"No data received\")\n\n # Create the identity using the library\n try:\n error, didDoc = tf.create_identity_subnode(\n msg.DID, msg.domain_name, msg.website, msg.commercial_name, msg.new_privatekey, msg.parent_privatekey)\n except Exception as e:\n detail=str(e)\n log.error(detail)\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=detail)\n\n if error is not None:\n log.error(error)\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=error)\n\n return {\"didDoc\": didDoc.to_dict()}", "def catalog_alias_create(self, args):\n try:\n if args.id:\n alias = self.server.connect_ermrest_alias(args.id)\n try:\n if alias.retrieve():\n print(\"Catalog alias already exists\")\n return\n except requests.HTTPError as e:\n if e.response.status_code == 404:\n pass\n else:\n raise\n owner = args.owner if args.owner else None\n alias = self.server.create_ermrest_alias(args.id, owner, args.alias_target)\n if not args.quiet:\n print(\"Created new catalog alias %s with the following configuration:\\n\" % alias.alias_id)\n pp(alias.retrieve())\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog alias not found', e)\n elif e.response.status_code == requests.codes.conflict:\n raise ResourceException(\"Catalog alias already exists\", e)\n else:\n raise", "def __init__(__self__, *,\n authentication_type: Optional[pulumi.Input[str]] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n endpoint_uri: Optional[pulumi.Input[str]] = None,\n entity_path: Optional[pulumi.Input[str]] = None,\n identity_id: Optional[pulumi.Input[str]] = None,\n iothub_id: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None):\n if authentication_type is not None:\n pulumi.set(__self__, \"authentication_type\", authentication_type)\n if 
connection_string is not None:\n pulumi.set(__self__, \"connection_string\", connection_string)\n if endpoint_uri is not None:\n pulumi.set(__self__, \"endpoint_uri\", endpoint_uri)\n if entity_path is not None:\n pulumi.set(__self__, \"entity_path\", entity_path)\n if identity_id is not None:\n pulumi.set(__self__, \"identity_id\", identity_id)\n if iothub_id is not None:\n pulumi.set(__self__, \"iothub_id\", iothub_id)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if resource_group_name is not None:\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)", "def __init__(__self__, *,\n acs_endpoint: Optional[pulumi.Input[str]] = None,\n acs_resource_id: Optional[pulumi.Input[str]] = None,\n acs_secret: Optional[pulumi.Input[str]] = None,\n cognitive_service_region: Optional[pulumi.Input[str]] = None,\n cognitive_service_resource_id: Optional[pulumi.Input[str]] = None,\n cognitive_service_subscription_key: Optional[pulumi.Input[str]] = None,\n default_locale: Optional[pulumi.Input[str]] = None,\n id: Optional[pulumi.Input[str]] = None,\n offer_type: Optional[pulumi.Input[str]] = None,\n phone_number: Optional[pulumi.Input[str]] = None):\n if acs_endpoint is not None:\n pulumi.set(__self__, \"acs_endpoint\", acs_endpoint)\n if acs_resource_id is not None:\n pulumi.set(__self__, \"acs_resource_id\", acs_resource_id)\n if acs_secret is not None:\n pulumi.set(__self__, \"acs_secret\", acs_secret)\n if cognitive_service_region is not None:\n pulumi.set(__self__, \"cognitive_service_region\", cognitive_service_region)\n if cognitive_service_resource_id is not None:\n pulumi.set(__self__, \"cognitive_service_resource_id\", cognitive_service_resource_id)\n if cognitive_service_subscription_key is not None:\n pulumi.set(__self__, \"cognitive_service_subscription_key\", cognitive_service_subscription_key)\n if default_locale is not None:\n pulumi.set(__self__, \"default_locale\", default_locale)\n if id is not None:\n pulumi.set(__self__, \"id\", id)\n if offer_type is not None:\n pulumi.set(__self__, \"offer_type\", offer_type)\n if phone_number is not None:\n pulumi.set(__self__, \"phone_number\", phone_number)", "def createCustomer(self, **params):\n return self.__req('create_customer', params)", "def CreateIDPConnector(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def create_tenant(name, domain):\n manager = get_manager()\n tenant = manager.resolve_tenant_id(name, domain=domain)\n if not tenant:\n manager.create_tenant(tenant_name=name,\n domain=domain,\n description='Created by Juju')\n log(\"Created new tenant '%s' in domain '%s'\" % (name, domain),\n level=DEBUG)\n return\n\n log(\"Tenant '%s' already exists.\" % name, level=DEBUG)", "def m_create_test_identities():\n\n # Get the ROOT account (it was created in the deployment of the Smart Contracts)\n ROOT_address, ROOT_key = wallet.account_from_name(\"ROOT\", \"ThePassword\")\n\n # Create the Alastria account for node \"ala\"\n print(f\"\\n==> Creating the Alastria account\")\n Alastria_account = wallet.new_account(\n \"Alastria\", \"ThePassword\")\n alakey = Alastria_account.key\n print(f\"Alastria key: {alakey}\")\n\n print(f\"Done\")\n\n # Set the subnode \"ala\"\n print(f\"\\n==> Creating the ala subnode in the Trust Framework\")\n success, _, _ = ens.setSubnodeOwner(\n node_name=\"root\",\n label=\"ala\",\n new_owner_address=Alastria_account.address,\n 
current_owner_key=ROOT_key\n )\n print(f\"ala subnode created\")\n\n # Assign the name for reverse resolution\n resolver.setName(\"ala\", \"ala\", Alastria_account.key)\n\n # And assign approval to the PublicResolver contract so it can call ENS methods on behalf of Alastria\n print(f\"Resolver address for ROOT: {resolver.address()}\")\n ens.setApprovalForAll(resolver.address(), True, Alastria_account.key)\n\n ################################\n # Heathrow airport\n print(f\"\\n==> Creating the Heathrow identity\")\n\n DID = \"did:elsi:VATGB-927365404\"\n domain_name = \"heathrow.ala\"\n website = \"www.heathrow.com\"\n commercial_name = \"Heathrow Airport Limited\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # AENA\n print(f\"\\n==> Creating the AENA identity\")\n\n DID = \"did:elsi:VATES-A86212420\"\n domain_name = \"aena.ala\"\n website = \"www.aena.es\"\n commercial_name = \"Aena\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # Lanzarote airport\n # The airport belongs to AENA and does not have independent entity (shares the same VAT, for example)\n # In production, the node should be created by AENA, as a subnode controlled by them.\n # In this PoC, the node is created automatically to facilitate the tests\n print(f\"\\n==> Creating the César Manrique airport identity\")\n\n DID = \"did:elsi:VATES-A86212420-1\"\n domain_name = \"ace.ala\"\n website = \"www.aena.es/es/aeropuerto-lanzarote\"\n commercial_name = \"Aeropuerto de Lanzarote-Cesar Manrique\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # Metrovacesa\n print(f\"\\n==> Creating the Metrovacesa identity\")\n\n DID = \"did:elsi:VATES-A87471264\"\n domain_name = \"metrovacesa.ala\"\n website = \"metrovacesa.com\"\n commercial_name = \"Metrovacesa\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # IN2\n print(f\"\\n==> Creating the IN2 identity\")\n\n DID = \"did:elsi:VATES-B60645900\"\n domain_name = \"in2.ala\"\n website = \"www.in2.es\"\n commercial_name = \"IN2 Innovating 2gether\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # Perfect Health\n print(f\"\\n==> Creating the Perfect Health identity\")\n\n DID = \"did:elsi:VATES-X12345678X\"\n domain_name = \"perfecthealth.ala\"\n website = \"www.perfecthealth.org\"\n commercial_name = \"Perfect Health plc\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n if didDoc is not None:\n pprint(didDoc)\n\n ################################\n # BME\n print(f\"\\n==> Creating the BME identity\")\n\n DID = \"did:elsi:VATES-A83246314\"\n domain_name = \"bme.ala\"\n website = \"www.bolsasymercados.es\"\n commercial_name = \"Bolsas y Mercados Españoles\"\n\n error, didDoc = create_identity(DID, domain_name, website, commercial_name, \"Alastria\", \"ThePassword\", False)\n 
if didDoc is not None:\n pprint(didDoc)", "def create_endpoint(coriolis, name, platform_type, connection_info,\n barbican=None, description=''):\n # check provider type is installed server-side:\n providers_dict = coriolis.providers.list().to_dict()\n if platform_type not in providers_dict:\n raise ValueError(\n 'platform_type must be one of %s' % providers_dict.keys())\n\n # if Barbican is available, store the connection info in it:\n if barbican:\n secret_ref = store_barbican_secret_for_coriolis(\n barbican, connection_info, name='Coriolis Endpoint %s' % name)\n connection_info = {'secret_ref': secret_ref}\n\n # create the endpoint:\n endpoint = coriolis.endpoints.create(\n name, platform_type, connection_info, description)\n\n return endpoint", "def create(self, identity, data=None, record=None, **kwargs):\n record.metadata = data.get('metadata', {})", "def create_identity(name, tags=None, typ=None):\n if tags:\n tags = frozenset(tags.items())\n return (typ, name, tags)", "def __init__(__self__, *,\n iothub_id: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n authentication_type: Optional[pulumi.Input[str]] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n endpoint_uri: Optional[pulumi.Input[str]] = None,\n entity_path: Optional[pulumi.Input[str]] = None,\n identity_id: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"iothub_id\", iothub_id)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if authentication_type is not None:\n pulumi.set(__self__, \"authentication_type\", authentication_type)\n if connection_string is not None:\n pulumi.set(__self__, \"connection_string\", connection_string)\n if endpoint_uri is not None:\n pulumi.set(__self__, \"endpoint_uri\", endpoint_uri)\n if entity_path is not None:\n pulumi.set(__self__, \"entity_path\", entity_path)\n if identity_id is not None:\n pulumi.set(__self__, \"identity_id\", identity_id)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def create_providerinfo(self, setup=None):\n pcr_class = self.server.message_factory.get_response_type(\n \"configuration_endpoint\"\n )\n _provider_info = copy.deepcopy(self.capabilities.to_dict())\n\n if self.jwks_uri and self.keyjar:\n _provider_info[\"jwks_uri\"] = self.jwks_uri\n\n for endp in self.endp:\n if not self.baseurl.endswith(\"/\"):\n baseurl = self.baseurl + \"/\"\n else:\n baseurl = self.baseurl\n _provider_info[\"{}_endpoint\".format(endp.etype)] = urljoin(\n baseurl, endp.url\n )\n\n if setup and isinstance(setup, dict):\n for key in pcr_class.c_param.keys():\n if key in setup:\n _provider_info[key] = setup[key]\n\n _provider_info[\"issuer\"] = self.name\n _provider_info[\"version\"] = \"3.0\"\n\n return pcr_class(**_provider_info)", "def __init__(__self__, *,\n identity_client_id: Optional[str] = None,\n resource_id: Optional[str] = None):\n if identity_client_id is not None:\n pulumi.set(__self__, \"identity_client_id\", identity_client_id)\n if resource_id is not None:\n pulumi.set(__self__, \"resource_id\", resource_id)", "def create_course(self, org, offering, user_id=None, fields=None, store_name='default', **kwargs):\r\n store = self.modulestores[store_name]\r\n\r\n if not hasattr(store, 'create_course'):\r\n raise NotImplementedError(u\"Cannot create a course on store %s\" % store_name)\r\n\r\n return store.create_course(org, offering, user_id, fields, **kwargs)", "def create_policy(policystore_url, create_policy_request, verbose):\n\n if verbose:\n 
logging.info('Creating policy')\n pprint.pprint(create_policy_request)\n\n create_url = policystore_url + POLICYSTORE_PREFIX + 'CreateEntitlementPolicy'\n\n r = requests.post(\n create_url, headers=headers(), json=create_policy_request)\n if r.status_code != 200:\n logging.error(f'ERROR: Unexpected response: {r.status_code}')\n pprint.pprint(r.json())\n\n sys.exit('Failed to create policy')\n\n resp = r.json()\n\n logging.info(\n f'SUCCESS: Created policy - ID: {resp[\"policy_id\"]}, Token: {resp[\"token\"]}'\n )\n\n return resp", "def create_authorizer(self, ApiId: str, AuthorizerType: str, AuthorizerUri: str, IdentitySource: List, Name: str, AuthorizerCredentialsArn: str = None, AuthorizerResultTtlInSeconds: int = None, IdentityValidationExpression: str = None, ProviderArns: List = None) -> Dict:\n pass", "def provider_setup(cls, args, config):\n if len(args) < 1:\n print \"USAGE: molns provider setup name\"\n print \"\\tCreates a new provider with the given name.\"\n return\n # check if provider exists\n try:\n provider_obj = config.get_object(args[0], kind='Provider')\n except DatastoreException:\n # ask provider type\n print \"Select a provider type:\"\n for n, p in enumerate(VALID_PROVIDER_TYPES):\n print \"\\t[{0}] {1}\".format(n, p)\n while True:\n try:\n provider_ndx = int(raw_input_default(\"Enter the number of type:\", default='0'))\n provider_type = VALID_PROVIDER_TYPES[provider_ndx]\n break\n except (ValueError, IndexError):\n pass\n logging.debug(\"Provider type '{0}'\".format(provider_type))\n # Create provider\n try:\n provider_obj = config.create_object(name=args[0], ptype=provider_type, kind='Provider')\n except DatastoreException as e:\n logging.exception(e)\n print e\n return\n print \"Enter configuration for provider {0}:\".format(args[0])\n setup_object(provider_obj)\n config.save_object(provider_obj, kind='Provider')\n\n cls.provider_initialize(args[0], config)", "def create(self, request, *args, **kwargs):\n response = super(ProviderViewSet, self).create(request, *args, **kwargs)\n response.data['message'] = \"Provedor ha sido creado\"\n return response", "def add_new_identity(self, identity):\n query = \"\"\"INSERT INTO yubikeys (\n active,\n created,\n modified,\n yk_publicname,\n yk_counter,\n yk_use,\n yk_low,\n yk_high,\n nonce\n ) VALUES (\n %(active)s,\n %(created)s,\n %(modified)s,\n %(yk_publicname)s,\n %(yk_counter)s,\n %(yk_use)s,\n %(yk_low)s,\n %(yk_high)s,\n %(nonce)s\n )\"\"\"\n self._execute(query, identity)", "def register_vim(cls,\n cloud_owner: str,\n cloud_region_id: str,\n default_tenant: str = None) -> None:\n cls.send_message(\n \"POST\",\n \"Register VIM instance to ONAP\",\n f\"{cls.base_url}/{cloud_owner}/{cloud_region_id}/registry\",\n data={\"defaultTenant\": default_tenant} if default_tenant else None\n )", "def test_creating_new_dietitian(self):\n\n form_data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"jill23@gmail.com\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\"}\n\n dietitian_id = create_new_dietitian_account(form_data)\n\n self.assertEqual(2, dietitian_id)", "def create_organization(user, name):\n organization = Organization.objects.create(name=name)\n new_user = OrganizationUser.objects.create(organization=organization,\n user=user, is_admin=True)\n OrganizationOwner.objects.create(organization=organization,\n organization_user=new_user)\n return organization", "def create(request: PlantRequestCreate) -> Plant:\n 
logger.debug(f'Executing Plant create with request:{request}')\n return Plant(request.name, request.bed_id)", "def create(self, identity, record=None, data=None, **kwargs):\n self.set_slug(record, data[\"slug\"])", "def create_entity(self, data):\n url = '{}/ngsi-ld/v1/entities'.format(self.url)\n return self.post(url, data=data, headers=self.headers_ld)", "def catalog_create(self, args):\n try:\n if args.id and self.server.connect_ermrest(args.id).exists():\n print(\"Catalog already exists\")\n return\n owner = args.owner if args.owner else None\n catalog = self.server.create_ermrest_catalog(args.id, owner)\n if args.auto_configure:\n model = catalog.getCatalogModel()\n model.configure_baseline_catalog(**args.configure_args)\n if not args.quiet:\n print(\"Created new catalog %s with the following default configuration:\\n\" % catalog.catalog_id)\n pp(catalog.get('/').json())\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n elif e.response.status_code == requests.codes.conflict:\n raise ResourceException(\"Catalog already exists\", e)\n else:\n raise e", "def create_provider(storage_auth):\n if not storage_auth:\n provider = Local(storage_auth)\n elif storage_auth.type == 'MINIO':\n provider = Minio(storage_auth)\n elif storage_auth.type == 'ONEDATA':\n provider = Onedata(storage_auth)\n elif storage_auth.type == 'S3':\n provider = S3(storage_auth)\n elif storage_auth.type == 'WEBDAV':\n provider = WebDav(storage_auth)\n else:\n raise InvalidStorageProviderError(storage_type=storage_auth.type)\n return provider", "def identity_provider_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_provider_name\")", "def __init__(__self__, *,\n identity: pulumi.Input['UserAssignedIdentityArgs'],\n name: pulumi.Input[str],\n namespace: pulumi.Input[str],\n binding_selector: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"identity\", identity)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"namespace\", namespace)\n if binding_selector is not None:\n pulumi.set(__self__, \"binding_selector\", binding_selector)", "def set_up_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity = None\n enable_managed_identity = self.context.get_enable_managed_identity()\n assign_identity = self.context.get_assign_identity()\n if enable_managed_identity and not assign_identity:\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif enable_managed_identity and assign_identity:\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc" ]
[ "0.6144289", "0.5990092", "0.5924737", "0.5494253", "0.5485202", "0.53507775", "0.53166866", "0.51905113", "0.5181825", "0.51246256", "0.5108798", "0.51040316", "0.5087071", "0.50794953", "0.50547856", "0.50211", "0.50116605", "0.5002519", "0.4999784", "0.49900097", "0.49755615", "0.49569455", "0.49335718", "0.49200395", "0.48801878", "0.48650733", "0.48602", "0.4849936", "0.48346928", "0.48288682", "0.4822313", "0.4812243", "0.4808785", "0.47864598", "0.478513", "0.47799176", "0.47696725", "0.4766216", "0.47441348", "0.474185", "0.47396144", "0.47211447", "0.4691938", "0.4669608", "0.46668106", "0.46544063", "0.46327364", "0.46303168", "0.46154886", "0.46135175", "0.4600081", "0.45957485", "0.45938447", "0.45807824", "0.45780686", "0.45743057", "0.45720914", "0.45684895", "0.45622417", "0.45622417", "0.4562011", "0.45581207", "0.455003", "0.45374", "0.45317268", "0.45253313", "0.45212337", "0.45187786", "0.45102787", "0.45082206", "0.44999415", "0.4498287", "0.44897586", "0.44825053", "0.44784242", "0.44638413", "0.4463559", "0.4457431", "0.4455178", "0.44503322", "0.44465393", "0.44435173", "0.4442427", "0.4429296", "0.44247583", "0.44201916", "0.441777", "0.44053167", "0.44037873", "0.4395962", "0.4393481", "0.439326", "0.43897533", "0.43885937", "0.43873098", "0.43854132", "0.4380034", "0.4375211", "0.4374899", "0.43659025" ]
0.61856276
0
Creates a single mapping between an IdP group and an IAM Service Console group
def create_idp_group_mapping(self, create_idp_group_mapping_details, identity_provider_id, **kwargs): resource_path = "/identityProviders/{identityProviderId}/groupMappings" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_idp_group_mapping got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "identityProviderId": identity_provider_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_idp_group_mapping_details, response_type="IdpGroupMapping") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_idp_group_mapping_details, response_type="IdpGroupMapping")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_api_mapping(self, ApiId: str, DomainName: str, Stage: str, ApiMappingKey: str = None) -> Dict:\n pass", "def get_service_mapping():\r\n # Get all Service types:\r\n all_service_type = requests.get(base_url + 'services/v2/service_types', headers=headers3).json()\r\n # Make Dict of service names and ids\r\n service_name_to_id = {service_type['attributes']['name']:service_type['id'] for service_type in all_service_type['data']} \r\n return service_name_to_id", "def test_ipam_services_create(self):\n pass", "def normalize_idp(idp):\n if idp is None:\n return None\n\n _idp = idp.to_dict()\n _idp['enabled'] = idp['is_enabled']\n _idp['name'] = idp['id']\n return _idp", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def create_mapping(self):\n\n indice = client.IndicesClient(self.es)\n\n indice.put_mapping(index=self.es_main_index,\n doc_type=self.es_main_type,\n body=self.mapping)", "def create_participant_group_mapping(\n self,\n qualification_name: str,\n requester_id: str,\n prolific_project_id: str,\n prolific_participant_group_name: str,\n prolific_participant_group_id: str,\n ) -> None:\n try:\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n INSERT INTO participant_groups(\n qualification_name,\n requester_id,\n prolific_project_id,\n prolific_participant_group_name,\n prolific_participant_group_id\n ) VALUES (?, ?, ?, ?, ?);\n \"\"\",\n (\n qualification_name,\n requester_id,\n prolific_project_id,\n prolific_participant_group_name,\n prolific_participant_group_id,\n ),\n )\n return None\n\n except sqlite3.IntegrityError as e:\n if is_unique_failure(e):\n # Ignore attempt to add another mapping for an existing key\n db_qualification = self.get_qualification_mapping(qualification_name)\n\n logger.debug(\n f\"Multiple Prolific mapping creations \"\n f'for qualification \"{qualification_name}\". '\n f\"Found existing one: {db_qualification}. 
\"\n )\n assert (\n db_qualification is not None\n ), \"Cannot be none given is_unique_failure on insert\"\n\n db_requester_id = db_qualification[\"requester_id\"]\n db_prolific_qualification_name = db_qualification[\"prolific_participant_group_name\"]\n\n if db_requester_id != requester_id:\n logger.warning(\n f\"Prolific Qualification mapping create for {qualification_name} \"\n f\"under requester {requester_id}, already exists under {db_requester_id}.\"\n )\n\n if db_prolific_qualification_name != prolific_participant_group_name:\n logger.warning(\n f\"Prolific Qualification mapping create for {qualification_name} \"\n f\"with Prolific name {prolific_participant_group_name}, \"\n f\"already exists under {db_prolific_qualification_name}.\"\n )\n\n return None\n else:\n raise e", "def _build_ec2_mapping_from_sg(resource_to_analyse, result_dict, session):\n for instance, security_group in _generate_ec2_instance_and_sg(resource_to_analyse):\n sg_dict = _check_if_in_list(result_dict, security_group[\"GroupId\"], \"sg_id\")\n if sg_dict is not None:\n sg_dict[\"resources_attached\"].append({\n \"resource_id\": instance.id,\n \"resource_type\": \"ec2\",\n \"resource_name\": \"\" if _check_if_in_list(instance.tags, \"Name\", \"Key\") is None else _check_if_in_list(instance.tags, \"Name\", \"Key\").get(\"Value\", \"\")\n })\n else:\n result_dict.append({\n \"sg_id\": security_group[\"GroupId\"],\n \"sg_name\": security_group[\"GroupName\"],\n \"resources_attached\": [{\n \"resource_id\": instance.id,\n \"resource_type\": \"ec2\",\n \"resource_name\": \"\" if _check_if_in_list(instance.tags, \"Name\", \"Key\") is None else _check_if_in_list(instance.tags, \"Name\", \"Key\").get(\"Value\", \"\")\n }]\n })\n return result_dict", "async def groups_service_handler(service: ServiceCall) -> None:\n object_id = service.data[ATTR_OBJECT_ID]\n entity_id = f\"{DOMAIN}.{object_id}\"\n group = component.get_entity(entity_id)\n\n # new group\n if service.service == SERVICE_SET and group is None:\n entity_ids = (\n service.data.get(ATTR_ENTITIES)\n or service.data.get(ATTR_ADD_ENTITIES)\n or None\n )\n\n extra_arg = {\n attr: service.data[attr]\n for attr in (ATTR_ICON,)\n if service.data.get(attr) is not None\n }\n\n await Group.async_create_group(\n hass,\n service.data.get(ATTR_NAME, object_id),\n object_id=object_id,\n entity_ids=entity_ids,\n user_defined=False,\n mode=service.data.get(ATTR_ALL),\n **extra_arg,\n )\n return\n\n if group is None:\n _LOGGER.warning(\"%s:Group '%s' doesn't exist!\", service.service, object_id)\n return\n\n # update group\n if service.service == SERVICE_SET:\n need_update = False\n\n if ATTR_ADD_ENTITIES in service.data:\n delta = service.data[ATTR_ADD_ENTITIES]\n entity_ids = set(group.tracking) | set(delta)\n await group.async_update_tracked_entity_ids(entity_ids)\n\n if ATTR_REMOVE_ENTITIES in service.data:\n delta = service.data[ATTR_REMOVE_ENTITIES]\n entity_ids = set(group.tracking) - set(delta)\n await group.async_update_tracked_entity_ids(entity_ids)\n\n if ATTR_ENTITIES in service.data:\n entity_ids = service.data[ATTR_ENTITIES]\n await group.async_update_tracked_entity_ids(entity_ids)\n\n if ATTR_NAME in service.data:\n group.name = service.data[ATTR_NAME]\n need_update = True\n\n if ATTR_ICON in service.data:\n group.icon = service.data[ATTR_ICON]\n need_update = True\n\n if ATTR_ALL in service.data:\n group.mode = all if service.data[ATTR_ALL] else any\n need_update = True\n\n if need_update:\n group.async_write_ha_state()\n\n return\n\n # remove group\n if 
service.service == SERVICE_REMOVE:\n await component.async_remove_entity(entity_id)", "def update_pin_group():\n create_instance(new=False)", "def create_sg(vpc_id, description, group_name):\n client = boto3.client('ec2')\n security_group = str(group_name + \"_sg\")\n\n # get the security groups\n idle_sg = get_sg()\n\n print(idle_sg)\n print(security_group)\n\n # if security group doesnt exist, create it\n if security_group not in idle_sg:\n print(\"Creating SG\")\n return client.create_security_group(\n Description=description,\n GroupName=security_group,\n VpcId=vpc_id\n )\n return get_sg_id(security_group)", "def create_mapping_group(access_path, block_volumes, client_group, host=None):\n retval = -1\n if not isinstance(access_path, int):\n access_path = AccessPath.get_access_path_id(access_path, host)\n if not isinstance(client_group, int):\n client_group = ClientGroup.get_client_group_id(client_group, host)\n if access_path == -1 or client_group == -1:\n print \"[Error] The access path or client group name/id is invalid.\"\n else:\n try:\n retval, block_volumes = _handle_block_volumes(block_volumes, host)\n if retval != 0:\n return retval\n except Exception as e:\n print \"[Error] The block volumes are not valid.\"\n else:\n cmd = utils.XMS_CLI_HEADER + \"mapping-group create --access-path {ap} --block-volumes {bvs} --client-group {cg}\".format(ap=access_path, bvs=block_volumes, cg=client_group)\n print cmd\n ret = utils.execute_cmd_in_host(cmd, host)\n if ret[2] != 0:\n retval = -1\n print \"[Error] Failed to create mapping group. Error message: [{err}]\".format(err=ret[1])\n else:\n retval = 0\n return retval", "def testServiceMapping_NoRegistry(self):\n mapping = self.DoMappingTest({'/my-service': MyService}, None)", "def add_tomcat7_idp():\n pass", "def create_mapping(project, img):\n with BMI(_username, _password, project) as bmi:\n ret = bmi.mount_image(img)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo('Success')\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def _addProteinIdsToGroupMapping(self, proteinIds, groupId):\n for proteinId in AUX.toList(proteinIds):\n self._proteinToGroupIds[proteinId].add(groupId)", "def _build_ec2_mapping_from_resources(resource_to_analyse, result_dict, session):\n for instance, security_group in _generate_ec2_instance_and_sg(resource_to_analyse):\n resource_dict = _check_if_in_list(result_dict, instance.id, \"resource_id\")\n if resource_dict is not None:\n resource_dict[\"sg_attached\"].append({\n \"sg_id\": security_group[\"GroupId\"],\n \"sg_name\": security_group[\"GroupName\"]\n })\n else:\n result_dict.append({\n \"resource_id\": instance.id,\n \"resource_type\": \"ec2\",\n \"resource_name\": \"\" if _check_if_in_list(instance.tags, \"Name\", \"Key\") is None else _check_if_in_list(instance.tags, \"Name\", \"Key\").get(\"Value\", \"\"),\n \"sg_attached\": [{\n \"sg_id\": security_group[\"GroupId\"],\n \"sg_name\": security_group[\"GroupName\"]\n }]\n })\n return result_dict", "def test_create_user_identity_mapping(self):\n pass", "def post_service_instance_create(self, resource_dict):\n pass", "def handle_region(self, region, args):\n result = [CHECKMARK, str(region), \"created security group '{}'\".format(GROUP_NAME)]\n\n try:\n # Create the security group\n response = region.conn.create_security_group(\n Description='Security group for Alia replicas and clients.',\n GroupName=GROUP_NAME,\n )\n\n # Get the newly created group id\n group_id = response[\"GroupId\"]\n\n # Allow all network traffic from within the security 
group\n response = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 0, \"ToPort\": 65535,\n \"UserIdGroupPairs\": [\n {\n \"GroupId\": group_id,\n \"Description\": \"allow all traffic from the same group\",\n }\n ]\n }\n ]\n )\n\n # Open Alia-specific ports for access\n reponse = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 22, \"ToPort\": 22,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"allow remote SSH access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 3264, \"ToPort\": 3285,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"external Alia service access\",\n }\n ],\n \"Ipv6Ranges\": [\n {\n \"CidrIpv6\": \"::/0\",\n \"Description\": \"external Alia service IPv6 access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 5356, \"ToPort\": 5356,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"research services access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 4157, \"ToPort\": 4157,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"master services access\",\n }\n ]\n },\n ]\n )\n\n\n except Exception as e:\n result[0] = CROSSMARK\n result[2] = str(e)\n\n\n return result", "def createFieldMapping(sgidPoints):\n # Create field mappings\n sgidFMs = arcpy.FieldMappings()\n\n # Perform some field renaming\n mapPairs = [\n ('State', 'State'),\n ('City', 'Inc_Muni'),\n ('CountyID', 'County'),\n ('ZipCode', 'Zip_Code'),\n ('PrefixDir', 'StN_PreDir'),\n ('StreetName', 'StreetName'),\n ('StreetType', 'StN_PosTyp'),\n ('SuffixDir', 'StN_PosDir'),\n ('AddNum', 'Add_Number'),\n ('LandmarkName', 'landmkName'),\n ('Building', 'Building'),\n ('UnitType', 'Unit'),\n ('AddSource', 'AddAuth'),\n ('AddSystem', 'UniqWithin'),\n ('LoadDate', 'LastUpdate')]\n\n for p in mapPairs:\n print p\n sgidFMs.addFieldMap(getRenameFieldMap(sgidPoints, p[0], p[1]))\n\n return sgidFMs", "def remap_ids(self, id_map: Dict[int, int]) -> None:", "def pre_service_instance_create(self, resource_dict):\n pass", "def servicebindingmaps(self, servicebindingmap_id, data, tenant_id=None, api_version=\"v2.1\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/servicebindingmaps/{}\".format(api_version,\n tenant_id,\n servicebindingmap_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)", "def post_security_group_create(self, resource_dict):\n pass", "def remap_ids(self, id_map: Dict[int, int]) -> None:\n super().remap_ids(id_map)\n self.puppet = id_map.get(self.puppet, 0)", "def _build_rds_mapping_from_sg(resource_to_analyse, result_dict, session):\n for db_instance, security_group, sg_name in _generate_rds_instances_and_sg(resource_to_analyse, session):\n resource_dict = _check_if_in_list(result_dict, security_group[\"VpcSecurityGroupId\"], \"sg_id\")\n if resource_dict is not None:\n resource_dict[\"resources_attached\"].append({\n \"resource_id\": db_instance[\"DBInstanceIdentifier\"],\n \"resource_type\": \"rds\"\n })\n else:\n result_dict.append({\n \"sg_id\": 
security_group[\"VpcSecurityGroupId\"],\n \"sg_name\": sg_name,\n \"resources_attached\": [{\n \"resource_id\": db_instance[\"DBInstanceIdentifier\"],\n \"resource_type\": \"rds\"\n }]\n })\n return result_dict", "def __init__(__self__, *,\n arn: Optional[pulumi.Input[str]] = None,\n auto_scaling_configuration_arn: Optional[pulumi.Input[str]] = None,\n encryption_configuration: Optional[pulumi.Input['ServiceEncryptionConfigurationArgs']] = None,\n health_check_configuration: Optional[pulumi.Input['ServiceHealthCheckConfigurationArgs']] = None,\n instance_configuration: Optional[pulumi.Input['ServiceInstanceConfigurationArgs']] = None,\n network_configuration: Optional[pulumi.Input['ServiceNetworkConfigurationArgs']] = None,\n observability_configuration: Optional[pulumi.Input['ServiceObservabilityConfigurationArgs']] = None,\n service_id: Optional[pulumi.Input[str]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n service_url: Optional[pulumi.Input[str]] = None,\n source_configuration: Optional[pulumi.Input['ServiceSourceConfigurationArgs']] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if auto_scaling_configuration_arn is not None:\n pulumi.set(__self__, \"auto_scaling_configuration_arn\", auto_scaling_configuration_arn)\n if encryption_configuration is not None:\n pulumi.set(__self__, \"encryption_configuration\", encryption_configuration)\n if health_check_configuration is not None:\n pulumi.set(__self__, \"health_check_configuration\", health_check_configuration)\n if instance_configuration is not None:\n pulumi.set(__self__, \"instance_configuration\", instance_configuration)\n if network_configuration is not None:\n pulumi.set(__self__, \"network_configuration\", network_configuration)\n if observability_configuration is not None:\n pulumi.set(__self__, \"observability_configuration\", observability_configuration)\n if service_id is not None:\n pulumi.set(__self__, \"service_id\", service_id)\n if service_name is not None:\n pulumi.set(__self__, \"service_name\", service_name)\n if service_url is not None:\n pulumi.set(__self__, \"service_url\", service_url)\n if source_configuration is not None:\n pulumi.set(__self__, \"source_configuration\", source_configuration)\n if status is not None:\n pulumi.set(__self__, \"status\", status)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)", "def pre_security_group_create(self, resource_dict):\n pass", "def _build_elbv2_mapping_from_sg(resource_to_analyse, result_dict, session):\n for elb_instance, security_group_id, security_group_name in _generate_elb_instances_and_sg(resource_to_analyse, session):\n resource_dict = _check_if_in_list(result_dict, security_group_id, \"sg_id\")\n if resource_dict is not None:\n resource_dict[\"resources_attached\"].append({\n \"resource_id\": elb_instance[\"LoadBalancerName\"],\n \"resource_type\": \"elb\"\n })\n else:\n result_dict.append({\n \"sg_id\": security_group_id,\n \"sg_name\": security_group_name,\n \"resources_attached\": [{\n \"resource_id\": elb_instance[\"LoadBalancerName\"],\n \"resource_type\": \"elb\"\n }]\n })\n return result_dict", "def map_service_info(port, nmap_store):\n service = port.find(\"service\")\n nmap_store[\"service_name\"] = service.get(\"name\")\n 
nmap_store[\"service_method\"] = service.get(\"method\")\n nmap_store[\"service_conf\"] = service.get(\"conf\")", "def testDomainMappingCreateAlreadyExistsPrompts(self):\n\n self.operations.CreateDomainMapping.side_effect = [\n exceptions.DomainMappingAlreadyExistsError(), self.domain_mapping\n ]\n\n with mock.patch(\n 'googlecloudsdk.api_lib.run.global_methods.GetServerlessClientInstance',\n return_value=self.mock_serverless_client):\n verified_domains = [\n self.mock_serverless_client.MESSAGES_MODULE.AuthorizedDomain(\n id='www.example.com')\n ]\n with mock.patch(\n 'googlecloudsdk.api_lib.run.global_methods.ListVerifiedDomains',\n return_value=verified_domains):\n self.WriteInput('y\\n')\n self.Run('run domain-mappings create '\n '--service myapp --domain www.example.com')\n self.AssertOutputContains(\n \"\"\"NAME RECORD TYPE CONTENTS\n myapp A 216.239.32.21\"\"\",\n normalize_space=True)", "def generate_public_ID(self, mapping):\n raise exception.NotImplemented() # pragma: no cover", "def createMappingSetMapping(self,mappingSetId:str=None,mapping:dict=None,verbose:bool=False)->dict:\n if mappingSetId is None:\n raise ValueError(\"Require a mapping ID\")\n if mapping is None or type(mapping)!=dict:\n raise Exception(\"Require a dictionary as mapping\")\n path = f\"/mappingSets/{mappingSetId}/mappings\"\n res = self.connector.postData(self.endpoint+path,data=mapping,verbose=verbose)\n return res", "def __init__(__self__, *,\n region: str,\n service_id: str,\n tag: str):\n pulumi.set(__self__, \"region\", region)\n pulumi.set(__self__, \"service_id\", service_id)\n pulumi.set(__self__, \"tag\", tag)", "def __init__(__self__, *,\n application_name: pulumi.Input[str],\n cluster_name: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n service_kind: pulumi.Input[Union[str, 'ServiceKind']],\n correlation_scheme: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCorrelationDescriptionArgs']]]] = None,\n default_move_cost: Optional[pulumi.Input[Union[str, 'MoveCost']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n partition_description: Optional[pulumi.Input[Union['NamedPartitionSchemeDescriptionArgs', 'SingletonPartitionSchemeDescriptionArgs', 'UniformInt64RangePartitionSchemeDescriptionArgs']]] = None,\n placement_constraints: Optional[pulumi.Input[str]] = None,\n service_dns_name: Optional[pulumi.Input[str]] = None,\n service_load_metrics: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricDescriptionArgs']]]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n service_package_activation_mode: Optional[pulumi.Input[Union[str, 'ArmServicePackageActivationMode']]] = None,\n service_placement_policies: Optional[pulumi.Input[Sequence[pulumi.Input['ServicePlacementPolicyDescriptionArgs']]]] = None,\n service_type_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"application_name\", application_name)\n pulumi.set(__self__, \"cluster_name\", cluster_name)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"service_kind\", service_kind)\n if correlation_scheme is not None:\n pulumi.set(__self__, \"correlation_scheme\", correlation_scheme)\n if default_move_cost is not None:\n pulumi.set(__self__, \"default_move_cost\", default_move_cost)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if partition_description is not None:\n pulumi.set(__self__, \"partition_description\", partition_description)\n 
if placement_constraints is not None:\n pulumi.set(__self__, \"placement_constraints\", placement_constraints)\n if service_dns_name is not None:\n pulumi.set(__self__, \"service_dns_name\", service_dns_name)\n if service_load_metrics is not None:\n pulumi.set(__self__, \"service_load_metrics\", service_load_metrics)\n if service_name is not None:\n pulumi.set(__self__, \"service_name\", service_name)\n if service_package_activation_mode is not None:\n pulumi.set(__self__, \"service_package_activation_mode\", service_package_activation_mode)\n if service_placement_policies is not None:\n pulumi.set(__self__, \"service_placement_policies\", service_placement_policies)\n if service_type_name is not None:\n pulumi.set(__self__, \"service_type_name\", service_type_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def create_mapset(self, mapset, dbase=None, location=None):\n module = 'g.c.mapset'\n gs.run_command(module, mapset=mapset, dbase=dbase, location=location)", "def creategroup(body):\n group = body.get(\"groupname\", None)\n pps = body.get(\"pilotpoints\", None)\n print('lol',group, pps)\n print(type(pps))\n\n # Does the person exist already?\n if group not in group_dict and group is not None:\n group_dict[group] = {\n \"groupname\": group,\n \"pilotpoints\": pps,\n }\n return group_dict[group], 201\n\n # Otherwise, they exist, that's an error\n else:\n abort(\n 406,\n \"Person with last name {group} already exists\".format(group=group),\n )", "def _make_group(self, _rk, _group_hint):\n\n if isinstance(_group_hint, dict):\n # _group_hint is a single key/value pair\n g = _group_hint[list(_group_hint)[0]]\n\n r_type = g.get(\"type\", \"none\")\n if r_type != \"OS::Nova::ServerGroup\":\n return \"support only ServerGroup resource\"\n\n properties = g.get(\"properties\", {})\n if len(properties) == 0:\n return \"no properties\"\n\n group_name = properties.get(\"name\", None)\n if group_name is None:\n return \"no group name\"\n group_name = group_name.strip()\n\n policies = properties.get(\"policies\", [])\n if len(policies) == 0:\n return \"no policy of the group\"\n\n if len(policies) > 1:\n return \"multiple policies\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if group_name in self.groups.keys():\n group = self.groups[group_name]\n else:\n group = Group(group_name)\n\n policy = policies[0].strip()\n if policy == \"anti-affinity\":\n group_type = \"diversity\"\n else:\n group_type = policy\n\n group.group_type = group_type\n group.factory = \"server-group\"\n group.level = \"host\"\n\n self.groups[group_name] = group\n else:\n # group hint is uuid string.\n rg = self.resource.get_group_by_uuid(_group_hint)\n if rg is None:\n return \"unknown group found while making group\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if rg.name in self.groups.keys():\n group = self.groups[rg.name]\n else:\n group = Group(rg.name)\n\n group.group_type = rg.group_type\n group.factory = rg.factory\n group.level = \"host\"\n\n self.groups[rg.name] = group\n\n if group is not None:\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"", "def create(person_group_id, name=None, user_data=None):\n name = person_group_id if name is None else name\n url = 'persongroups/{}'.format(person_group_id)\n json = {\n 'name': name,\n 'userData': user_data,\n }\n\n return util.request('PUT', url, json=json)", "def pre_network_ipam_create(self, resource_dict):\n pass", "def create_mapset(self, mapset, dbase=None, location=None):\n module = 
'g.mapset'\n gs.run_command(module, flags='c', mapset=mapset, dbase=dbase, location=location)", "def create(self, mapItem: MapItem) -> int:\n pass", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def _build_rds_mapping_from_resources(resource_to_analyse, result_dict, session):\n for db_instance, security_group, sg_name in _generate_rds_instances_and_sg(resource_to_analyse, session):\n resource_dict = _check_if_in_list(result_dict, db_instance[\"DBInstanceIdentifier\"], \"resource_id\")\n if resource_dict is not None:\n resource_dict[\"sg_attached\"].append({\n \"sg_id\": security_group[\"VpcSecurityGroupId\"],\n \"sg_name\": sg_name\n })\n else:\n result_dict.append({\n \"resource_id\": db_instance[\"DBInstanceIdentifier\"],\n \"resource_type\": \"rds\",\n \"sg_attached\": [{\n \"sg_id\": security_group[\"VpcSecurityGroupId\"],\n \"sg_name\": sg_name\n }]\n })\n return result_dict", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('ACCO').get('abstractTypes')\n exolinks = globalMap.get('ACCO').get('exolinks')\n\n # Class AccessControlStore\n currentMap = {}\n abstractTypes['AccessControlStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:18:10_00001'] = currentMap\n loadMaps['ACCO.AccessControlStore'] = currentMap\n currentMap['tag'] = 'ACCO.AccessControlStore'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:18:10_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'accessControlStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = memops.api.AccessControl.AccessControlStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AccessControlStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AccessControlStore.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AccessControlStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute AccessControlStore.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AccessControlStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AccessControlStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:38_00006'] = currentMap\n loadMaps['ACCO.AccessControlStore.name'] = currentMap\n currentMap['tag'] = 'ACCO.AccessControlStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:38_00006'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AccessControlStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AccessControlStore.accessObjects\n currentMap = {}\n contentMap['accessObjects'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00013'] = currentMap\n loadMaps['ACCO.AccessControlStore.accessObjects'] = currentMap\n currentMap['tag'] = 'ACCO.AccessControlStore.accessObjects'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00013'\n currentMap['name'] = 'accessObjects'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ACCO').get('abstractTypes')\n\n # Role AccessControlStore.userGroups\n currentMap = {}\n contentMap['userGroups'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:38_00003'] = currentMap\n loadMaps['ACCO.AccessControlStore.userGroups'] = currentMap\n currentMap['tag'] = 'ACCO.AccessControlStore.userGroups'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:38_00003'\n currentMap['name'] = 'userGroups'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ACCO').get('abstractTypes')\n\n # Role AccessControlStore.users\n currentMap = {}\n contentMap['users'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:38_00001'] = currentMap\n loadMaps['ACCO.AccessControlStore.users'] = currentMap\n currentMap['tag'] = 'ACCO.AccessControlStore.users'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:38_00001'\n currentMap['name'] = 'users'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ACCO').get('abstractTypes')\n # End of AccessControlStore\n\n currentMap = abstractTypes.get('AccessControlStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['users', 'userGroups', 'accessObjects', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['accessObjects', 'userGroups', 'users']\n currentMap['children'] = aList\n\n # Class AccessObject\n currentMap = {}\n abstractTypes['AccessObject'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00014'] = currentMap\n loadMaps['ACCO.AccessObject'] = currentMap\n currentMap['tag'] = 'ACCO.AccessObject'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'accessObjects'\n currentMap['objkey'] = 'name'\n currentMap['class'] = memops.api.AccessControl.AccessObject\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AccessObject.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AccessObject.description\n currentMap = {}\n contentMap['description'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:27_00005'] = currentMap\n loadMaps['ACCO.AccessObject.description'] = currentMap\n currentMap['tag'] = 'ACCO.AccessObject.description'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:27_00005'\n currentMap['name'] = 'description'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Attribute AccessObject.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:27_00004'] = currentMap\n loadMaps['ACCO.AccessObject.name'] = currentMap\n currentMap['tag'] = 'ACCO.AccessObject.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:27_00004'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AccessObject.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AccessObject.permissions\n currentMap = {}\n contentMap['permissions'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:27_00001'] = currentMap\n loadMaps['ACCO.AccessObject.permissions'] = currentMap\n currentMap['tag'] = 'ACCO.AccessObject.permissions'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:27_00001'\n currentMap['name'] = 'permissions'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('ACCO').get('abstractTypes')\n # End of AccessObject\n\n currentMap = abstractTypes.get('AccessObject')\n aList = ['description', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['permissions', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['permissions']\n currentMap['children'] = aList\n\n # Class Permission\n currentMap = {}\n abstractTypes['Permission'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00018'] = currentMap\n loadMaps['ACCO.Permission'] = currentMap\n currentMap['tag'] = 'ACCO.Permission'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00018'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'permissions'\n currentMap['class'] = memops.api.AccessControl.Permission\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Permission.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Permission.opType\n currentMap = {}\n contentMap['opType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00021'] = currentMap\n loadMaps['ACCO.Permission.opType'] = currentMap\n currentMap['tag'] = 'ACCO.Permission.opType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00021'\n currentMap['name'] = 'opType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'any'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Permission.permission\n currentMap = {}\n contentMap['permission'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00023'] = currentMap\n loadMaps['ACCO.Permission.permission'] = currentMap\n currentMap['tag'] = 'ACCO.Permission.permission'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00023'\n currentMap['name'] = 'permission'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n 
currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Permission.permissionClass\n currentMap = {}\n contentMap['permissionClass'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00020'] = currentMap\n loadMaps['ACCO.Permission.permissionClass'] = currentMap\n currentMap['tag'] = 'ACCO.Permission.permissionClass'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00020'\n currentMap['name'] = 'permissionClass'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'any'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Permission.roleName\n currentMap = {}\n contentMap['roleName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00022'] = currentMap\n loadMaps['ACCO.Permission.roleName'] = currentMap\n currentMap['tag'] = 'ACCO.Permission.roleName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00022'\n currentMap['name'] = 'roleName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'any'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Permission.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Permission.userGroup\n currentMap = {}\n contentMap['userGroup'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00016'] = currentMap\n loadMaps['ACCO.Permission.userGroup'] = currentMap\n currentMap['tag'] = 'ACCO.Permission.userGroup'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00016'\n currentMap['name'] = 'userGroup'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['copyOverride'] = True\n # End of Permission\n\n currentMap = abstractTypes.get('Permission')\n aList = ['opType', 'permission', 'permissionClass', 'roleName']\n currentMap['headerAttrs'] = aList\n aList = ['userGroup']\n currentMap['optLinks'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class User\n currentMap = {}\n abstractTypes['User'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00017'] = currentMap\n loadMaps['ACCO.User'] = currentMap\n currentMap['tag'] = 'ACCO.User'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00017'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'users'\n currentMap['objkey'] = 'name'\n currentMap['class'] = memops.api.AccessControl.User\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute User.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute User.isSuperuser\n currentMap = {}\n contentMap['isSuperuser'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-05-06-13:30:17_00060'] = currentMap\n loadMaps['ACCO.User.isSuperuser'] = currentMap\n currentMap['tag'] = 'ACCO.User.isSuperuser'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-05-06-13:30:17_00060'\n currentMap['name'] = 'isSuperuser'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute User.name\n currentMap = {}\n contentMap['name'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00019'] = currentMap\n loadMaps['ACCO.User.name'] = currentMap\n currentMap['tag'] = 'ACCO.User.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00019'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute User.passwordHashed\n currentMap = {}\n contentMap['passwordHashed'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2009-08-19-17:31:11_00005'] = currentMap\n loadMaps['ACCO.User.passwordHashed'] = currentMap\n currentMap['tag'] = 'ACCO.User.passwordHashed'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2009-08-19-17:31:11_00005'\n currentMap['name'] = 'passwordHashed'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Role User.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role User.ledGroups\n currentMap = {}\n contentMap['ledGroups'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00014'] = currentMap\n loadMaps['ACCO.User.ledGroups'] = currentMap\n currentMap['tag'] = 'ACCO.User.ledGroups'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00014'\n currentMap['name'] = 'ledGroups'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role User.userGroups\n currentMap = {}\n contentMap['userGroups'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00012'] = currentMap\n loadMaps['ACCO.User.userGroups'] = currentMap\n currentMap['tag'] = 'ACCO.User.userGroups'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00012'\n currentMap['name'] = 'userGroups'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of User\n\n currentMap = abstractTypes.get('User')\n aList = ['isSuperuser']\n currentMap['headerAttrs'] = aList\n aList = ['name', 'passwordHashed', 'ledGroups', 'userGroups']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class UserGroup\n currentMap = {}\n abstractTypes['UserGroup'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00016'] = currentMap\n loadMaps['ACCO.UserGroup'] = currentMap\n currentMap['tag'] = 'ACCO.UserGroup'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00016'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'userGroups'\n currentMap['objkey'] = 'name'\n currentMap['class'] = memops.api.AccessControl.UserGroup\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute UserGroup.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute UserGroup.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00018'] = currentMap\n loadMaps['ACCO.UserGroup.name'] = currentMap\n currentMap['tag'] = 'ACCO.UserGroup.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00018'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n 
currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role UserGroup.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role UserGroup.leaders\n currentMap = {}\n contentMap['leaders'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00015'] = currentMap\n loadMaps['ACCO.UserGroup.leaders'] = currentMap\n currentMap['tag'] = 'ACCO.UserGroup.leaders'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00015'\n currentMap['name'] = 'leaders'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role UserGroup.members\n currentMap = {}\n contentMap['members'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00013'] = currentMap\n loadMaps['ACCO.UserGroup.members'] = currentMap\n currentMap['tag'] = 'ACCO.UserGroup.members'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00013'\n currentMap['name'] = 'members'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n\n # Role UserGroup.permissions\n currentMap = {}\n contentMap['permissions'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00017'] = currentMap\n loadMaps['ACCO.UserGroup.permissions'] = currentMap\n currentMap['tag'] = 'ACCO.UserGroup.permissions'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00017'\n currentMap['name'] = 'permissions'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n # End of UserGroup\n\n currentMap = abstractTypes.get('UserGroup')\n aList = ['name', 'leaders', 'members', 'permissions']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to AccessControlStore\n currentMap = {}\n exolinks['AccessControlStore'] = currentMap\n loadMaps['ACCO.exo-AccessControlStore'] = currentMap\n currentMap['tag'] = 'ACCO.exo-AccessControlStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:18:10_00001'\n currentMap['name'] = 'AccessControlStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = memops.api.AccessControl.AccessControlStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to AccessObject\n currentMap = {}\n exolinks['AccessObject'] = currentMap\n loadMaps['ACCO.exo-AccessObject'] = currentMap\n currentMap['tag'] = 'ACCO.exo-AccessObject'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00014'\n currentMap['name'] = 'AccessObject'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = memops.api.AccessControl.AccessObject\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to Permission\n currentMap = {}\n exolinks['Permission'] = currentMap\n loadMaps['ACCO.exo-Permission'] = currentMap\n currentMap['tag'] = 'ACCO.exo-Permission'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00018'\n currentMap['name'] = 'Permission'\n currentMap['eType'] = 'cplx'\n 
currentMap['class'] = memops.api.AccessControl.Permission\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(globalMap.get('ACCO').get('exolinks'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to User\n currentMap = {}\n exolinks['User'] = currentMap\n loadMaps['ACCO.exo-User'] = currentMap\n currentMap['tag'] = 'ACCO.exo-User'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00017'\n currentMap['name'] = 'User'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = memops.api.AccessControl.User\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to UserGroup\n currentMap = {}\n exolinks['UserGroup'] = currentMap\n loadMaps['ACCO.exo-UserGroup'] = currentMap\n currentMap['tag'] = 'ACCO.exo-UserGroup'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00016'\n currentMap['name'] = 'UserGroup'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = memops.api.AccessControl.UserGroup\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))", "def generate_object(self, **kwargs):\n from ranger_performance_tool import perf_globals\n service_type_mapping = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"service_type_mapping\")\n if \"id\" not in kwargs:\n kwargs[\"id\"] = self.random_generator.generate_int()\n if \"name\" not in kwargs:\n kwargs[\"name\"] = self.random_generator.generate_string()\n if \"service\" not in kwargs:\n enabled_service = random.choice(perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"enabled_services\"))\n service_type = service_type_mapping[enabled_service]\n kwargs[\"service\"] = enabled_service\n kwargs[\"serviceType\"] = service_type\n policy_object = RangerPolicy()\n for key, value in kwargs.items():\n if key not in dir(policy_object):\n raise Exception(\"Invalid key: \" + key)\n policy_object[key] = value\n service_type = policy_object.serviceType\n service_store = perf_globals.OBJECT_STORE.service_store[service_type]\n policy_object.resources = service_store.generate_resources()\n return policy_object", "def role_create(ctx, name, service):\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.create_role\n service_code = SERVICE.get(service, None)\n if service_code:\n res = IAM.client().create_role(\n RoleName=name,\n AssumeRolePolicyDocument=service_policy(service_code),\n )\n click.echo(J(res))", "def execute(self, pool, vthunder):\n\n args = {'service_group': self.meta(pool, 'service_group', {})}\n try:\n conf_templates = self.readConf('SERVICE_GROUP','templates').strip('\"')\n service_group_temp = {}\n service_group_temp['template-server'] = conf_templates\n except:\n service_group_temp = None\n\n try:\n c = self.client_factory(vthunder)\n 
lb_method=openstack_mappings.service_group_lb_method(c,pool.lb_algorithm)\n out = c.slb.service_group.create(pool.id, pool.protocol, lb_method,\n service_group_temp, axapi_args=args)\n LOG.info(\"Pool created successfully.\")\n except Exception as e:\n print(str(e))\n LOG.info(\"Error occurred\")", "def create_project_apikey(self\n ,project_id\n ,description='pyatlas generated project apikey'\n ,roles='PROJECT_OWNER'):\n print('pyatlas - create_apikey') \n roles = roles.split(',')\n pprint.pprint(roles)\n data = { 'desc' : description, 'roles' : roles }\n pprint.pprint(data)\n \n target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys'\n print( f'target={target}' )\n print( f'data={data}' )\n response = self.post(target, body=data)\n return response", "def _db_key_to_servicekey(key):\n return ServiceKey(\n name=key.name,\n kid=key.kid,\n service=key.service,\n jwk=key.jwk,\n metadata=key.metadata,\n created_date=key.created_date,\n expiration_date=key.expiration_date,\n rotation_duration=key.rotation_duration,\n approval=key.approval,\n )", "def mk_rg1(self):\n name = f\"{self.env_name}/sg/project-default\"\n self.sg_project_default = ec2.SecurityGroup(\n \"SecurityGroupProjectDefault\",\n rp_GroupDescription=\"Resources that has this security can talk to each other\",\n p_GroupName=name,\n p_VpcId=self.vpc_id,\n p_SecurityGroupIngress=[\n ec2.PropSecurityGroupIngress(\n rp_IpProtocol=\"-1\",\n p_FromPort=-1,\n p_ToPort=-1,\n p_CidrIp=f\"{authorized_ip}/32\",\n )\n for authorized_ip in self.sg_authorized_ips\n ],\n p_Tags=cf.Tag.make_many(\n Name=name\n ),\n )", "def placement_group(template, name):\n p = PlacementGroup(name, template=template)\n p.Strategy = 'cluster'\n return p", "def create_datasource_mapping(connection: Connection, body, error_msg: Optional[str] = None):\n url = f\"{connection.base_url}/api/datasources/mappings\"\n response = connection.session.post(url=url, json=body)\n if not response.ok:\n if error_msg is None:\n error_msg = \"Error creating Datasource mapping\"\n response_handler(response, error_msg)\n return response", "def create_placement_group(self, name, strategy='cluster'):\r\n params = {'GroupName':name, 'Strategy':strategy}\r\n group = self.get_status('CreatePlacementGroup', params, verb='POST')\r\n return group", "def _build_elbv2_mapping_from_resources(resource_to_analyse, result_dict, session):\n for elb_instance, security_group_id, security_group_name in _generate_elb_instances_and_sg(resource_to_analyse, session):\n resource_dict = _check_if_in_list(result_dict, elb_instance[\"LoadBalancerName\"], \"resource_id\")\n if resource_dict is not None:\n resource_dict[\"sg_attached\"].append({\n \"sg_id\": security_group_id,\n \"sg_name\": security_group_name\n })\n else:\n result_dict.append({\n \"resource_id\": elb_instance[\"LoadBalancerName\"],\n \"resource_type\": \"elb\",\n \"sg_attached\": [{\n \"sg_id\": security_group_id,\n \"sg_name\": security_group_name\n }]\n })\n return result_dict", "def setUp(self):\n user = User.objects.create(email=\"test1@test.com\", first_name=\"Test1\", last_name=\"User\")\n group = AnaGroup.objects.create(name=\"test group\")\n IAM.objects.create(user=user,\n aws_user=\"AWS user\",\n aws_access_key=\"AWS access key\",\n aws_secret_access_key=\"AWS secret key\",\n group=group)", "def service_account(configure_security):\n try:\n name = config.SERVICE_NAME\n secret = \"{}-secret\".format(name)\n sdk_security.create_service_account(\n service_account_name=name, service_account_secret=secret)\n # TODO(mh): Fine grained 
permissions needs to be addressed in DCOS-16475\n sdk_cmd.run_cli(\n \"security org groups add_user superusers {name}\".format(name=name))\n yield {\"name\": name, \"secret\": secret}\n finally:\n return", "def test_create_resource_group(self):\n pass", "def mapping(self, release_id, grouping):\n\n helper = Known(self.config, self.session)\n classes = [g['name']['full'] for g in grouping]\n return helper.mapping(release_id, classes)", "def test_create_namespaced_image_stream_mapping(self):\n pass", "def create_group(group_id, group_name):\n\n kwargs = config.DEFAULT_REST_KWARGS\n kwargs[\"data\"] = {\"id\": group_id, \"name\": group_name}\n http_response = call_rest_api(\"/identities/groups/\", \"post\", **kwargs)\n if http_response.status_code != 201: # 201 = 'new group created'\n raise ValueError(http_response.text)\n logger.log(f\"New custom group, {group_name}, with ID: {group_id}, was created successfully.\")", "def make_mapping(self) -> None:\n start_mark = StreamMark('generated node', 0, 0, 0)\n end_mark = StreamMark('generated node', 0, 0, 0)\n self.yaml_node = yaml.MappingNode('tag:yaml.org,2002:map', list(),\n start_mark, end_mark)", "def group_single(self, dss_group, unix_user, hadoop_user=None):\n self.raw['type'] = 'SINGLE_MAPPING'\n self.raw['dssGroup'] = dss_group\n self.raw['targetUnix'] = unix_user\n self.raw['targetHadoop'] = hadoop_user\n return self", "def post_network_ipam_create(self, resource_dict):\n pass", "def __init__(__self__, *,\n domain_id: Optional[pulumi.Input[str]] = None,\n group_id: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n role_id: Optional[pulumi.Input[str]] = None,\n user_id: Optional[pulumi.Input[str]] = None):\n if domain_id is not None:\n pulumi.set(__self__, \"domain_id\", domain_id)\n if group_id is not None:\n pulumi.set(__self__, \"group_id\", group_id)\n if project_id is not None:\n pulumi.set(__self__, \"project_id\", project_id)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if role_id is not None:\n pulumi.set(__self__, \"role_id\", role_id)\n if user_id is not None:\n pulumi.set(__self__, \"user_id\", user_id)", "def testServiceMapping_ByList(self):\n self.DoMappingTest(\n [('/my-service1', MyService.new_factory('service1')),\n ('/my-service2', MyService.new_factory('service2')),\n ])", "def create_asu_map(mapin, mapout):\n\n # Masking object\n masker = CommandManager('mapmask')\n # Set input files\n masker.add_command_line_arguments(['mapin',mapin,'mapout',mapout])\n # Set stdin\n masker.add_standard_input(['XYZLIM ASU','END'])\n # Run!\n masker.run()\n # Report errors\n if masker.process.returncode!=0:\n raise RuntimeError('mapmask failed to create asu map from {!s}'.format(mapin))\n\n # Return Command Managers for flexible handling of out & err\n return masker", "def create_iam_role(iam):\n print(\"Creating a new IAM Role\") \n try:\n resp = iam.create_role(Path='/',\n RoleName=DWH_IAM_ROLE_NAME,\n Description = \"Allows Redshift clusters to call AWS services on your behalf.\",\n AssumeRolePolicyDocument=json.dumps({'Statement': [{'Action': 'sts:AssumeRole',\n 'Effect': 'Allow',\n 'Principal': {'Service': 'redshift.amazonaws.com'}}],\n 'Version': '2012-10-17'}\n )\n )\n print(\"IAM Role created:\")\n print(resp)\n except iam.exceptions.EntityAlreadyExistsException:\n print(\"IAM Role already created\")\n except Exception as e:\n print(\"Error creating IAM Role:\", e)", "def assignGroupIDs(self):\n components = 
self.getComponents(graph_dictionary=self.graph_dict)\n self._gIDs = np.zeros(self.no_plants, dtype='object')\n for i in components.keys():\n self._gIDs[components[i]] = 'gID_' + str(i)", "def create_srv_entry(srv_name, ip, port):\n res = {}\n res['service'] = \"_{}._tcp.marathon.mesos\".format(srv_name)\n res['host'] = \"{}-74b1w-s1.marathon.mesos.\".format(srv_name)\n res['ip'] = ip\n res['port'] = port\n\n return res", "def generate_control_mappings(self, control):\n acr_creator = all_models.AccessControlRole.query.filter_by(\n name=\"Creators\", object_type=\"Assessment\"\n ).first()\n with factories.single_commit():\n person = factories.PersonFactory()\n asmnt_ids = []\n for _ in range(2):\n asmnt = factories.AssessmentFactory()\n asmnt_ids.append(asmnt.id)\n factories.AccessControlListFactory(\n object=asmnt, person=person, ac_role=acr_creator\n )\n\n for asmnt_id in asmnt_ids:\n asmnt = all_models.Assessment.query.get(asmnt_id)\n self.gen.generate_relationship(source=asmnt, destination=control)", "def create_namespaced_user_identity_mapping(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_user_identity_mapping\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_user_identity_mapping`\")\n\n resource_path = '/oapi/v1/useridentitymappings'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1UserIdentityMapping',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def testServiceMapping_ByFactory(self):\n self.DoMappingTest({'/my-service': MyService.new_factory('new-value')})", "def testMapping(self):\n factory = service_handlers.ServiceHandlerFactory(Service)\n path, mapped_factory = factory.mapping('/my_service')\n\n self.assertEquals(r'(/my_service)' + service_handlers._METHOD_PATTERN, path)\n self.assertEquals(id(factory), id(mapped_factory))\n match = re.match(path, '/my_service.my_method')\n self.assertEquals('/my_service', match.group(1))\n self.assertEquals('my_method', match.group(2))\n\n path, mapped_factory = factory.mapping('/my_service/nested')\n self.assertEquals('(/my_service/nested)' +\n service_handlers._METHOD_PATTERN, path)\n match = re.match(path, '/my_service/nested.my_method')\n self.assertEquals('/my_service/nested', match.group(1))\n self.assertEquals('my_method', match.group(2))", "def 
amazonEc2_create(amazonEc2):\n\treturn amazonEc2", "def _mrp_service(self, _, address, port, properties):\n identifier = properties.get(\"UniqueIdentifier\")\n name = properties.get(\"Name\")\n service = conf.MrpService(identifier, port, properties=properties)\n self._handle_service(address, name, service)", "def create_inbound(self, keys):", "def post_service_appliance_set_create(self, resource_dict):\n pass", "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def create_entry_group(self, location_id, entry_group_id):\n entry_group = self.__datacatalog.create_entry_group(\n parent=f'projects/{self.__project_id}/locations/{location_id}',\n entry_group_id=entry_group_id,\n entry_group=datacatalog.EntryGroup())\n logging.info('Entry Group created: %s', entry_group.name)\n return entry_group", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def _transform(self, resource_from_api):\n for (project_id, backend_services) in resource_from_api.iteritems():\n for backend_service in backend_services:\n yield {'project_id': project_id,\n 'id': backend_service.get('id'),\n 'creation_timestamp': parser.format_timestamp(\n backend_service.get('creationTimestamp'),\n self.MYSQL_DATETIME_FORMAT),\n 'name': backend_service.get('name'),\n 'description': backend_service.get('description'),\n 'affinity_cookie_ttl_sec': self._to_int(\n backend_service.get('affinityCookieTtlSec')),\n 'backends': parser.json_stringify(\n backend_service.get('backends', [])),\n 'cdn_policy': parser.json_stringify(\n backend_service.get('cdnPolicy', {})),\n 'connection_draining': parser.json_stringify(\n backend_service.get('connectionDraining', {})),\n 'enable_cdn': self._to_bool(\n backend_service.get('enableCDN')),\n 'health_checks': parser.json_stringify(\n backend_service.get('healthChecks', [])),\n 'iap': parser.json_stringify(\n backend_service.get('iap', {})),\n 'load_balancing_scheme': backend_service.get(\n 'loadBalancingScheme'),\n 'port': self._to_int(backend_service.get('port')),\n 'port_name': backend_service.get('portName'),\n 'protocol': backend_service.get('protocol'),\n 'region': backend_service.get('region'),\n 'session_affinity': backend_service.get(\n 'sessionAffinity'),\n 'timeout_sec': backend_service.get('timeoutSec'),\n 'raw_backend_service':\n parser.json_stringify(backend_service)}", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def create(self, name, public_key=None):\n data = {\n \"keypair\": {\n \"name\": name\n }\n }\n if public_key is not None:\n data['keypair']['public_key'] = public_key\n \n path = '/os-keypairs'\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create/import openstack keypair: %s' % truncate(res))\n return res[0]['keypair']", "def createService(data):\n return Service(data).create()", "def form_point_to_tract_mapping(voter, mapping, geoid_to_id_mapping: GeoIDToIDMapping):\n # voter.name refers to the point ID\n mapping[voter.name] = geoid_to_id_mapping[voter[\"GEOID\"]]", "def create_from_changeset(changeset, stub=None, update_sender=None, update_recipient=None):\n group = Identity.create_from_changeset(changeset,\n stub=stub, update_sender=update_sender, update_recipient=update_recipient, kind=Group)\n\n group.description = 
changeset[\"description\"]\n group.set_state(changeset[\"state\"])\n request_list = list()\n\n # Update admin\n admin = Persona.query.get(changeset[\"admin_id\"])\n if admin is None or admin._stub:\n request_list.append({\n \"type\": \"Persona\",\n \"id\": changeset[\"admin_id\"],\n \"author_id\": update_recipient.id if update_recipient else None,\n \"recipient_id\": update_sender.id if update_sender else None,\n })\n\n if admin is None:\n admin = Persona(\n id=changeset[\"admin_id\"],\n )\n admin._stub = True\n\n group.admin = admin\n\n mc = group.update_members(changeset[\"members\"])\n for m in mc:\n request_list.append({\n \"type\": \"Persona\",\n \"id\": m,\n \"author_id\": update_recipient.id if update_recipient else None,\n \"recipient_id\": update_sender.id if update_sender else None,\n })\n\n for req in request_list:\n request_objects.send(Group.create_from_changeset, message=req)\n\n return group", "def write_map(sample_group, sample_dir):\n\n\n map_fname = new_file(\"map.txt\", basedir=sample_dir)\n\n def _write(targets):\n with open(map_fname, 'w') as map_file:\n # print the headers first\n print >> map_file, \"#\"+\"\\t\".join(sample_group[0]._fields)\n\n for _, samples_bycode in itertools.groupby(\n sample_group, operator.attrgetter(\"BarcodeSequence\")):\n # get the first (hopefully only) sample from the samples\n # grouped by ID then barcode. Ignore any other samples\n # under the same ID for the same barcode\n sample = samples_bycode.next()\n bcode = sample.BarcodeSequence\n # uniq-ify to make qiime happy\n sample = list(sample)\n sample[0] += \"_\" + bcode\n print >> map_file, \"\\t\".join(sample)\n\n return {\n \"name\": \"write_map:\"+map_fname,\n \"actions\": [_write],\n \"targets\": [map_fname],\n \"title\": lambda t: t.name+\" Estimated mem=200 time=5 threads=1\"\n }", "def test_groups_group_id_state_put(self):\n pass", "def get_idp_group_mapping(self, identity_provider_id, mapping_id, **kwargs):\n resource_path = \"/identityProviders/{identityProviderId}/groupMappings/{mappingId}\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"get_idp_group_mapping got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"identityProviderId\": identity_provider_id,\n \"mappingId\": mapping_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"IdpGroupMapping\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"IdpGroupMapping\")", "def test_valid_service_account_registration_multiple_service_accounts(\n app,\n db_session,\n client,\n encoded_jwt_service_accounts_access,\n cloud_manager,\n valid_google_project_patcher,\n 
valid_service_account_patcher,\n):\n proj_patcher = valid_google_project_patcher\n project = Project(id=1, auth_id=\"some_auth_id\")\n\n bucket = Bucket(id=1)\n\n db_session.add(project)\n db_session.add(bucket)\n db_session.commit()\n\n project_to_bucket = ProjectToBucket(project_id=1, bucket_id=1)\n\n db_session.add(project_to_bucket)\n db_session.commit()\n\n gbag = GoogleBucketAccessGroup(id=1, bucket_id=1, email=\"gbag@gmail.com\")\n\n db_session.add(gbag)\n db_session.commit()\n\n google_project_id = \"project-id\"\n encoded_creds_jwt = encoded_jwt_service_accounts_access[\"jwt\"]\n project_access = [\"some_auth_id\"]\n proj_patcher[\"get_service_account_ids_from_google_members\"].return_value = [\n \"test-{}@test.com\".format(google_project_id),\n \"{}@compute-system.iam.gserviceaccount.com\".format(google_project_id),\n ]\n valid_service_account = {\n \"service_account_email\": \"sa@gmail.com\",\n \"google_project_id\": google_project_id,\n \"project_access\": project_access,\n }\n\n (\n cloud_manager.return_value.__enter__.return_value.get_service_account.return_value\n ) = {\"uniqueId\": \"sa_unique_id\", \"email\": \"sa@gmail.com\"}\n\n (\n cloud_manager.return_value.__enter__.return_value.add_member_to_group.return_value\n ) = {\"email\": \"sa@gmail.com\"}\n\n assert len(db_session.query(UserServiceAccount).all()) == 0\n assert len(db_session.query(ServiceAccountAccessPrivilege).all()) == 0\n assert len(db_session.query(ServiceAccountToGoogleBucketAccessGroup).all()) == 0\n\n response = client.post(\n \"/google/service_accounts\",\n headers={\"Authorization\": \"Bearer \" + encoded_creds_jwt},\n data=json.dumps(valid_service_account),\n content_type=\"application/json\",\n )\n\n assert response.status_code == 200\n\n assert len(db_session.query(UserServiceAccount).all()) == 1\n assert len(db_session.query(ServiceAccountAccessPrivilege).all()) == 1\n assert len(db_session.query(ServiceAccountToGoogleBucketAccessGroup).all()) == 1", "def test_creation_mapped_control(self):\n control = factories.ControlFactory()\n # Map original of control to several assessments to get propagated roles\n self.generate_control_mappings(control)\n\n # Existing control should be updated to create new revision with ACL\n self.api.put(control, {\"title\": \"Test Control\"})\n\n program = factories.ProgramFactory()\n factories.RelationshipFactory(source=program, destination=control)\n response = self.api.post(all_models.Audit, [{\n \"audit\": {\n \"title\": \"New Audit\",\n \"program\": {\"id\": program.id},\n \"status\": \"Planned\",\n \"context\": None\n }\n }])\n self.assert200(response)", "def service_for_map(self, map_name):\n return getattr(self.map_services[map_name], self.service_name)", "def create_tap_service(attrs=None):\n attrs = attrs or {}\n tap_service_attrs = {\n 'id': uuidutils.generate_uuid(),\n 'tenant_id': uuidutils.generate_uuid(),\n 'name': 'test_tap_service' + uuidutils.generate_uuid(),\n 'status': 'ACTIVE',\n }\n tap_service_attrs.update(attrs)\n return copy.deepcopy(tap_service_attrs)", "def pre_service_template_create(self, resource_dict):\n pass", "def pairing_group_create(curve='MNT224'):\n return PairingGroup(curve)", "def create_group(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/Create/\"))", "def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in 
self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))" ]
[ "0.5638617", "0.535589", "0.52390397", "0.5175165", "0.5099908", "0.50906384", "0.5057816", "0.5040309", "0.50400156", "0.5031047", "0.5025609", "0.49890697", "0.49736708", "0.49677986", "0.49640617", "0.49320868", "0.49188626", "0.4918003", "0.4915699", "0.48859853", "0.48664403", "0.48657614", "0.48556882", "0.48550412", "0.48329398", "0.482938", "0.48289818", "0.48210335", "0.4819092", "0.4818401", "0.4805326", "0.47917113", "0.47863165", "0.47797883", "0.47793153", "0.4774191", "0.47699392", "0.47538427", "0.4747265", "0.47448146", "0.47445607", "0.47281212", "0.4727731", "0.4707899", "0.47053295", "0.46970582", "0.4692016", "0.46903306", "0.46802178", "0.4678766", "0.46715838", "0.46642473", "0.4659402", "0.46573117", "0.4653107", "0.4641925", "0.4641725", "0.4639226", "0.46284246", "0.46226", "0.46220678", "0.46193025", "0.4618647", "0.46103933", "0.46021172", "0.4589371", "0.45870268", "0.45811903", "0.45695955", "0.45692232", "0.45653656", "0.45454472", "0.45433107", "0.45367375", "0.45331404", "0.45317465", "0.45283136", "0.45281196", "0.45264888", "0.45229253", "0.45193213", "0.45170024", "0.4516713", "0.4516558", "0.4516558", "0.4515323", "0.45145604", "0.450457", "0.45032442", "0.45021445", "0.45010355", "0.44975236", "0.4495599", "0.44953844", "0.44917595", "0.44906196", "0.44883204", "0.44861823", "0.448293", "0.44793564" ]
0.5351723
2
Creates a new MFA TOTP device for the user. A user can have one MFA TOTP device.
def create_mfa_totp_device(self, user_id, **kwargs): resource_path = "/users/{userId}/mfaTotpDevices" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_mfa_totp_device got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="MfaTotpDevice") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="MfaTotpDevice")
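The method above looks like the OCI Python SDK IdentityClient operation backing this endpoint. A minimal usage sketch follows, assuming a standard ~/.oci/config profile and that the method is exposed on oci.identity.IdentityClient; every name not shown in the snippet above is an assumption for illustration, not part of the dataset row.

import oci

config = oci.config.from_file()                        # assumption: default config file and profile
identity = oci.identity.IdentityClient(config)         # assumption: client class exposing the method above
user_ocid = config["user"]                             # OCID of the user to enroll

response = identity.create_mfa_totp_device(user_ocid)  # POST /users/{userId}/mfaTotpDevices
device = response.data                                 # MfaTotpDevice; its seed is later used to activate the device
print(device.id)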
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def activate_mfa_totp_device(self, user_id, mfa_totp_device_id, mfa_totp_token, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/activate\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"activate_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing),\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=mfa_totp_token,\n response_type=\"MfaTotpDeviceSummary\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=mfa_totp_token,\n response_type=\"MfaTotpDeviceSummary\")", "def post(self, request):\n request_data = request.data\n if 'mobile' not in request_data or not request_data['mobile']:\n return StandardHttpResponse.bad_rsp([], 'Invalid Data.')\n email = None\n mobile = None\n if '@' in request_data['mobile']:\n email = request_data['mobile']\n else:\n mobile = request_data['mobile']\n if mobile:\n user_profile_obj = UserModelQueries.get_user_profile_by_mobile(mobile)\n if not user_profile_obj:\n return StandardHttpResponse.bad_rsp([], 'Mobile is yet not registered or not verified.')\n else:\n user_profile_obj = UserModelQueries.get_user_profile_by_email(email)\n if not user_profile_obj:\n return StandardHttpResponse.bad_rsp([], 'Looks like You haven\\'t registered yet. 
Please Registered.')\n\n result, response = MobileOtpService.create_otp(user_profile_obj.mobile,\n 'ForgetPassword')\n if not result:\n return StandardHttpResponse.bad_rsp([], response)\n # TODO: Code to send the otp to mobile\n SentOTP.send_otp_to_email(email, response['otp'], 'Forget Password')\n response_data = {'otp_ref': response['otp_ref']}\n return StandardHttpResponse.rsp_200(response_data, 'An OTP Sent to {} to reset the password'\n .format(user_profile_obj.mobile.__str__()))", "def generate_totp_seed(self, user_id, mfa_totp_device_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/generateSeed\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"generate_totp_seed got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")", "def post(self, request):\n\n email = request.data.get('email')\n phone_number = request.data.get('phone_number')\n otp = request.data.get('otp')\n\n # check that otp is correct or not (otp should match with email or phone number\n otp_obj = Otp.objects.filter(Q(email_phone=email) | Q(email_phone=phone_number) & Q(code=otp)).first()\n if not otp_obj:\n response_json = {\n 'status': False,\n 'message': 'otp is incorrect',\n 'data': {}\n }\n\n return Response(response_json, status=400)\n\n # create new user\n request_json = {\n \"username\": request.data.get('username'),\n \"password\": make_password(request.data.get('password')),\n \"email\": email,\n \"phone_number\": phone_number\n }\n\n user_serialized = UserProfileSerializer(data=request_json)\n if not user_serialized.is_valid():\n return validate_error(user_serialized)\n user_serialized.save()\n\n user_obj = UserProfile.objects.filter(id=user_serialized.data.get('id')).first()\n if not user_obj:\n return existence_error('user')\n\n # create following and follower object\n following_obj = UserFollowing.objects.create(user=user_obj)\n follower_obj = UserFollower.objects.create(user=user_obj)\n\n token, created = Token.objects.get_or_create(user=user_obj)\n\n otp_obj.delete()\n\n response_json = {\n 'status': True,\n 'message': 'User successfully registered',\n 'data': 'Token {}'.format(token.key)\n }\n\n return 
Response(response_json, status=201)", "def create(self, validated_data):\n return MFAMethod.objects.get_or_create(\n user=self.user,\n name=self.context['name'],\n defaults={\n 'secret': create_secret(),\n 'is_active': False,\n }\n )", "def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)", "def post(self, request):\n\n email = request.data.get('email')\n phone_number = request.data.get('phone_number')\n otp = request.data.get('otp')\n\n # check that otp is correct or not (otp should match with email or phone number\n otp_obj = Otp.objects.filter(Q(email_phone=email) | Q(email_phone=phone_number) & Q(code=otp)).first()\n if not otp_obj:\n response_json = {\n 'status': False,\n 'message': 'otp is incorrect',\n 'data': {}\n }\n\n return Response(response_json, status=400)\n\n # login user\n user_obj = UserProfile.objects.filter(\n Q(phone_number=request.data.get('phone_number')) | Q(email=request.data.get('email'))).first()\n\n token, created = Token.objects.get_or_create(user=user_obj)\n\n otp_obj.delete()\n\n response_json = {\n 'status': True,\n 'message': 'User successfully Logged in',\n 'data': 'Token {}'.format(token.key)\n }\n\n return Response(response_json, status=200)", "def create_token(request, user):\n\n key = get_random_string(100)\n data = {}\n ip = get_client_ip_address(request)\n\n return Token.objects.create(user=user, key=key, data=json.dumps(data), ip=ip)", "def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)", "def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)", "def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)", "def create_user(self, phone_number, type, password, is_staff):\n return self.__create_user(phone_number, type, password, is_staff, False, False)", "def create(self, user, token):\n\n session['user'] = {\n 'id': str(user.id),\n 'login': user.login,\n 'token': token\n }\n\n return UserSession.create(session['user'])", "def create_gateway_device(self, body=None):\r\n return self.post(self.gateway_devices_path, body=body)", "def create_teacher(username, password, email, preferred_language,skype_id,name, phone_number, country,availability):\n person.create_person(username,password,email,preferred_language,skype_id,name,phone_number,country)\n teacher_account_id = person.get_last()\n query = 'INSERT INTO teacher VALUES( %s,%s );'\n args = (teacher_account_id, availability)\n database.connection.save_data(query, args)", "def _create_user(self, email, mobile_number, password, **extra_fields):\n\n print('model number')\n print(mobile_number)\n \n user = self.model(email=email,mobile_number = mobile_number, **extra_fields)\n user.set_password(password)\n \n user.save(using=self._db)\n return user", "def create_user(session, phone_number, name, pass_hash, funds=0.0):\n # Perform the db job\n user = User(phone_number=phone_number, name=name, pass_hash=pass_hash, funds=funds)\n session.add(user)\n session.commit()\n return USER_GET_URI.format(user_id=phone_number)", "def create(self,request):\n return 
CustomAuthToken().post(request)", "def create_new_user(cls, user_email, user_password, user_phone):\n\n new_user = User(email=user_email, password=user_password, mobile_phone=user_phone)\n\n db.session.add(new_user)\n db.session.commit()\n\n print \"Successfully added new user with the email: %s\" % user_email", "def add_user(self):\n\n pin, code = self.get_auth_pin() \n print(\"Enter the PIN '{}' into the Add Application window and click Add Application\".format(pin))\n input(\"waiting press enter to continue...\")\n\n access_token, refresh_token = self.get_tokens(code)\n user_id = self.tokens.get_next_user_id()\n self.tokens.insert_user(user_id, access_token, refresh_token)\n tstat_ids = self.get_tstat_ids(access_token)\n for tstat_id in tstat_ids:\n logger.info(\"Adding Thermostat ID: {}\".format(tstat_id))\n self.tokens.insert_tstat(user_id, tstat_id)", "def create_user(self):\n User.objects.create_user('test', 'testing@test.com', 'testing')", "def create_device(self, device_dict):\n devices = {'devices': [device_dict]}\n url = '{}/iot/devices'.format(self.url)\n return self.post(url, data=json.dumps(devices), headers=self.headers)", "def create_user_device(self, email, device_str):\n if self.database is None:\n raise Exception(\"No database.\")\n if email is None or len(email) == 0:\n raise Exception(\"Email address not provided.\")\n if device_str is None or len(device_str) == 0:\n raise Exception(\"Device string not provided.\")\n user_id, _, _ = self.database.retrieve_user(email)\n return self.database.create_user_device(user_id, device_str)", "def user(self, user_token, user_device=None):\n self.set('user', user_token)\n self.set('device', user_device)", "def register_user_device(username: str, password: str, mac_address: str, email: Optional[str] = None) -> \\\n Union[str, Token]:\n ret = register_user(username, password, email)\n if isinstance(ret, str):\n return ret\n else:\n user_id = ret\n token, device_id = _add_update_device(user_id, mac_address)\n client_logger_security().info(f\"Successfully added new device: user_id={user_id}, device_id={device_id}\")\n _set_user_authenticated(user_id, device_id)\n return token", "def create(self, validated_data):\n request = self._kwargs['context']['request']\n user = User.objects.create(**validated_data)\n user.set_password(validated_data[\"password\"])\n user.save()\n category_list = ['Fuel', 'Bill', 'Entertainment', 'Education', 'Food']\n for category in category_list:\n user.user_categories.create(name=category)\n login(request, user)\n token, created = Token.objects.get_or_create(user=user)\n validated_data[\"token\"] = token.key\n return validated_data", "def create_user(self, email_or_phone, password=None, **extra_fields):\n return self._create_user(email_or_phone, password, False, False, **extra_fields)", "def RegisterDevice(self, device_id, machine_id, type, username):\n dmtoken_chars = []\n while len(dmtoken_chars) < 32:\n dmtoken_chars.append(random.choice('0123456789abcdef'))\n dmtoken = ''.join(dmtoken_chars)\n allowed_policy_types = {\n dm.DeviceRegisterRequest.BROWSER: [\n 'google/chrome/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.USER: [\n 'google/chromeos/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.DEVICE: [\n 'google/chromeos/device',\n 'google/chromeos/publicaccount',\n 'google/chrome/extension',\n 'google/chromeos/signinextension'\n ],\n dm.DeviceRegisterRequest.ANDROID_BROWSER: [\n 'google/android/user'\n ],\n dm.DeviceRegisterRequest.TT: ['google/chromeos/user',\n 
'google/chrome/user'],\n }\n if machine_id in KIOSK_MACHINE_IDS:\n enrollment_mode = dm.DeviceRegisterResponse.RETAIL\n else:\n enrollment_mode = dm.DeviceRegisterResponse.ENTERPRISE\n self._registered_tokens[dmtoken] = {\n 'device_id': device_id,\n 'device_token': dmtoken,\n 'allowed_policy_types': allowed_policy_types[type],\n 'machine_name': 'chromeos-' + machine_id,\n 'machine_id': machine_id,\n 'enrollment_mode': enrollment_mode,\n 'username': username,\n }\n self.WriteClientState()\n return self._registered_tokens[dmtoken]", "def create_user():\n record = request.get_json()\n if record is None:\n return {\"Error\": \"No data Supplied.\"}, 400\n\n schema = user_schema.load(record)\n\n if UserModel.objects(email=schema['email']):\n return {\"Error\": \"User Data already exists.\"}, 400\n user = UserModel(**schema)\n user.hash_password()\n user.save()\n ser_data = user_schema.dump(user)\n token = Auth.generate_token(ser_data[\"_id\"])\n return {\"message\": \"User Created Successfully\", \"Token\": token, \"id\": str(user.id)}, 200", "def create_user(self, email, mobile_number, password, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, mobile_number, password , **extra_fields)", "def create(self, data):\n data.pop('password_confirmation')\n try:\n availability = data.pop(\"availability\")\n babysitter = data.pop(\"user_bbs\")\n user = User.objects.create_user(**data, is_verified=False)\n if babysitter:\n bbs = Babysitter.objects.create(user_bbs=user, **babysitter)\n for shift in availability:\n Availability.objects.create(bbs=bbs, **shift)\n except KeyError:\n logging.info('This is a instance client')\n user = User.objects.create_user(**data, is_verified=False)\n logging.info(f'User created, whit pk {user.pk}')\n client = Client.objects.create(user_client=user)\n logging.info(f'User pk is already to pass {user.pk}')\n send_confirmation_email.delay(username=user.username, email=user.email )\n return user", "def get_mfa_totp_device(self, user_id, mfa_totp_device_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"get_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDeviceSummary\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDeviceSummary\")", "def create_auth_token(sender, instance=None, created=False, 
**kwargs): # pylint: disable=unused-argument\n if created:\n Token.objects.create(user=instance) # pylint: disable=no-member", "def test_api_create_atmuser(self):\n users_num = ATMUser.objects.count()\n\n atmuser = ATMUser.objects.get(card='0000000000000000') # get admin\n view = ATMUserViewSet.as_view({'post': 'create'})\n\n data = {'card': '7777777777777777', 'password': '7777', 'cash': 700}\n request = factory.post(reverse('atmuser-list'), data, format='json')\n\n force_authenticate(request, user=atmuser)\n response = view(request)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ATMUser.objects.count(), users_num + 1)", "def new_user(testapp):\n SessionFactory = testapp.app.registry[\"dbsession_factory\"]\n with transaction.manager:\n dbsession = get_tm_session(SessionFactory, transaction.manager)\n new_user = User(username=\"test\", password=pwd_context.hash(\"test\"))\n dbsession.add(new_user)", "def _create_auth_token(self, user=None):\n token, created = Token.objects.get_or_create(user=user)\n return token", "def create_form_user(self, **kwargs):\n user = User.objects.create_user(\n **kwargs\n )\n return user", "def create(self, data):\n # Make User\n code = (random.randint(1000, 9999))\n user = User.objects.get(pk=self.context['user'].pk)\n new = str(code).strip()\n hs = hashlib.sha1(new.encode()).hexdigest()\n user.password = hs\n user.save()\n send_verification_email.delay(email=data['email'], code=code)\n return user", "def create_user(self, password=None, phone=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n\n return self._create_user(password=password, phone=phone, **extra_fields)", "def _add_update_device(user_id: int, mac_address: str) -> Tuple[Token, int]:\n possible_device = Device.get_by_mac(mac_address)\n if possible_device is not None:\n if Token.is_token_expired(possible_device.token_expires):\n new_token = Token()\n possible_device.token = new_token\n possible_device.token_expires = Token.get_next_expired()\n return new_token, possible_device.device_id\n else:\n return possible_device.token, possible_device.device_id\n device_id = Device.create(user_id, mac_address, Token(), Token.get_next_expired())\n device_token = Device.from_id(device_id).token\n assert os.path.exists(path_utils.get_users_root_folder(user_id))\n server_json.create_changes_file_for_new_device(user_id, device_id, empty=True)\n return device_token, device_id", "def create(cls, imei, device_id):\n try:\n imei_device = cls(imei, device_id)\n imei_device.save()\n except Exception:\n raise Exception", "def create_user(fname, phone_num):\n\n user = User(fname=fname, phone_num=phone_num)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def new_user(cls, user):\r\n pass", "def create_custom_user(sender, instance, signal, created, **kwargs):\n from gpsfun.main.User.models import GPSFunUser\n if created:\n GPSFunUser.objects.create(user=instance)\n instance.gpsfunuser.save()", "def create_new_user():\n return get_user_model().objects.create_user(\n email='test@gmail.com',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def create_device(self, app_name='FooBar', device_type='Raspberry Pi 2'):\n\n app = self.resin.models.application.create(app_name, device_type)\n return app, self.resin.models.device.register(app['id'], self.resin.models.device.generate_uuid())", "def create_user_by_id(cls, m_id):", "def create_register_user(self, data, user_type):\n 
data.pop('password_confirm')\n data['user_type'] = user_type\n user = User.objects.create_user(**data)\n return user", "def create_user(self):\n unique_id = str(uuid.uuid4())\n new_user_properties = {\n \"name\": self.name,\n \"mission_statement\": self.mission_statement,\n \"unique_id\": unique_id,\n \"email\": self.email.lower(),\n \"is_mentor\": True,\n \"is_tutor\": True,\n \"is_visible\": True,\n \"is_available_for_in_person\": True,\n \"is_admin\": True}\n new_user_node = Node.cast(AgoraLabel.USER, new_user_properties)\n try:\n self.graph_db.create(new_user_node)\n except:\n pass\n return new_user_node", "def register():\n user_gender = request.form['user_gender']\n user_age = request.form['user_age']\n sensors = {\n 'accelerometer': request.form.get('accelerometer', False),\n 'ambient_temperature': request.form.get('ambient_temperature', False),\n 'gravity': request.form.get('gravity', False),\n 'gyroscope': request.form.get('gyroscope', False),\n 'light': request.form.get('light', False),\n 'linear_accelerometer': request.form.get('linear_accelerometer', False),\n 'magnetic_field': request.form.get('magnetic_field', False),\n 'orientation': request.form.get('orientation', False),\n 'pressure': request.form.get('pressure', False),\n 'proximity': request.form.get('proximity', False),\n 'relative_humidity': request.form.get('relative_humidity', False),\n 'rotation_vector': request.form.get('rotation_vector', False),\n 'temperature': request.form.get('temperature', False)\n }\n\n device, token = create_subject(user_gender, user_age, sensors)\n\n response = jsonify(status=\"Register Success\", message=\"Your device has been registered.\",\n device=device, token=token)\n\n response.status_code = 201\n\n return response", "def test_create_token_for_user(self):\r\n payload = {\r\n 'email': 'test@max.net',\r\n 'password': 'Testpass123',\r\n 'name': 'Maks'\r\n }\r\n create_user(**payload)\r\n\r\n res = self.client.post(TOKEN_URL, payload)\r\n\r\n self.assertIn('token', res.data)\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def create_user(self):\n u = USER.objects.create(username='test_user1',\n email='test_email@example.com', )\n u.set_password('test_password')\n u.save()\n self.user = u\n return u", "def post(self):\r\n return create_user(request)", "def post(self, request, *args, **kwargs):\n self.create(request, *args, **kwargs)\n token, created = Token.objects.get_or_create(user=self.user)\n return Response({'token': token.key}, status=201)", "def create_user(fname, lname, email, password, phone_number):\n user = User(fname = fname, lname = lname , email = email ,password = password, phone_number = phone_number)\n #setting password hash\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user_wallet(sender, instance, created, **kwargs):\n if created:\n Wallet.objects.create(user=instance)", "async def create_new_user(*, user: User):\n with Session(engine) as session:\n user.password = simple_hash(user.name, user.password) #Hashing password for security\n session.add(user)\n session.commit()\n return {\"message\": \"User {user_id} created\".format(user_id = user.id)}", "def new_user(cls, user):\n pass", "def create_user(self, phone, password=None, **extra_fields):\n print(extra_fields)\n if not phone:\n raise ValueError('Users must have an phone number')\n if not password:\n raise ValueError('Users must have a password')\n try:\n extra_fields['role']\n except Exception:\n raise ValueError('Users must have a role')\n 
try:\n extra_fields['name']\n except Exception:\n raise ValueError('Users must have a name') \n user = self.model(phone=phone, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def test_create_new_user(self, mapp, new_user_id):\n\n result = mapp.out_devpi(\n 'user', '-c',\n new_user_id, 'password=1234',\n 'email=%s@example.com' % new_user_id)\n assert result.ret == 0\n result.stdout.fnmatch_lines('user created: %s' % new_user_id)\n result = mapp.out_devpi('user', '-l')\n assert result.ret == 0\n result.stdout.fnmatch_lines(new_user_id)\n result = mapp.out_devpi('user', new_user_id)\n assert result.ret == 0\n result.stdout.fnmatch_lines('*email=%s@example.com' % new_user_id)", "def create_user(self):\n username = \"\".join(choice(\n string.ascii_letters) for x in range (randint(7,10)))\n params = {\n \"first_name\":\"ugali\",\n \"last_name\":\"mayai\",\n \"email\":\"ugalimayai@gmail.com\",\n \"username\":username,\n \"password\":\"password\"\n }\n path = \"/api/v2/auth/signup\"\n user = self.client.post(path,\n data=json.dumps(params),\n content_type=\"application/json\")\n \n user_id = user.json['user_id']\n auth_token = user.json['AuthToken']\n return int(user_id), auth_token", "def create_user(UserName=None, MessageAction=None, FirstName=None, LastName=None, AuthenticationType=None):\n pass", "def flask_create_device():\n try:\n # retrieve the authorization token\n token = retrieve_auth_token(request)\n\n params = {\n 'count': request.args.get('count', '1'),\n 'verbose': request.args.get('verbose', 'false'),\n 'content_type': request.headers.get('Content-Type'),\n 'data': request.data\n }\n\n result = DeviceHandler.create_device(params, token)\n devices = result.get('devices')\n deviceId = devices[0].get('id')\n LOGGER.info(f' Creating a new device with id {deviceId}.')\n return make_response(jsonify(result), 200)\n except HTTPRequestError as e:\n LOGGER.error(f' {e.message} - {e.error_code}.')\n if isinstance(e.message, dict):\n return make_response(jsonify(e.message), e.error_code)\n\n return format_response(e.error_code, e.message)", "def create_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"create_user\")", "def create_user(self):\n if not self.is_valid():\n return None\n # generate a username \n ids = User.objects.values_list('id', flat=True).order_by('-id')[:1]\n if len(ids) > 0:\n # ids[0] will be the maximum value (due to order_by: '-id')\n idnum = ids[0] + 1\n else:\n idnum = 1\n # create User object \n username = \"user%s\" % idnum\n # NOTE: store email in lower case\n email = self.clean_email().lower()\n password = self.clean_password2()\n user = User(username=username, email=email, password='tmp')\n user.save()\n # set the real password\n user.set_password(password)\n # make user inactive (until user has confirmed account)\n user.is_active = False\n # update\n user.save()\n return user", "def _create_user(self, new_user):\n new_user = User(user_name=new_user['user_name'], pin=new_user['pin'], user_type='customer')\n self.session.output(new_user.get_user_info(), '\\n[ New user created ]')", "def create_user_device_for_user_id(self, user_id, device_str):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None:\n raise Exception(\"User ID not provided.\")\n if device_str is None or len(device_str) == 0:\n raise Exception(\"Device string not provided.\")\n return self.database.create_user_device(user_id, device_str)", "def delete_mfa_totp_device(self, user_id, mfa_totp_device_id, 
**kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def create_ticket(self, user):\n return Ticket.objects.create_ticket('test', user)", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='test@test.com')[0]\n user.set_password('testabc123')\n user.save()\n return user", "def create(self, request):\n\n return ObtainAuthToken().post(request)", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, request):\n return ObtainAuthToken().post(request)", "def test_create_token_for_user(self):\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'testpass'\n }\n create_user(**payload)\n res = self.client.post(TOKEN_URI, payload)\n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_create_token_for_user(self):\n payload = {'email': 'test1@test1.ri',\n 'password': 'testPassWord',\n 'time_zone': 'Europe/Dublin'}\n create_user(**payload)\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_create_token_for_user(self):\n payload = {'email': 'test@test.com', 'password': 'testpass'}\n create_user(**payload)\n res = 
self.client.post(TOKEN_URL, payload)\n\n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def __initialize_totp(self) -> pyotp.totp.TOTP:\n return pyotp.totp.TOTP(self.user.totp_secret)", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='test@test.com')[0]\n user.set_password('testabc123')\n user.save()\n\n return user", "def req_display_otp(self):\n\n ret = self.ui_auth.create_new_one_time_pwd()\n if ret is not None:\n self.error_msg_queue_list.append(ret)", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def _create_user(self, phone_number, password, **extra_fields):\n if not phone_number:\n raise ValueError('The given phone_number must be set')\n phone_number = phone_number\n user = self.model(phone_number=phone_number, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_and_login(self):\n with self.context():\n user = self.factory(meido.factories.UserFactory)\n self.client.post('/management/login', data={\n 'username': 'admin', 'password': 'pretender'\n })", "def create_device():\n sonyapilib.device.TIMEOUT = 0.1\n device = SonyDevice(\"test\", \"test\")\n device.api_version = 3\n device.cookies = jsonpickle.decode(read_file(\"data/cookies.json\"))\n return device", "def create_user(context, params):\n form_user = dict()\n # form_user['edited_by'] = context.user\n if params.get('username'):\n form_user['username'] = params.get('username')\n else:\n form_user['username'] = create_username(params) # 'email_user{}'.format(MISUser.objects.latest('id').id + 1\n form_user['first_name'] = params.get('first_name')\n form_user['last_name'] = params.get('last_name')\n form_person = create_person(params)\n form_user.update(form_person)\n user = User.objects.create(**form_user)\n user.set_password(params.get('password'))\n\n email = {'label': 'Work', 'val': params.get('email'), 'person': user, 'is_main': True}\n create_email(context, email)\n\n user.save()\n return user", "def _create_user(userid, **kw):\n\n new_user = User(userid, **kw)\n USERS[new_user.token] = new_user\n return USERS[new_user.token]", "def create(self, data):\n # ensure 'create()' calls the specific 'create_user()' method\n # note that the 'data' gets validated\n user = get_user_model().objects.create_user(**data)\n return user", "def _create(cls, model_class, *args, **kwargs):\n manager = cls._get_manager(model_class)\n # The default would use ``manager.create(*args, **kwargs)``\n return manager.create_user(*args, **kwargs)", "def fusion_api_add_user(self, body, api=None, headers=None):\n return self.user.create(body, api, headers)", "def create(cls, sender, instance, created, **kdws):\n if created:\n username = helpers.make_username(instance.first_name, instance.last_name, instance.email)\n user = User(username=username)\n user.save()\n user = User.objects.get(username=username)\n instance.user = user\n instance.save()", "def createCode():\n code = randNums()\n phone = {'phone': request.json['phone'], 'code': code}\n # # inst. 
twilio client\n client = Client(os.environ['ACCOUNT_SID'], os.environ['AUTH_TOKEN'])\n client.api.account.messages.create(\n to=phone['phone'],\n from_=\"5162899596\",\n body=phone['code'])\n # temp_code_holder.append(phone['code'])\n u = models.User(name='man2', phone=phone['phone'], code=phone['code'], is_verified=False)\n db.session.add(u)\n db.session.commit()\n resp = Response(json.dumps(phone), status=200, mimetype='application/json')\n return resp", "def perform_create(self, serializer):\n km_user = KMUser.objects.get(pk=self.kwargs.get(\"pk\"))\n\n return serializer.save(km_user=km_user)", "def perform_create(self, serializer):\n km_user = KMUser.objects.get(pk=self.kwargs.get(\"pk\"))\n\n return serializer.save(km_user=km_user)", "def perform_create(self, serializer):\n km_user = KMUser.objects.get(pk=self.kwargs.get(\"pk\"))\n\n return serializer.save(km_user=km_user)", "def _create_user(self, email_or_phone, password, is_staff, is_superuser, **extra_fields):\n if not email_or_phone:\n raise ValueError('The given email_or_phone must be set')\n\n if \"@\" in email_or_phone:\n username, email, phone = (email_or_phone, email_or_phone, \"\")\n else:\n username, email, phone = (email_or_phone, \"\", email_or_phone)\n\n now = timezone.now()\n extra_fields.setdefault('is_staff', True)\n is_active = extra_fields.pop(\"is_active\", True)\n user = self.model(username=username, email=email,\n mobile=phone,\n is_staff=is_staff,\n is_active=is_active,\n is_superuser=is_superuser,\n\n date_joined=now,\n **extra_fields\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, phone_number, password, **extra_fields):\n\n if not phone_number:\n raise ValueError(\"Phone number is missing\")\n\n user = self.model(phone_number=phone_number, **extra_fields)\n user.set_password(password)\n\n user.save(using=self._db)\n\n # Create a Profile object for the user\n profile = Profile(user=user)\n profile.save()\n\n # Create a Cart object for the user\n cart = Cart(user=user)\n cart.save()\n\n # Create a UserAddress object for the user\n address = UserAddress(user=user)\n address.save()\n\n return user" ]
[ "0.5845079", "0.58070564", "0.57539624", "0.56343126", "0.5609716", "0.55635047", "0.5540228", "0.5524604", "0.552029", "0.552029", "0.552029", "0.5508349", "0.5505848", "0.5504013", "0.54644525", "0.5441662", "0.53983897", "0.53840804", "0.5339161", "0.5339142", "0.5335467", "0.5331013", "0.5312626", "0.53079784", "0.52986294", "0.52529204", "0.52490073", "0.52367395", "0.5231955", "0.52287084", "0.5223296", "0.52210355", "0.5209417", "0.52067244", "0.5199353", "0.5197798", "0.51932234", "0.5184169", "0.5172278", "0.51712", "0.5168732", "0.5162775", "0.516121", "0.5156215", "0.5151114", "0.51503265", "0.51378304", "0.51275796", "0.5123919", "0.5121867", "0.51215404", "0.5120832", "0.51180947", "0.5111238", "0.51074827", "0.5101321", "0.5094755", "0.50858164", "0.5085451", "0.5078075", "0.5076978", "0.5073164", "0.50666213", "0.50615466", "0.5058694", "0.5053253", "0.50515765", "0.50504434", "0.50499463", "0.50488245", "0.5042433", "0.5042116", "0.5042116", "0.5042116", "0.5042116", "0.5042116", "0.5042116", "0.50406975", "0.5038303", "0.50382364", "0.5035723", "0.5031232", "0.5027109", "0.5023327", "0.5020947", "0.5018745", "0.50138724", "0.50113523", "0.5001658", "0.49997082", "0.49973541", "0.49810016", "0.49799585", "0.4979715", "0.49777052", "0.49776423", "0.49776423", "0.49776423", "0.49766824", "0.49728402" ]
0.7277988
0
Creates a new network source in your tenancy. You must specify your tenancy's OCID as the compartment ID in the request object (remember that the tenancy is simply the root compartment). Notice that IAM resources (users, groups, compartments, and some policies) reside within the tenancy itself, unlike cloud resources such as compute instances, which typically reside within compartments inside the tenancy. For information about OCIDs, see `Resource Identifiers`__. You must also specify a name for the network source, which must be unique across all network sources in your tenancy, and cannot be changed. You can use this name or the OCID when writing policies that apply to the network source. For more information about policies, see `How Policies Work`__. You must also specify a description for the network source (although it can be an empty string). It does not
def create_network_source(self, create_network_source_details, **kwargs): resource_path = "/networkSources" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_network_source got unknown kwargs: {!r}".format(extra_kwargs)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, header_params=header_params, body=create_network_source_details, response_type="NetworkSources") else: return self.base_client.call_api( resource_path=resource_path, method=method, header_params=header_params, body=create_network_source_details, response_type="NetworkSources")
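A minimal usage sketch for the method documented above, assuming the published `oci` Python SDK surface (`oci.identity.IdentityClient` and the `CreateNetworkSourceDetails` model) rather than the raw `base_client` call shown in the record; the config profile, source name, and CIDR below are placeholders, not values from the source:

```python
# Illustrative sketch only: assumes the `oci` Python SDK and a configured
# ~/.oci/config profile; "corp-vpn-source" and the CIDR are made-up examples.
import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# For IAM resources such as network sources, the compartment ID is the
# tenancy OCID itself (the root compartment).
details = oci.identity.models.CreateNetworkSourceDetails(
    compartment_id=config["tenancy"],
    name="corp-vpn-source",                       # must be unique in the tenancy
    description="CIDRs allowed to authenticate",  # may be an empty string
    public_source_list=["203.0.113.0/24"],
)

response = identity.create_network_source(details)
print(response.data.id)
```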
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)", "def new_source(self, name):\n params = {\"name\": name}\n return JSONRPCRequest(self, \"newSource\", params)", "def copy_network(source_net):\n return make_net_model({\"id\": source_net.id,\n \"subnets\": source_net.subnets,\n \"ports\": source_net.ports,\n \"tenant_id\": source_net.tenant_id,\n \"mtu\": source_net.mtu})", "def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)", "def network_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_network(**kwargs)", "def create_network(self, tenant_id, network):\n self.create_network_bulk(tenant_id, [network])", "def ex_create_network(self, resource_group, network, extra=None, location=None):\n if location is None:\n if self.default_location:\n location = self.default_location\n else:\n raise ValueError(\"location is required.\")\n target = \"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s\" % (\n self.subscription_id, resource_group, network)\n params = {\"api-version\": \"2016-03-30\"}\n data = {\n \"tags\": {},\n \"location\": location.id,\n }\n\n if extra:\n data[\"properties\"] = extra\n\n r = self.connection.request(action=target,\n params=params,\n data=data,\n method=\"PUT\")\n\n while r.object is None:\n time.sleep(1)\n\n return AzureNetwork(r.object[\"id\"], r.object[\"name\"], r.object[\"location\"], r.object[\"properties\"])", "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id", "def create_network(self, context, network):\n LOG.debug(_(\"NeutronRestProxyV2: create_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n with context.session.begin(subtransactions=True):\n # Validate args\n tenant_id = self._get_tenant_id_for_create(context,\n network[\"network\"])\n\n # create network in DB\n new_net = super(NeutronRestProxyV2, self).create_network(context,\n network)\n self._process_l3_create(context, new_net, network['network'])\n mapped_network = self._get_mapped_network_with_subnets(new_net,\n context)\n\n # create network on the network controller\n self.servers.rest_create_network(tenant_id, mapped_network)\n\n # return created network\n return new_net", "def fusion_api_create_network_set(self, body, api=None, headers=None):\n return self.network_set.create(body, api, headers)", "def create_default_network(context):\n return [{\n 'type': 'templates/network.py',\n 'name': 'fc-network',\n 'properties': {\n 'resourceName': 'network',\n 'name': 
'network',\n 'projectId': '$(ref.fc-project.projectId)',\n 'autoCreateSubnetworks': True,\n # We pass the dependsOn list into the network template as a\n # parameter. Deployment Manager doesn't support dependsOn for\n # template-call nodes, so we can't have this resource itself depend on\n # the project-wide resources.\n 'dependsOn': '$(ref.fc-project.resourceNames)',\n },\n }]", "def create_network(self, context, network):\n\n LOG.debug(_(\"QuantumRestProxyV2: create_network() called\"))\n\n # Validate args\n tenant_id = self._get_tenant_id_for_create(context, network[\"network\"])\n net_name = network[\"network\"][\"name\"]\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\"), net_name)\n\n # create in DB\n new_net = super(QuantumRestProxyV2, self).create_network(context,\n network)\n\n # create on networl ctrl\n try:\n resource = NET_RESOURCE_PATH % tenant_id\n data = {\n \"network\": {\n \"id\": new_net[\"id\"],\n \"name\": new_net[\"name\"],\n }\n }\n ret = self.servers.post(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2:Unable to create remote \"\n \"network: %s\"), e.message)\n super(QuantumRestProxyV2, self).delete_network(context,\n new_net['id'])\n raise\n\n # return created network\n return new_net", "def create_network_profile(self, body=None):\r\n return self.post(self.network_profiles_path, body=body)", "def subnetpool_create(request, name, prefixes, **kwargs):\n LOG.debug(\"subnetpool_create(): name=%(name)s, prefixes=%(prefixes)s, \"\n \"kwargs=%(kwargs)s\", {'name': name, 'prefixes': prefixes,\n 'kwargs': kwargs})\n body = {'subnetpool':\n {'name': name,\n 'prefixes': prefixes,\n }\n }\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body['subnetpool'].update(kwargs)\n subnetpool = \\\n neutronclient(request).create_subnetpool(body=body).get('subnetpool')\n return SubnetPool(subnetpool)", "def create_from_src(self, cgsnapshot_id, source_cgid, name=None,\n description=None, user_id=None,\n project_id=None):\n body = {'consistencygroup-from-src': {'name': name,\n 'description': description,\n 'cgsnapshot_id': cgsnapshot_id,\n 'source_cgid': source_cgid,\n 'user_id': user_id,\n 'project_id': project_id,\n 'status': \"creating\",\n }}\n\n self.run_hooks('modify_body_for_update', body,\n 'consistencygroup-from-src')\n resp, body = self.api.client.post(\n \"/consistencygroups/create_from_src\", body=body)\n return common_base.DictWithMeta(body['consistencygroup'], resp)", "def CreateGcpWorkloadSource(\n client,\n messages,\n workload_source_id: str,\n resources: Optional[List[str]],\n attached_service_accounts: Optional[List[str]],\n parent: str,\n for_managed_identity: bool = False,\n):\n conditions = []\n if resources is not None:\n conditions += [\n messages.WorkloadSourceCondition(attribute='resource', value=resource)\n for resource in resources\n ]\n if attached_service_accounts is not None:\n conditions += [\n messages.WorkloadSourceCondition(\n attribute='attached_service_account', value=account\n )\n for account in attached_service_accounts\n ]\n new_workload_source = messages.WorkloadSource(\n conditionSet=messages.WorkloadSourceConditionSet(conditions=conditions)\n )\n if for_managed_identity:\n return 
client.projects_locations_workloadIdentityPools_namespaces_managedIdentities_workloadSources.Create(\n messages.IamProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesCreateRequest(\n parent=parent,\n workloadSource=new_workload_source,\n workloadSourceId=workload_source_id,\n )\n )\n else:\n return client.projects_locations_workloadIdentityPools_namespaces_workloadSources.Create(\n messages.IamProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesCreateRequest(\n parent=parent,\n workloadSource=new_workload_source,\n workloadSourceId=workload_source_id,\n )\n )", "def subnet_create(request, network_id, **kwargs):\n LOG.debug(\"subnet_create(): netid=%(network_id)s, kwargs=%(kwargs)s\",\n {'network_id': network_id, 'kwargs': kwargs})\n body = {'subnet': {'network_id': network_id}}\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body['subnet'].update(kwargs)\n subnet = neutronclient(request).create_subnet(body=body).get('subnet')\n return Subnet(subnet)", "def init_network(session: \"Session\", new_network_name: str) -> None:\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}\"\n _post(session, url_tail, None, params={CoordConstsV2.QP_NAME: new_network_name})", "def create(self):\n logging.debug(\"%s create called\" % self)\n # networks = self.infra.get(\"networks\")\n notify(\"Creating network %s\" % self.name)\n self.cloudnet = cn.create(self.name, cidr=self.cidr)\n return True", "def create_source(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_source_with_http_info(**kwargs)\n else:\n (data) = self.create_source_with_http_info(**kwargs)\n return data", "def create_network_gateway(self, body=None):\r\n return self.post(self.network_gateways_path, body=body)", "def add_provider_network(network_id, network_type, segmentation_id):\n session = db.get_session()\n if session.query(network_models_v2.ProviderNetwork).filter_by(\n network_id=network_id).first():\n raise c_exc.ProviderNetworkExists(network_id)\n pnet = network_models_v2.ProviderNetwork(network_id=network_id,\n network_type=network_type,\n segmentation_id=segmentation_id)\n session.add(pnet)\n session.flush()", "def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. 
Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n 
virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return", "def _create_network_resources(self, tenant_id):\n logger.info(\"Creating network resources...\")\n net_name = \"ostf-autoscaling-test-service-net\"\n net_body = {\n \"network\": {\n \"name\": net_name,\n \"tenant_id\": tenant_id\n }\n }\n ext_net = None\n net = None\n for network in self.neutron_cli.list_networks()[\"networks\"]:\n if not net and network[\"name\"] == net_name:\n net = network\n if not ext_net and network[\"router:external\"]:\n ext_net = network\n if not net:\n net = self.neutron_cli.create_network(net_body)[\"network\"]\n subnet = self.helpers.os_conn.create_subnet(\n \"sub\" + net_name, net[\"id\"], \"10.1.7.0/24\", tenant_id=tenant_id\n )\n router_name = 'ostf-autoscaling-test-service-router'\n router = self.helpers.os_conn.create_router(\n router_name, self.helpers.os_conn.get_tenant(\"admin\"))\n self.neutron_cli.add_interface_router(\n router[\"id\"], {\"subnet_id\": subnet[\"id\"]})\n return net[\"id\"]", "def create_network(client, overwrite_net=False, network_name=DOCK_NETWORK_NAME, subnetwork=DOCK_NETWORK_SUBNET,\n gw=DOCK_NETWORK_GW):\n\n if overwrite_net:\n try:\n client.networks.get(network_name).remove()\n logging.info(\" Overwriting existing network\")\n except docker.errors.APIError:\n logging.info(\" Warning: Couldn't find network to overwrite (does it exist?)\")\n\n ipam_pool = docker.types.IPAMPool(subnet=subnetwork, gateway=gw)\n ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])\n client.networks.create(network_name, driver=\"bridge\", ipam=ipam_config)", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])", "def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)", "def create_network(address=None, **options):\n return NetworkDefinition(address, **options)", "def create_network(self, *, name: t.Optional[str] = None) -> Network:\n network = Network(self, name=name)\n self._networks.add(network)\n return network", "def test_networking_project_network_create(self):\n pass", "def get_network_source(self, network_source_id, **kwargs):\n resource_path = \"/networkSources/{networkSourceId}\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"get_network_source got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"networkSourceId\": network_source_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"NetworkSources\")\n else:\n return 
self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"NetworkSources\")", "def create_network(options, vsm_obj):\n edge_id = get_edge(vsm_obj)\n if not edge_id:\n if not add_edge(options):\n print(\"Failed to create edge\")\n return False\n edge_id = get_edge(vsm_obj)\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n name = get_network_name(options)\n response = virtual_wire.read_by_name(name)\n if response != \"FAILURE\":\n print(\"Found network %s already exists\" % options.name)\n return True\n\n virtual_wire_create = VirtualWireCreateSpecSchema()\n virtual_wire_create.name = name\n virtual_wire_create.tenantId = name\n virtual_wire_create.description = 'NSX network %s' % name\n\n # check if user needs to enable guest vlan tagging,\n # this is require if one needs to run vlan tests in nested\n # environment.\n if hasattr(options, 'guest_vlan'):\n if options.guest_vlan is True:\n print(\"network %s has guest vlan tagging enabled\"\\\n % options.name)\n virtual_wire_create.guestVlanAllowed = True\n\n print(\"Creating network %s\" % options.name)\n result = virtual_wire.create(virtual_wire_create)\n if (result[0].response.status != 201):\n print \"response: %s\" % result[0].response.status\n print \"response: %s\" % result[0].response.reason\n return False\n print(\"Changing security settings on the network\")\n set_network_security_policy(options)\n return add_edge_interface(options, edge_id)", "def create_net(self, net_name, shared=\"false\"):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _net_info = {\"network\":\n {\"name\": net_name,\n \"shared\": shared,\n \"admin_state_up\": True}}\n _body = json.dumps(_net_info)\n\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating network.\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Creation of network Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Network is created successfully. 
Details : %s \" %\n output['network'])\n\n return output['network']['id']", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n self.neutron.list_routers()", "def _precreate_network(self):\n # check cidr format\n net_cidr = CONF.azure.vnet_cidr\n subnet_cidr = CONF.azure.vsubnet_cidr\n if not (self._is_valid_cidr(net_cidr) and\n self._is_valid_cidr(subnet_cidr)):\n msg = 'Invalid network: %(net_cidr)s/subnet: %(subnet_cidr)s' \\\n ' CIDR' % dict(net_cidr=net_cidr, subnet_cidr=subnet_cidr)\n LOG.error(msg)\n raise exception.NetworkCreateFailure(reason=msg)\n # Creaet Network\n try:\n nets = self.network.virtual_networks.list(\n CONF.azure.resource_group)\n net_exist = False\n for i in nets:\n if i.name == CONF.azure.vnet_name:\n net_exist = True\n break\n if not net_exist:\n network_info = dict(location=CONF.azure.location,\n address_space=dict(\n address_prefixes=[net_cidr]))\n async_vnet_creation = \\\n self.network.virtual_networks.create_or_update(\n CONF.azure.resource_group,\n CONF.azure.vnet_name,\n network_info)\n async_vnet_creation.wait(CONF.azure.async_timeout)\n LOG.info(_LI(\"Create Network\"))\n except Exception as e:\n msg = six.text_type(e)\n ex = exception.NetworkCreateFailure(reason=msg)\n LOG.exception(msg)\n raise ex\n\n # Create Subnet\n try:\n # subnet can't recreate, check existing before create.\n subnets = self.network.subnets.list(\n CONF.azure.resource_group,\n CONF.azure.vnet_name)\n subnet_exist = False\n subnet_details = None\n for i in subnets:\n if i.name == CONF.azure.vsubnet_name:\n subnet_exist = True\n subnet_details = i\n break\n if not subnet_exist:\n subnet_info = {'address_prefix': subnet_cidr}\n async_subnet_creation = self.network.subnets.create_or_update(\n CONF.azure.resource_group,\n CONF.azure.vnet_name,\n CONF.azure.vsubnet_name,\n subnet_info\n )\n subnet_details = async_subnet_creation.result()\n except Exception as e:\n # delete network if subnet create fail.\n try:\n async_vm_action = self.network.virtual_networks.delete(\n CONF.azure.resource_group, CONF.azure.vnet_name)\n async_vm_action.wait(CONF.azure.async_timeout)\n LOG.info(_LI(\"Deleted Network %s after Subnet create \"\n \"failed.\"), CONF.azure.vnet_name)\n except Exception:\n LOG.error(_LE('Delete Network %s failed after Subnet create '\n 'failed.'), CONF.azure.vnet_name)\n msg = six.text_type(e)\n ex = exception.SubnetCreateFailure(reason=msg)\n LOG.exception(msg)\n raise ex\n CONF.set_override('vsubnet_id', subnet_details.id, 'azure')\n LOG.info(_LI(\"Create/Update Subnet: %s\"), CONF.azure.vsubnet_id)", "def run(self, network_create_args=None):\n self.neutron.create_network(**(network_create_args or {}))\n self.neutron.list_networks()", "def _add_source_net_filter(self, rule_list, source_net):\n for rule in rule_list:\n if (\"source\" in rule.keys()):\n if (\"nets\" in rule[\"source\"].keys()):\n rule[\"source\"][\"nets\"].append(source_net)\n else:\n rule[\"source\"].update({\"nets\": [source_net]})\n else:\n rule.update({\"source\": {\"nets\": [source_net]}})", "def create_source_with_http_info(self, **kwargs):\n\n all_params = 
['body']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_source\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['Using HTTP Header', 'Using URL Query Parameter']\n\n return self.api_client.call_api('/sources', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='str',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def network_initial(request, SPIC_group, SPIC_id):\n SPIC_obj = get_object_or_404(SPIC, group=SPIC_group, local_id=SPIC_id)\n network_obj, created = Network.objects.get_or_create(user_id=request.user.pk, SPIC=SPIC_obj, local_id=0, deleted=False)\n\n if created is True:\n # Check if prototype exists\n prototype = get_object_or_404(Network, user_id=0, SPIC=SPIC_obj)\n network_obj.nodes_json = prototype.nodes_json\n network_obj.links_json = prototype.links_json\n network_obj.save()\n\n return network(request, SPIC_group, SPIC_id, 0)", "def create_network(region_name, vpc_cidr, tag_prefix,\n tls_priv_key=None, tls_fullchain_cert=None,\n ssh_key_name=None, ssh_key_content=None, sally_ip=None,\n s3_logs_bucket=None, s3_identities_bucket=None,\n storage_enckey=None,\n dry_run=False):\n sg_tag_prefix = tag_prefix\n\n LOGGER.info(\"Provisions network ...\")\n ec2_client = boto3.client('ec2', region_name=region_name)\n\n # Create a VPC\n vpc_id, vpc_cidr_read = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n if vpc_id:\n if vpc_cidr != vpc_cidr_read:\n raise RuntimeError(\n \"%s cidr block for VPC is %s while it was expected to be %s\" %\n (tag_prefix, vpc_cidr_read, vpc_cidr))\n else:\n if not vpc_cidr:\n raise RuntimeError(\n \"%s could not find VPC and no cidr block is specified\"\\\n \" to create one.\" % tag_prefix)\n resp = ec2_client.create_vpc(\n DryRun=dry_run,\n CidrBlock=vpc_cidr,\n AmazonProvidedIpv6CidrBlock=False,\n InstanceTenancy='default')\n vpc_id = resp['Vpc']['VpcId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[vpc_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s-vpc\" % tag_prefix}])\n LOGGER.info(\"%s created VPC %s\", tag_prefix, vpc_id)\n\n # Create subnets for app, dbs and web services\n # ELB will require that there is at least one subnet per availability zones.\n # RDS will require that there is at least two subnets for databases.\n resp = ec2_client.describe_availability_zones()\n zones = {(zone['ZoneId'], zone['ZoneName'])\n for zone in resp['AvailabilityZones']}\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, zones=zones, 
region_name=region_name)\n\n LOGGER.info(\"%s provisioning web subnets...\", tag_prefix)\n web_zones = set([])\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if subnet:\n web_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - web_zones\n zone_id, zone_name = available_zones.pop()\n try:\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s web\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n web_subnet_by_cidrs[cidr_block] = subnet\n web_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidSubnet.Conflict':\n raise\n # We have a conflict, let's just skip over it.\n LOGGER.warning(\n \"%s (skip) created subnet in zone %s because '%s'\",\n tag_prefix, zone_name, err)\n if subnet and not subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': True})\n LOGGER.info(\"%s modify web subnet %s so instance can receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n LOGGER.info(\"%s provisioning dbs subnets...\", tag_prefix)\n dbs_zones = set([])\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n if subnet:\n dbs_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - dbs_zones\n zone_id, zone_name = available_zones.pop()\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s dbs\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n dbs_subnet_by_cidrs[cidr_block] = subnet\n dbs_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n if subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': False})\n LOGGER.info(\"%s modify dbs subnet %s so instance do not receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n LOGGER.info(\"%s provisioning apps subnets...\", tag_prefix)\n app_zones = set([])\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n if subnet:\n app_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n if not subnet:\n 
available_zones = zones - app_zones\n zone_id, zone_name = available_zones.pop()\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n # COMMIT MSG:\n # this requires boto3>=1.14, using `createTag` might fail\n # because the subnet is not fully created yet.\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s app\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n app_subnet_by_cidrs[cidr_block] = subnet\n app_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n if subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': False})\n LOGGER.info(\"%s modify app subnet %s so instance do not receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n # Ensure that the VPC has an Internet Gateway.\n resp = ec2_client.describe_internet_gateways(\n Filters=[{'Name': 'attachment.vpc-id', 'Values': [vpc_id]}])\n if resp['InternetGateways']:\n igw_id = resp['InternetGateways'][0]['InternetGatewayId']\n LOGGER.info(\"%s found Internet Gateway %s\", tag_prefix, igw_id)\n else:\n resp = ec2_client.describe_internet_gateways(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['InternetGateways']:\n igw_id = resp['InternetGateways'][0]['InternetGatewayId']\n LOGGER.info(\"%s found Internet Gateway %s\", tag_prefix, igw_id)\n else:\n resp = ec2_client.create_internet_gateway(DryRun=dry_run)\n igw_id = resp['InternetGateway']['InternetGatewayId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[igw_id],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s internet gateway\" % tag_prefix}])\n LOGGER.info(\"%s created Internet Gateway %s\", tag_prefix, igw_id)\n resp = ec2_client.attach_internet_gateway(\n DryRun=dry_run,\n InternetGatewayId=igw_id,\n VpcId=vpc_id)\n\n # Create the NAT gateway by which private subnets connect to Internet\n # XXX Why do we have a Network interface eni-****?\n nat_elastic_ip = None\n web_elastic_ip = None\n resp = ec2_client.describe_addresses(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['Addresses']:\n for resp_address in resp['Addresses']:\n for resp_tag in resp_address['Tags']:\n if resp_tag['Key'] == 'Name':\n if 'NAT gateway' in resp_tag['Value']:\n nat_elastic_ip = resp_address['AllocationId']\n break\n if 'Sally' in resp_tag['Value']:\n web_elastic_ip = resp_address['AllocationId']\n break\n\n if nat_elastic_ip:\n LOGGER.info(\"%s found NAT gateway public IP %s\",\n tag_prefix, nat_elastic_ip)\n else:\n resp = ec2_client.allocate_address(\n DryRun=dry_run,\n Domain='vpc')\n nat_elastic_ip = resp['AllocationId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[nat_elastic_ip],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s NAT gateway public IP\" % tag_prefix}])\n LOGGER.info(\"%s created NAT gateway public IP %s\",\n tag_prefix, nat_elastic_ip)\n if web_elastic_ip:\n LOGGER.info(\"%s found Sally public IP %s\",\n tag_prefix, web_elastic_ip)\n else:\n resp = ec2_client.allocate_address(\n DryRun=dry_run,\n Domain='vpc')\n web_elastic_ip = resp['AllocationId']\n ec2_client.create_tags(\n DryRun=dry_run,\n 
Resources=[web_elastic_ip],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s Sally public IP\" % tag_prefix}])\n LOGGER.info(\"%s created Sally public IP %s\",\n tag_prefix, web_elastic_ip)\n\n # We have 2 EIP addresses. They need to be connected to machines\n # running in an Internet facing subnet.\n client_token = tag_prefix\n # XXX shouldn't it be the first web subnet instead?\n resp = ec2_client.describe_nat_gateways(Filters=[\n {'Name': \"vpc-id\", 'Values': [vpc_id]},\n {'Name': \"state\", 'Values': ['pending', 'available']}])\n if resp['NatGateways']:\n if len(resp['NatGateways']) > 1:\n LOGGER.warning(\"%s found more than one NAT gateway.\"\\\n \" Using first one in the list.\", tag_prefix)\n nat_gateway = resp['NatGateways'][0]\n nat_gateway_id = nat_gateway['NatGatewayId']\n nat_gateway_subnet_id = nat_gateway['SubnetId']\n LOGGER.info(\"%s found NAT gateway %s\", tag_prefix, nat_gateway_id)\n else:\n nat_gateway_subnet_id = next(web_subnet_by_cidrs.values())['SubnetId']\n resp = ec2_client.create_nat_gateway(\n AllocationId=nat_elastic_ip,\n ClientToken=client_token,\n SubnetId=nat_gateway_subnet_id)\n nat_gateway_id = resp['NatGateway']['NatGatewayId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[nat_gateway_id],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s NAT gateway\" % tag_prefix}])\n LOGGER.info(\"%s created NAT gateway %s\",\n tag_prefix, nat_gateway_id)\n\n # Set up public and NAT-protected route tables\n resp = ec2_client.describe_route_tables(\n Filters=[{'Name': \"vpc-id\", 'Values': [vpc_id]}])\n public_route_table_id = None\n private_route_table_id = None\n for route_table in resp['RouteTables']:\n for route in route_table['Routes']:\n if 'GatewayId' in route and route['GatewayId'] == igw_id:\n public_route_table_id = route_table['RouteTableId']\n LOGGER.info(\"%s found public route table %s\",\n tag_prefix, public_route_table_id)\n break\n if ('NatGatewayId' in route and\n route['NatGatewayId'] == nat_gateway_id):\n private_route_table_id = route_table['RouteTableId']\n LOGGER.info(\"%s found private route table %s\",\n tag_prefix, private_route_table_id)\n\n if not public_route_table_id:\n resp = ec2_client.create_route_table(\n DryRun=dry_run,\n VpcId=vpc_id)\n public_route_table_id = resp['RouteTable']['RouteTableId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[public_route_table_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s public\" % tag_prefix}])\n LOGGER.info(\"%s created public subnet route table %s\",\n tag_prefix, public_route_table_id)\n resp = ec2_client.create_route(\n DryRun=dry_run,\n DestinationCidrBlock='0.0.0.0/0',\n GatewayId=igw_id,\n RouteTableId=public_route_table_id)\n\n if not private_route_table_id:\n resp = ec2_client.create_route_table(\n DryRun=dry_run,\n VpcId=vpc_id)\n private_route_table_id = resp['RouteTable']['RouteTableId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[private_route_table_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s internal\" % tag_prefix}])\n private_route_table_id = resp['RouteTable']['RouteTableId']\n LOGGER.info(\"%s created private route table %s\",\n tag_prefix, private_route_table_id)\n for _ in range(0, NB_RETRIES):\n # The NAT Gateway takes some time to be fully operational.\n try:\n resp = ec2_client.create_route(\n DryRun=dry_run,\n DestinationCidrBlock='0.0.0.0/0',\n NatGatewayId=nat_gateway_id,\n 
RouteTableId=private_route_table_id)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidNatGatewayID.NotFound':\n raise\n time.sleep(RETRY_WAIT_DELAY)\n\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n RouteTableIds=[public_route_table_id])\n assocs = resp['RouteTables'][0]['Associations']\n if len(assocs) > 1:\n LOGGER.warning(\"%s found more than one route table association for\"\\\n \" public route table. Using first one in the list.\", tag_prefix)\n if not assocs[0]['Main']:\n LOGGER.warning(\"%s public route table is not the main one for the VPC.\",\n tag_prefix)\n\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if not subnet:\n # Maybe there was a conflict and we skipped this cidr_block.\n continue\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = not bool(resp['RouteTables'])\n if found_association:\n LOGGER.info(\n \"%s found public route table %s associated to web subnet %s\",\n tag_prefix, public_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=public_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate public route table %s to web subnet %s\",\n tag_prefix, public_route_table_id, subnet_id)\n\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = False\n if resp['RouteTables']:\n found_association = (\n resp['RouteTables'][0]['Associations'][0]['RouteTableId'] ==\n private_route_table_id\n )\n if found_association:\n LOGGER.info(\n \"%s found private route table %s associated to dbs subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=private_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate private route table %s to dbs subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = False\n if resp['RouteTables']:\n found_association = (\n resp['RouteTables'][0]['Associations'][0]['RouteTableId'] ==\n private_route_table_id\n )\n if found_association:\n LOGGER.info(\n \"%s found private route table %s associated to app subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=private_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate private route table %s to app subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n\n # Create the ELB, proxies and databases security groups\n # The app security group (as the instance role) will be specific\n # to the application.\n #pylint:disable=unbalanced-tuple-unpacking\n moat_name, vault_name, gate_name, kitchen_door_name = \\\n _get_security_group_names([\n 
'moat', 'vault', 'castle-gate', 'kitchen-door'],\n tag_prefix=sg_tag_prefix)\n moat_sg_id, vault_sg_id, gate_sg_id, kitchen_door_sg_id = \\\n _get_security_group_ids(\n [moat_name, vault_name, gate_name, kitchen_door_name],\n tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n\n update_moat_rules = (not moat_sg_id)\n update_gate_rules = (not gate_sg_id)\n update_vault_rules = (not vault_sg_id)\n update_kitchen_door_rules = (not kitchen_door_sg_id)\n\n if not moat_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s ELB' % tag_prefix,\n GroupName=moat_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n moat_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, moat_name, moat_sg_id)\n if not gate_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s session managers' % tag_prefix,\n GroupName=gate_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n gate_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, gate_name, gate_sg_id)\n if not vault_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s databases' % tag_prefix,\n GroupName=vault_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n vault_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, vault_name, vault_sg_id)\n # kitchen_door_sg_id: Kitchen door security group is created later on\n # if we have ssh keys.\n\n resp = ec2_client.describe_security_groups(\n DryRun=dry_run,\n GroupIds=[moat_sg_id, vault_sg_id, gate_sg_id])\n for security_group in resp['SecurityGroups']:\n if security_group['GroupId'] == moat_sg_id:\n # moat rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, moat_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 80, 'source': '0.0.0.0/0'},\n {'port': 80, 'source': '::/0'},\n {'port': 443, 'source': '0.0.0.0/0'},\n {'port': 443, 'source': '::/0'},\n ],\n tag_prefix=tag_prefix)\n elif security_group['GroupId'] == gate_sg_id:\n # castle-gate rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, gate_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 80, 'source': moat_sg_id},\n {'port': 443, 'source': moat_sg_id}\n ],\n tag_prefix=tag_prefix)\n elif security_group['GroupId'] == vault_sg_id:\n # vault rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, vault_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 5432, 'source': gate_sg_id}\n ],\n tag_prefix=tag_prefix)\n\n # moat allow rules\n if update_moat_rules:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=moat_sg_id,\n IpPermissions=[{\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0'\n }],\n 'ToPort': 80\n }, {\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'Ipv6Ranges': [{\n 'CidrIpv6': '::/0',\n }],\n 'ToPort': 80\n }, {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0'\n }],\n 'ToPort': 443\n }, {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'Ipv6Ranges': [{\n 'CidrIpv6': '::/0',\n }],\n 'ToPort': 443\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n if update_gate_rules:\n # castle-gate allow rules\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 80,\n 'ToPort': 80,\n 'UserIdGroupPairs': 
[{'GroupId': moat_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 443,\n 'ToPort': 443,\n 'UserIdGroupPairs': [{'GroupId': moat_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n # vault allow rules\n if update_vault_rules:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 5432,\n 'ToPort': 5432,\n 'UserIdGroupPairs': [{'GroupId': gate_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n # Create uploads and logs S3 buckets\n # XXX create the identities bucket?\n # XXX need to force private.\n if not s3_identities_bucket:\n s3_identities_bucket = '%s-identities' % tag_prefix\n s3_uploads_bucket = tag_prefix\n s3_client = boto3.client('s3')\n if s3_logs_bucket:\n try:\n resp = s3_client.create_bucket(\n ACL='private',\n Bucket=s3_logs_bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n })\n LOGGER.info(\"%s created S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'BucketAlreadyOwnedByYou':\n raise\n # Apply bucket encryption by default\n found_encryption = False\n try:\n resp = s3_client.get_bucket_encryption(\n Bucket=s3_logs_bucket)\n if resp['ServerSideEncryptionConfiguration']['Rules'][0][\n 'ApplyServerSideEncryptionByDefault'][\n 'SSEAlgorithm'] == 'AES256':\n found_encryption = True\n LOGGER.info(\"%s found encryption AES256 enabled on %s bucket\",\n tag_prefix, s3_logs_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n if not err.response.get('Error', {}).get('Code', 'Unknown') == \\\n 'ServerSideEncryptionConfigurationNotFoundError':\n raise\n if not found_encryption:\n s3_client.put_bucket_encryption(\n Bucket=s3_logs_bucket,\n ServerSideEncryptionConfiguration={\n 'Rules': [{\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'AES256',\n }\n }]\n })\n LOGGER.info(\"%s enable encryption on %s bucket\",\n tag_prefix, s3_logs_bucket)\n\n # Set versioning and lifecycle policies\n resp = s3_client.get_bucket_versioning(\n Bucket=s3_logs_bucket)\n if 'Status' in resp 
and resp['Status'] == 'Enabled':\n LOGGER.info(\"%s found versioning enabled on %s bucket\",\n tag_prefix, s3_logs_bucket)\n else:\n s3_client.put_bucket_versioning(\n Bucket=s3_logs_bucket,\n VersioningConfiguration={\n 'MFADelete': 'Disabled',\n 'Status': 'Enabled'\n })\n LOGGER.info(\"%s enable versioning on %s bucket\",\n tag_prefix, s3_logs_bucket)\n found_policy = False\n #pylint:disable=too-many-nested-blocks\n try:\n resp = s3_client.get_bucket_lifecycle_configuration(\n Bucket=s3_logs_bucket)\n for rule in resp['Rules']:\n if rule['Status'] == 'Enabled':\n found_rule = True\n for transition in rule['Transitions']:\n if transition['StorageClass'] == 'GLACIER':\n if transition.get('Days', 0) < 90:\n found_rule = False\n LOGGER.warning(\"%s lifecycle for 'GLACIER'\"\\\n \" is less than 90 days.\", tag_prefix)\n break\n if rule['Expiration'].get('Days', 0) < 365:\n found_rule = False\n LOGGER.warning(\n \"%s lifecycle expiration is less than 365 days.\",\n tag_prefix)\n for transition in rule['NoncurrentVersionTransitions']:\n if transition['StorageClass'] == 'GLACIER':\n if transition.get('NoncurrentDays', 0) < 90:\n found_rule = False\n LOGGER.warning(\n \"%s version lifecycle for 'GLACIER'\"\\\n \" is less than 90 days.\", tag_prefix)\n break\n if rule['NoncurrentVersionExpiration'].get(\n 'NoncurrentDays', 0) < 365:\n found_rule = False\n LOGGER.warning(\"%s lifecycle version expiration is\"\\\n \" less than 365 days.\", tag_prefix)\n if found_rule:\n found_policy = True\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'NoSuchLifecycleConfiguration':\n raise\n if found_policy:\n LOGGER.info(\"%s found lifecycle policy on %s bucket\",\n tag_prefix, s3_logs_bucket)\n else:\n s3_client.put_bucket_lifecycle_configuration(\n Bucket=s3_logs_bucket,\n LifecycleConfiguration={\n \"Rules\": [{\n \"Status\": \"Enabled\",\n \"ID\": \"expire-logs\",\n \"Filter\": {\n \"Prefix\": \"\", # This is required.\n },\n \"Transitions\": [{\n \"Days\": 90,\n \"StorageClass\": \"GLACIER\"\n }],\n \"Expiration\" : {\n \"Days\": 365\n },\n \"NoncurrentVersionTransitions\": [{\n \"NoncurrentDays\": 90,\n \"StorageClass\": \"GLACIER\"\n }],\n 'NoncurrentVersionExpiration': {\n 'NoncurrentDays': 365\n },\n }]})\n LOGGER.info(\"%s update lifecycle policy on %s bucket\",\n tag_prefix, s3_logs_bucket)\n\n # https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy\n elb_account_ids_per_region = {\n 'us-east-1': '127311923021',\n 'us-east-2': '033677994240',\n 'us-west-1': '027434742980',\n 'us-west-2': '797873946194',\n 'af-south-1': '098369216593',\n 'ca-central-1': '985666609251',\n 'eu-central-1': '054676820928',\n 'eu-west-1': '156460612806',\n 'eu-west-2': '652711504416',\n 'eu-south-1': '635631232127',\n 'eu-west-3': '009996457667',\n 'eu-north-1': '897822967062',\n 'ap-east-1': '754344448648',\n 'ap-northeast-1': '582318560864',\n 'ap-northeast-2': '600734575887',\n 'ap-northeast-3': '383597477331',\n 'ap-southeast-1': '114774131450',\n 'ap-southeast-2': '783225319266',\n 'ap-south-1': '718504428378',\n 'me-south-1': '076674570225',\n 'sa-east-1': '507241528517'\n }\n elb_account_id = elb_account_ids_per_region[region_name]\n s3_client.put_bucket_policy(\n Bucket=s3_logs_bucket,\n Policy=json.dumps({\n \"Version\": \"2008-10-17\",\n \"Id\": \"WriteLogs\",\n \"Statement\": [{\n # billing reports\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"billingreports.amazonaws.com\"\n 
},\n \"Action\": [\n \"s3:GetBucketAcl\",\n \"s3:GetBucketPolicy\"\n ],\n \"Resource\": \"arn:aws:s3:::%s\" % s3_logs_bucket\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"billingreports.amazonaws.com\"\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\": \"arn:aws:s3:::%s/*\" % s3_logs_bucket\n }, {\n # ELB access logs\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"AWS\": \"arn:aws:iam::%s:root\" % elb_account_id\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\":\n \"arn:aws:s3:::%s/var/log/elb/*\" % s3_logs_bucket\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"delivery.logs.amazonaws.com\"\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\":\n (\"arn:aws:s3:::%s/var/log/elb/*\" % s3_logs_bucket),\n \"Condition\": {\n \"StringEquals\": {\n \"s3:x-amz-acl\": \"bucket-owner-full-control\"\n }\n }\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"delivery.logs.amazonaws.com\"\n },\n \"Action\": \"s3:GetBucketAcl\",\n \"Resource\": \"arn:aws:s3:::%s\" % s3_logs_bucket\n }]\n }))\n\n if s3_uploads_bucket:\n try:\n resp = s3_client.create_bucket(\n ACL='private',\n Bucket=s3_uploads_bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n })\n LOGGER.info(\"%s created S3 bucket for uploads %s\",\n tag_prefix, s3_uploads_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for uploads %s\",\n tag_prefix, s3_uploads_bucket)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'BucketAlreadyOwnedByYou':\n raise\n\n # Create instance profiles ...\n iam_client = boto3.client('iam')\n # ... for webfront instances\n create_instance_profile(\n create_gate_role(gate_name,\n s3_logs_bucket=s3_logs_bucket, s3_uploads_bucket=s3_uploads_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n # ... for databases instances\n create_instance_profile(\n create_vault_role(vault_name,\n s3_logs_bucket=s3_logs_bucket, s3_uploads_bucket=s3_uploads_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n if ssh_key_name:\n if not ssh_key_content:\n ssh_key_path = os.path.join(os.getenv('HOME'),\n '.ssh', '%s.pub' % ssh_key_name)\n if os.path.exists(ssh_key_path):\n with open(ssh_key_path, 'rb') as ssh_key_obj:\n ssh_key_content = ssh_key_obj.read()\n else:\n LOGGER.warning(\"%s no content for SSH key %s\",\n tag_prefix, ssh_key_name)\n # import SSH keys\n try:\n resp = ec2_client.import_key_pair(\n DryRun=dry_run,\n KeyName=ssh_key_name,\n PublicKeyMaterial=ssh_key_content)\n LOGGER.info(\"%s imported SSH key %s\", tag_prefix, ssh_key_name)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidKeyPair.Duplicate':\n raise\n LOGGER.info(\"%s found SSH key %s\", tag_prefix, ssh_key_name)\n\n # ... 
for sally instances\n create_instance_profile(\n create_logs_role(kitchen_door_name,\n s3_logs_bucket=s3_logs_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n # allows SSH connection to instances for debugging\n update_kitchen_door_rules = (not kitchen_door_sg_id)\n if not kitchen_door_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s SSH access' % tag_prefix,\n GroupName=kitchen_door_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n kitchen_door_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, kitchen_door_name, kitchen_door_sg_id)\n\n if update_kitchen_door_rules:\n try:\n if sally_ip:\n cidr_block = '%s/32' % sally_ip\n else:\n LOGGER.warning(\"no IP range was specified to restrict\"\\\n \" access to SSH port\")\n cidr_block = '0.0.0.0/0'\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=kitchen_door_sg_id,\n CidrIp=cidr_block,\n IpProtocol='tcp',\n FromPort=22,\n ToPort=22)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=kitchen_door_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n # Creates encryption keys (KMS) in region\n if not storage_enckey:\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n # Create an Application ELB and WAF\n load_balancer_arn = create_elb(\n tag_prefix, web_subnet_by_cidrs, moat_sg_id,\n s3_logs_bucket=s3_logs_bucket,\n tls_priv_key=tls_priv_key, tls_fullchain_cert=tls_fullchain_cert,\n region_name=region_name)\n create_waf(\n tag_prefix,\n elb_arn=load_balancer_arn,\n s3_logs_bucket=s3_logs_bucket,\n region_name=region_name,\n dry_run=dry_run)", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def post(self):\n context = request.environ.get('context')\n json = util.copy_project_id_into_json(context, g.json)\n network_obj = dbapi.networks_create(context, json)\n return jsonutils.to_primitive(network_obj), 200, None", "def create_network(num_subs):\n\n # Need one host for each 
subscriber, one for a publisher, and one for a broker\n n_hosts = num_subs + 2\n\n topo = SingleSwitchTopo(n=n_hosts)\n\n return Mininet(topo=topo, controller=OVSController)", "def create_platform_network(enode, category, config):\n # Check if this category has a defined netns\n netns = config.get('netns', None)\n if netns is None:\n return\n\n # Create the given network namespace\n enode._docker_exec('ip netns add {}'.format(netns))\n\n # lo should always be up\n enode._docker_exec('ip netns exec {} ip link set dev lo up'.format(netns))", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for router in net_topo[\"routers\"]:\n self.neutron.get_router(router[\"id\"])", "def add_network(self, # pylint: disable=too-many-arguments\n network: \"Network\",\n line_of_business: \"LineOfBusiness\",\n platform: \"Platform\",\n cloud_region: \"CloudRegion\" = None,\n tenant: \"Tenant\" = None,\n network_instance_name: str = None,\n subnets: Iterator[\"Subnet\"] = None) -> \"NetworkInstantiation\":\n if not self.active:\n msg = f'Service orchestration status must be \"Active\"'\n raise StatusError(msg)\n\n return NetworkInstantiation.instantiate_ala_carte(\n self,\n network,\n line_of_business,\n platform,\n cloud_region=cloud_region,\n tenant=tenant,\n network_instance_name=network_instance_name,\n subnets=subnets\n )", "def create(profile):\n client = boto3client.get(\"ec2\", profile)\n return client.create_internet_gateway()", "def test_create_network():\n _network = Network()", "def update_network_source(self, network_source_id, update_network_source_details, **kwargs):\n resource_path = \"/networkSources/{networkSourceId}\"\n method = \"PUT\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_network_source got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"networkSourceId\": network_source_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_network_source_details,\n response_type=\"NetworkSources\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n 
path_params=path_params,\n header_params=header_params,\n body=update_network_source_details,\n response_type=\"NetworkSources\")", "def test_create_router_external_network(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # First, get external network id\n external_network_id = self.__get_external_network_test_helper__()\n\n # Then, create router\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_ext_\" + suffix\n self.__create_router_test_helper__(router_name, external_network_id)", "def create(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVirtNet_Create'))", "def nic_add(args):\n name = args.name\n network = args.network\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if network is None:\n common.pprint(\"Missing network. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Adding Nic to %s...\" % name)\n k.add_nic(name=name, network=network)", "def create_VirtualNetwork(network_name, network_subnet, network_mask, vnc, domain, project_name):\n\n project = vnc.project_read(fq_name = [domain, project_name])\n\n vn_obj = vnc_api.VirtualNetwork(name=network_name, parent_obj=project)\n vn_obj.add_network_ipam(vnc_api.NetworkIpam(),\n vnc_api.VnSubnetsType([vnc_api.IpamSubnetType(subnet = vnc_api.SubnetType(network_subnet,network_mask))]))\n\n vnc.virtual_network_create(vn_obj)\n\n print 'Network \"{}\" created successfully\\n'.format(network_name)", "def ogrCreateLayer(sourceLayer, pgConn, destinationLayer):\r\n print \" Creating {0}\".format(destinationLayer)\r\n newLayer = pgConn.CreateLayer(destinationLayer)\r\n\r\n lyrDefn = sourceLayer.GetLayerDefn()\r\n for i in range( lyrDefn.GetFieldCount() ):\r\n ##print \"Creating field: {0}\".format(lyrDefn.GetFieldDefn( i ).GetName())\r\n\r\n fieldName = lyrDefn.GetFieldDefn( i ).GetName()\r\n fieldType = lyrDefn.GetFieldDefn( i ).GetType()\r\n newField = ogr.FieldDefn(fieldName, fieldType)\r\n newLayer.CreateField(newField)", "def new(name, source):", "def mutate(self, info, input):\n # Convert input to dictionary\n data = api_utils.input_to_dictionary(input)\n data_source_type = Operation('ModelDataSourceType').create(**data)\n return CreateDataSourceType(data_source_type=data_source_type)", "def test_add_network(self):\n pass", "def fusion_api_create_ethernet_network(self, body, api=None, headers=None):\n return self.ethernet_network.create(body, api, headers)", "def _create_network(self, name):\n network = self.network(self.num_actions, self.quantile_embedding_dim,\n name=name)\n return network", "def fusion_api_create_fc_network(self, body, api=None, headers=None):\n return self.fc_network.create(body, api, headers)", "def test_create_router_no_external_network_and_add_network_port(self):\n # Create Router\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_ports_\" + suffix\n router_id = self.__create_router_test_helper__(router_name)\n\n # Create Network with only one subnet\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 253\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n 
self.test_world['ports'].append(port_id)", "def add_source(ctx, username, id, features, no_validation, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = (\n f\"{mapbox_api}/tilesets/v1/sources/{username}/{id}?access_token={mapbox_token}\"\n )\n\n with tempfile.TemporaryFile() as file:\n for feature in features:\n if not no_validation:\n utils.validate_geojson(feature)\n file.write((json.dumps(feature) + \"\\n\").encode(\"utf-8\"))\n\n file.seek(0)\n m = MultipartEncoder(fields={\"file\": (\"file\", file)})\n resp = requests.post(\n url,\n data=m,\n headers={\n \"Content-Disposition\": \"multipart/form-data\",\n \"Content-type\": m.content_type,\n },\n )\n\n if resp.status_code == 200:\n click.echo(json.dumps(resp.json(), indent=indent))\n else:\n raise errors.TilesetsError(resp.text)", "def create_network_precommit(self, mech_context):\n\n LOG.debug(\"create_network_precommit: called\")\n network = mech_context.current\n context = mech_context._plugin_context\n tenant_id = network['tenant_id']\n network_id = network['id']\n\n segments = mech_context.network_segments\n # currently supports only one segment per network\n segment = segments[0]\n\n network_type = segment['network_type']\n vlan_id = segment['segmentation_id']\n segment_id = segment['id']\n\n if network_type != 'vlan':\n raise Exception(\n _(\"SeaMicro Mechanism: failed to create network, \"\n \"only network type vlan is supported\"))\n\n try:\n seamicro_db.create_network(context, network_id, vlan_id,\n segment_id, network_type, tenant_id)\n except Exception:\n LOG.exception(\n _LE(\"SeaMicro Mechanism: failed to create network in db\"))\n raise Exception(\n _(\"SeaMicro Mechanism: create_network_precommit failed\"))\n\n LOG.info(_LI(\"create network (precommit): %(network_id)s \"\n \"of network type = %(network_type)s \"\n \"with vlan = %(vlan_id)s \"\n \"for tenant %(tenant_id)s\"),\n {'network_id': network_id,\n 'network_type': network_type,\n 'vlan_id': vlan_id,\n 'tenant_id': tenant_id})", "def create(self, objectType, initialParameters):\n command_line = 'rsg create -ot=\"%s\" -pl=\"%s\" -ht=\"%s\"' % (\n objectType, initialParameters, self.host_name)\n return self._execTool(command_line)", "def create_connection(location=None, bandwidth=None, connectionName=None):\n pass", "def create_high_security_network(context):\n subnetworks = []\n private_ip_google_access = context.properties.get('privateIpGoogleAccess', False)\n for region in FIRECLOUD_NETWORK_REGIONS:\n subnetworks.append({\n # We append the region to the subnetwork's DM resource name, since\n # each resource name needs to be globally unique within the deployment.\n 'resourceName': FIRECLOUD_VPC_SUBNETWORK_NAME + '_' + region,\n # We want all subnetworks to have the same object name, since this most\n # closely mirrors how auto-mode subnets work and is what PAPI expects.\n 'name': FIRECLOUD_VPC_SUBNETWORK_NAME,\n 'region': region,\n 'ipCidrRange': FIRECLOUD_NETWORK_REGIONS[region],\n 'enableFlowLogs': context.properties.get('enableFlowLogs', False),\n 'privateIpGoogleAccess': private_ip_google_access\n })\n\n return [{\n 'type': 'templates/network.py',\n 'name': 'fc-network',\n 'properties': {\n 'resourceName': 'network',\n 'name': FIRECLOUD_VPC_NETWORK_NAME,\n 'projectId': '$(ref.fc-project.projectId)',\n 'autoCreateSubnetworks': False,\n 'subnetworks': subnetworks,\n # We pass the dependsOn list into the network template as a\n # parameter. 
Deployment Manager doesn't support dependsOn for\n # template-call nodes, so we can't have this resource itself depend on\n # the project-wide resources.\n 'dependsOn': '$(ref.fc-project.resourceNames)',\n 'createCustomStaticRoute': private_ip_google_access\n },\n }]", "def test_add_flow_request_with_sources(self):\n res = self._add_flow_request(flow_request=self.flow_request)\n self.assertEqual(res.status_code, 201)\n flow_request = res.json()\n destination = Destination.objects.get(name='Destination 1')\n self.assertEqual(flow_request['flow_id'], self.flow_request['flow_id'])\n self.assertEqual(flow_request['status'], 'PE')\n self.assertDictEqual(flow_request['profile'], self.flow_request['profile'])\n self.assertEqual(FlowRequest.objects.all().count(), 4)\n self.assertEqual(ConfirmationCode.objects.all().count(), 1)\n self.assertEqual(FlowRequest.objects.get(flow_id=flow_request['flow_id']).destination, destination)\n self.assertEqual(FlowRequest.objects.get(flow_id=flow_request['flow_id']).sources.count(), 1)\n source = FlowRequest.objects.get(flow_id=flow_request['flow_id']).sources.first()\n self.assertDictEqual(\n {'source_id': source.source_id, 'name': source.name},\n {'source_id': SOURCE_1_ID, 'name': SOURCE_1_NAME}\n )", "def test_networking_project_network_tag_create(self):\n pass", "def start_network(self):\n try:\n self.topo.build_topo()\n except:\n error('Cannot build the topology.')\n try:\n self.net = IPNet(topo=self.topo, use_v4=False, use_v6=True)\n self.net.start()\n except:\n self.stop_network()\n error('Cannot start the network.')", "def list_network_sources(self, compartment_id, **kwargs):\n resource_path = \"/networkSources\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_network_sources got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[NetworkSourcesSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[NetworkSourcesSummary]\")", "def __init__(__self__, *,\n id: str,\n source: str):\n pulumi.set(__self__, \"id\", id)\n pulumi.set(__self__, \"source\", 'Azure')", "def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n network = self.neutron.create_network(**(network_create_args or {}))\n for _ in range(subnets_per_network):\n self.neutron.create_subnet(network[\"id\"],\n start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n self.neutron.list_subnets()", "def add_Circuit(self, name, circuit_type, neuropil = None, data_source = None):\n assert isinstance(name, str), 
'name must be of str type'\n self._database_writeable_check()\n connect_DataSource = self._default_DataSource if data_source is None \\\n else self._get_obj_from_str(data_source)\n if connect_DataSource is None:\n raise TypeError('Default DataSource is missing.')\n self._uniqueness_check('Circuit', unique_in = connect_DataSource,\n name = name)\n\n circuit_info = {'name': name}\n \n batch = self.graph.batch()\n node_name = _to_var_name('Circuit_{}'.format(name))\n plural = getattr(models, circuit_type).element_plural\n batch[node_name] = getattr(batch, plural).create(**circuit_info)\n\n # Link subsystem if specified\n if neuropil is not None:\n if isinstance(neuropil, str):\n neuropil_obj = self.get('Neuropil', neuropil, connect_DataSource)\n elif isinstance(neuropil, models.Neuropil):\n if self._is_in_datasource(connect_DataSource, neuropil):\n neuropil_obj = neuropil\n else:\n raise ValueError(\n 'Neuropil {} with rid {} to be linked with subregion is \\\n not in the same datasource {} version {}'.format(\n neuropil.name, neuropil._id,\n connect_DataSource.name, connect_DataSource.version))\n self.link_with_batch(batch, neuropil_obj, batch[:node_name], 'Owns')\n\n # Link data_source\n self.link_with_batch(batch, connect_DataSource, batch[:node_name],\n 'Owns')\n\n circuit = batch['${}'.format(node_name)]\n batch.commit(20)\n\n self.set('Circuit', name, circuit, data_source = connect_DataSource)\n return circuit", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def network_create_event(self, network_info):\n net = network_info['network']\n net_id = net['id']\n net_name = net.get('name')\n network_db_elem = self.get_network(net_id)\n # Check if the source of network creation is FW and if yes, skip\n # this event.\n # Check if there's a way to read the DB from service class\n # TODO(padkrish)\n if self.fw_api.is_network_source_fw(network_db_elem, net_name):\n LOG.info(_LI(\"Service network %s, returning\"), net_name)\n return\n self.network[net_id] = {}\n self.network[net_id].update(net)\n\n net_name = net.get('name')\n tenant_id = net.get('tenant_id')\n\n # Extract segmentation_id from the network name\n net_ext_name = self.cfg.dcnm.dcnm_net_ext\n nobj = re.search(net_ext_name, net_name)\n try:\n seg_id = int((net_name[nobj.start(0) + len(net_ext_name) - 1:]\n if nobj else None))\n except (IndexError, TypeError, ValueError):\n seg_id = None\n\n # Check if network is already created.\n query_net = self.get_network_by_segid(seg_id) if seg_id else None\n if query_net:\n # The network is already created no need to process the event.\n if query_net.source.lower() == 'dcnm':\n # DCNM created the network. Only update network id in database.\n prev_id = query_net.network_id\n params = dict(columns=dict(network_id=net_id))\n self.update_network(prev_id, **params)\n\n # Update the network cache.\n prev_info = self.network.pop(prev_id)\n prev_info['id'] = net_id\n self.network[net_id] = prev_info\n\n # Update the network name. After extracting the segmentation_id\n # no need to keep it in the name. 
Removing it and update\n # the network.\n updated_net_name = (\n net_name[:nobj.start(0) + len(net_ext_name) - 1])\n try:\n body = {'network': {'name': updated_net_name, }}\n dcnm_net = self.neutronclient.update_network(\n net_id, body=body).get('network')\n LOG.debug('Updated network %(network)s', dcnm_net)\n except Exception as exc:\n LOG.exception(_LE('Failed to update network '\n '%(network)s. Reason %(err)s.'),\n {'network': updated_net_name,\n 'err': str(exc)})\n return\n\n LOG.info(_LI('network_create_event: network %(name)s was created '\n 'by %(source)s. Ignoring processing the event.'),\n {'name': net_name, 'source': 'dcnm'})\n return\n\n # Check if project (i.e. tenant) exist.\n tenant_name = self.get_project_name(tenant_id)\n if not tenant_name:\n LOG.error(_LE('Failed to create network %(name)s. Project '\n '%(tenant_id)s does not exist.'),\n {'name': net_name, 'tenant_id': tenant_id})\n return\n\n pseg_id = self.network[net_id].get('provider:segmentation_id')\n seg_id = self._get_segmentation_id(net_id, pseg_id, 'openstack')\n self.network[net_id]['segmentation_id'] = seg_id\n try:\n cfgp, fwd_mod = self.dcnm_client.get_config_profile_for_network(\n net.get('name'))\n self.network[net_id]['config_profile'] = cfgp\n self.network[net_id]['fwd_mod'] = fwd_mod\n self.add_network_db(net_id, self.network[net_id],\n 'openstack',\n constants.RESULT_SUCCESS)\n LOG.debug('network_create_event: network=%s', self.network)\n except dexc.DfaClientRequestFailed:\n # Fail to get config profile from DCNM.\n # Save the network info with failure result and send the request\n # to DCNM later.\n self.add_network_db(net_id, self.network[net_id], 'openstack',\n constants.CREATE_FAIL)\n LOG.error(_LE('Failed to create network=%s.'), self.network)", "def initialize_network(self, cidr, is_external):\n raise NotImplementedError()", "def create_network(\n self, is_internal: bool = True\n ) -> None:\n if self.network:\n self.log.warn(f\"Network {self.network_name} was already created!\")\n return\n\n existing_networks = self.docker.networks.list(\n names=[self.network_name]\n )\n if existing_networks:\n if len(existing_networks) > 1:\n self.log.error(\n f\"Found multiple ({len(existing_networks)}) existing \"\n f\"networks {self.network_name}. Please delete all or all \"\n \"but one before starting the server!\")\n exit(1)\n self.log.info(f\"Network {self.network_name} already exists! 
Using \"\n \"existing network\")\n self.network = existing_networks[0]\n self.network.reload() # required to initialize containers in netw\n else:\n self.network = self.docker.networks.create(\n self.network_name,\n driver=\"bridge\",\n internal=is_internal,\n scope=\"local\",\n )", "def run(self, network_update_args, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.update_network(network[\"id\"], **network_update_args)", "def create_network_profile(projectArn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n pass", "def createLotsNetworks(proxy_url, sessiontoken,network_number):\n myHeader = {\"Content-Type\": \"application/json\",\"Accept\": \"application/json\", 'csp-auth-token': sessiontoken}\n for x in range(0,network_number):\n display_name = \"network-name\"+str(x)\n myURL = (proxy_url + \"/policy/api/v1/infra/tier-1s/cgw/segments/\" + display_name)\n # '/tier-1s/cgw' might only be applicable for multi tier-1s architecture. To be confirmed.\n # print(myURL)\n json_data = {\n \"subnets\":[{\"gateway_address\":\"10.200.\"+str(x)+\".1/24\"}],\n \"type\":\"ROUTED\",\n \"display_name\":display_name,\n \"advanced_config\":{\"connectivity\":\"ON\"},\n \"id\":\"network-test\"+str(x)\n }\n response = requests.put(myURL, headers=myHeader, json=json_data)\n json_response_status_code = response.status_code", "def CreateRequests(self, args):\n\n target_vpn_gateway_ref = self.TARGET_VPN_GATEWAY_ARG.ResolveAsResource(\n args,\n self.resources,\n scope_lister=compute_flags.GetDefaultScopeLister(self.compute_client,\n self.project))\n network_ref = self.NETWORK_ARG.ResolveAsResource(args, self.resources)\n\n request = self.messages.ComputeTargetVpnGatewaysInsertRequest(\n project=self.project,\n region=target_vpn_gateway_ref.region,\n targetVpnGateway=self.messages.TargetVpnGateway(\n description=args.description,\n name=target_vpn_gateway_ref.Name(),\n network=network_ref.SelfLink()\n ))\n return [request]", "def _source_subnet(value):\n match = SOURCE_SUBNET_RE.match(value)\n if match:\n named_groups = match.groupdict()\n subnet_id = named_groups['SubnetID']\n role = named_groups['Role']\n registration_status = named_groups['RegistrationStatus']\n return SourceSubnet(\n role = role,\n registration_status = registration_status,\n subnet_id = subnet_id\n )\n\n raise argparse.ArgumentTypeError('Given argument \"%s\" is not a valid source subnet' % value)", "def empty_network(network_id=NETWORK_ID):\n return make_net_model({\"id\": network_id,\n \"subnets\": [],\n \"ports\": [],\n \"tenant_id\": \"calico\",\n \"mtu\": neutron_constants.DEFAULT_NETWORK_MTU})", "def testCreateWmsSource(self):\n\n path = '/minerva_source_wms'\n name = 'testWMS'\n username = ''\n password = ''\n baseURL = 'http://demo.boundlessgeo.com/geoserver/ows'\n params = {\n 'name': name,\n 'username': username,\n 'password': password,\n 'baseURL': baseURL\n }\n response = self.request(path=path, method='POST', params=params, user=self._user)\n self.assertStatusOk(response)\n wmsSource = response.json\n minerva_metadata = wmsSource['meta']['minerva']\n self.assertEquals(wmsSource['name'], name, 'incorrect wms source name')\n self.assertEquals(minerva_metadata['source_type'], 'wms', 'incorrect wms source type')\n self.assertEquals(minerva_metadata['wms_params']['base_url'], baseURL, 
'incorrect wms source baseURL')", "def create_community_resource(dataset_id, cr_file):\n logging.debug(\"Creating a community resource on dataset %s\", dataset_id)\n headers = {\"X-API-KEY\": DATAGOUV_API_KEY}\n files = {\"file\": open(cr_file, \"rb\")}\n url = f\"{DATAGOUV_API}/datasets/{dataset_id}/upload/community/\"\n\n ret = requests.post(url, headers=headers, files=files)\n ret.raise_for_status()\n json = ret.json()\n\n logging.debug(\n \"Created a new community resource %s on dataset %s\", json[\"id\"], dataset_id\n )\n\n return json", "def create_stac_item(\n source: Union[str, DatasetReader, DatasetWriter, WarpedVRT, MemoryFile],\n input_datetime: Optional[datetime.datetime] = None,\n extensions: Optional[List[str]] = None,\n collection: Optional[str] = None,\n collection_url: Optional[str] = None,\n properties: Optional[Dict] = None,\n id: Optional[str] = None,\n assets: Optional[Dict[str, pystac.Asset]] = None,\n asset_name: str = \"asset\",\n asset_roles: Optional[List[str]] = None,\n asset_media_type: Optional[Union[str, pystac.MediaType]] = None,\n asset_href: Optional[str] = None,\n with_proj: bool = False,\n with_raster: bool = False,\n raster_max_size: int = 1024,\n) -> pystac.Item:\n properties = properties or {}\n extensions = extensions or []\n\n with ExitStack() as ctx:\n if isinstance(source, (DatasetReader, DatasetWriter, WarpedVRT)):\n dataset = source\n else:\n dataset = ctx.enter_context(rasterio.open(source))\n\n if dataset.gcps[0]:\n src_dst = ctx.enter_context(\n WarpedVRT(\n dataset,\n src_crs=dataset.gcps[1],\n src_transform=transform.from_gcps(dataset.gcps[0]),\n )\n )\n else:\n src_dst = dataset\n\n meta = get_metadata(src_dst)\n\n media_type = (\n get_media_type(dataset) if asset_media_type == \"auto\" else asset_media_type\n )\n\n # add projection properties\n if with_proj:\n properties.update(\n {\n f\"proj:{name}\": value\n for name, value in get_projection_info(src_dst).items()\n }\n )\n extensions.append(\n f\"https://stac-extensions.github.io/projection/{PROJECTION_EXT_VERSION}/schema.json\",\n )\n\n # add raster properties\n raster_info = {}\n if with_raster:\n raster_info = {\"raster:bands\": get_raster_info(dataset)}\n extensions.append(\n f\"https://stac-extensions.github.io/raster/{RASTER_EXT_VERSION}/schema.json\",\n )\n\n # item\n item = pystac.Item(\n id=id or os.path.basename(dataset.name),\n geometry=meta[\"footprint\"],\n bbox=meta[\"bbox\"],\n collection=collection,\n stac_extensions=extensions,\n datetime=input_datetime,\n properties=properties,\n )\n\n # if we add a collection we MUST add a link\n if collection:\n item.add_link(\n pystac.Link(\n pystac.RelType.COLLECTION,\n collection_url or collection,\n media_type=pystac.MediaType.JSON,\n )\n )\n\n # item.assets\n if assets:\n for key, asset in assets.items():\n item.add_asset(\n key=key, asset=asset,\n )\n\n else:\n item.add_asset(\n key=asset_name,\n asset=pystac.Asset(\n href=asset_href or dataset.name,\n media_type=media_type,\n extra_fields=raster_info,\n ),\n )\n\n return item", "def __init__(__self__, *,\n managed_network_name: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n kind: Optional[pulumi.Input[Union[str, 'Kind']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n managed_network_group_name: Optional[pulumi.Input[str]] = None,\n management_groups: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]] = None,\n subnets: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]] = None,\n subscriptions: 
Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]] = None,\n virtual_networks: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]] = None):\n pulumi.set(__self__, \"managed_network_name\", managed_network_name)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if kind is not None:\n pulumi.set(__self__, \"kind\", kind)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if managed_network_group_name is not None:\n pulumi.set(__self__, \"managed_network_group_name\", managed_network_group_name)\n if management_groups is not None:\n pulumi.set(__self__, \"management_groups\", management_groups)\n if subnets is not None:\n pulumi.set(__self__, \"subnets\", subnets)\n if subscriptions is not None:\n pulumi.set(__self__, \"subscriptions\", subscriptions)\n if virtual_networks is not None:\n pulumi.set(__self__, \"virtual_networks\", virtual_networks)", "def CreateRequests(self, args):\n\n image = self.messages.Image(\n name=args.name,\n description=args.description,\n sourceType=self.messages.Image.SourceTypeValueValuesEnum.RAW,\n family=args.family)\n\n csek_keys = csek_utils.CsekKeyStore.FromArgs(\n args, self._ALLOW_RSA_ENCRYPTED_CSEK_KEYS)\n if csek_keys:\n image_ref = self.resources.Parse(args.name, collection='compute.images')\n image.imageEncryptionKey = csek_utils.MaybeToMessage(\n csek_keys.LookupKey(image_ref,\n raise_if_missing=args.require_csek_key_create),\n self.compute_client.apitools_client)\n\n # Validate parameters.\n if args.source_disk_zone and not args.source_disk:\n raise exceptions.ToolException(\n 'You cannot specify [--source-disk-zone] unless you are specifying '\n '[--source-disk].')\n\n if args.source_disk and args.source_uri:\n raise exceptions.ConflictingArgumentsException(\n '--source-uri', '--source-disk')\n\n if not (args.source_disk or args.source_uri):\n raise exceptions.MinimumArgumentException(\n ['--source-uri', '--source-disk'],\n 'Please specify either the source disk or the Google Cloud Storage '\n 'URI of the disk image.'\n )\n\n # TODO(user): use resources.REGISTRY.Parse() for GCS URIs (b/30086260).\n if args.source_uri:\n source_uri = utils.NormalizeGoogleStorageUri(args.source_uri)\n image.rawDisk = self.messages.Image.RawDiskValue(source=source_uri)\n else:\n source_disk_ref = flags.SOURCE_DISK_ARG.ResolveAsResource(\n args, self.resources,\n scope_lister=compute_flags.GetDefaultScopeLister(\n self.compute_client, self.project))\n image.sourceDisk = source_disk_ref.SelfLink()\n image.sourceDiskEncryptionKey = csek_utils.MaybeLookupKeyMessage(\n csek_keys, source_disk_ref, self.compute_client.apitools_client)\n\n if args.licenses:\n image.licenses = args.licenses\n\n guest_os_features = getattr(args, 'guest_os_features', [])\n if guest_os_features:\n guest_os_feature_messages = []\n for feature in guest_os_features:\n gf_type = self.messages.GuestOsFeature.TypeValueValuesEnum(feature)\n guest_os_feature = self.messages.GuestOsFeature()\n guest_os_feature.type = gf_type\n guest_os_feature_messages.append(guest_os_feature)\n image.guestOsFeatures = guest_os_feature_messages\n\n request = self.messages.ComputeImagesInsertRequest(\n image=image,\n project=self.project)\n\n return [request]", "def request_workspace_add(self, request):\n user_id = request['user_id']\n alias = request['alias'] \n source_uuid = request['source'] \n# print('###', user_id)\n# print('###', alias)\n# print('###', source_uuid)\n \n response = self.copy_workspace(user_id, source_uuid=source_uuid, 
target_alias=alias)\n \n return response", "def new_network(router_simulator):\n router = input('Enter router name: ')\n network = input('Enter network: ')\n try:\n distance = int(input('Enter distance: '))\n except ValueError:\n print('Distance not valid.')\n return\n\n try:\n router_simulator.add_network(router, network, distance)\n except KeyError:\n print('Router was not found.')", "def __init__(__self__, *,\n kind: str,\n name: Optional[str] = None,\n source_id: Optional[str] = None):\n pulumi.set(__self__, \"kind\", kind)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if source_id is not None:\n pulumi.set(__self__, \"source_id\", source_id)", "def AddNetwork(parser):\n parser.add_argument(\n '--network',\n help=(\n 'Network in the current project that the instance will be part '\n 'of. To specify using a network with a shared VPC, use the full '\n \"URL of the network. For an example host project, 'testproject', \"\n \"and shared network, 'testsharednetwork', this would use the \"\n 'form: '\n '`--network`=`projects/testproject/global/networks/'\n 'testsharednetwork`'\n ),\n )", "def __init__(__self__, *,\n machine_name: str,\n source: str,\n source_computer_id: str,\n vmuuid: str,\n workspace_id: str):\n pulumi.set(__self__, \"machine_name\", machine_name)\n pulumi.set(__self__, \"source\", 'OnPremise')\n pulumi.set(__self__, \"source_computer_id\", source_computer_id)\n pulumi.set(__self__, \"vmuuid\", vmuuid)\n pulumi.set(__self__, \"workspace_id\", workspace_id)", "def copy_to_region(self, region, name=None):\r\n if region.name == self.region:\r\n raise BotoClientError('Unable to copy to the same Region')\r\n conn_params = self.connection.get_params()\r\n rconn = region.connect(**conn_params)\r\n sg = rconn.create_security_group(name or self.name, self.description)\r\n source_groups = []\r\n for rule in self.rules:\r\n grant = rule.grants[0]\r\n for grant in rule.grants:\r\n if grant.name:\r\n if grant.name not in source_groups:\r\n source_groups.append(grant.name)\r\n sg.authorize(None, None, None, None, grant)\r\n else:\r\n sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port,\r\n grant.cidr_ip)\r\n return sg", "def create(ctx, iface, resource_config, **_):\n resource_id = \\\n utils.get_resource_id(\n ctx.node,\n ctx.instance,\n resource_config.get(VPN_CONNECTION_ID),\n use_instance_id=True\n )\n utils.update_resource_id(ctx.instance, resource_id)\n # Actually create the resource\n create_response = iface.create(resource_config)\n ctx.instance.runtime_properties['create_response'] = \\\n utils.JsonCleanuper(create_response).to_dict()\n ctx.instance.runtime_properties['VPN_CONNECTION_ID'] = \\\n resource_config.get(VPN_CONNECTION_ID)\n ctx.instance.runtime_properties['DESTINATION_CIDR_BLOCK'] = \\\n resource_config.get(DESTINATION_CIDR_BLOCK)", "def GenerateAssetForCreateRequest(args):\n module = dataplex_api.GetMessageModule()\n resource_spec_field = module.GoogleCloudDataplexV1AssetResourceSpec\n resource_spec = module.GoogleCloudDataplexV1AssetResourceSpec(\n name=args.resource_name,\n type=resource_spec_field.TypeValueValuesEnum(args.resource_type),\n )\n request = module.GoogleCloudDataplexV1Asset(\n description=args.description,\n displayName=args.display_name,\n labels=dataplex_api.CreateLabels(module.GoogleCloudDataplexV1Asset, args),\n resourceSpec=resource_spec,\n )\n discovery = GenerateDiscoverySpec(args)\n if discovery != module.GoogleCloudDataplexV1AssetDiscoverySpec():\n setattr(request, 'discoverySpec', discovery)\n return request", "def 
_create_graph(netlist):\n G = nx.Graph()\n for t in netlist:\n G.add_edges_from([(t.name, t.drain), (t.name, t.gate), (t.name, t.source)])\n return G", "def create_network(model_file=DEFAULT_MODEL_FILE, pretrained=DEFAULT_PRETRAINED, *args, **kwargs):\n net = imagenet_classifier(*args,**kwargs)\n net.set_phase_test()\n net.set_mode_cpu()\n return net", "def make_network_set(name, networkUris=[]):\n\n return {\n 'name': name,\n 'type': 'network-set',\n 'nativeNetworkUri': None,\n 'networkUris': networkUris[:],\n 'connectionTemplateUri': None}" ]
[ "0.65968263", "0.64328766", "0.63080657", "0.60401773", "0.585405", "0.5773544", "0.5750441", "0.5715569", "0.569809", "0.55888134", "0.5588614", "0.5553594", "0.54480547", "0.5373371", "0.5347243", "0.53328407", "0.5326178", "0.53198993", "0.5311947", "0.52965164", "0.52929527", "0.5270218", "0.5234661", "0.5220858", "0.52133614", "0.52063507", "0.5205628", "0.5166636", "0.51565284", "0.51444143", "0.51334774", "0.5131441", "0.51275235", "0.51014805", "0.50814223", "0.50769323", "0.5074249", "0.5062834", "0.5058499", "0.5056606", "0.50371295", "0.5033076", "0.502778", "0.50139195", "0.50113475", "0.5001417", "0.49939573", "0.49828845", "0.49820232", "0.4970704", "0.49672854", "0.4965855", "0.4939921", "0.49304464", "0.49101698", "0.49085504", "0.49052817", "0.48938397", "0.4884536", "0.48842368", "0.4879119", "0.48719698", "0.48550883", "0.48500666", "0.4846681", "0.4831963", "0.48290277", "0.48251617", "0.48162788", "0.47990924", "0.47881907", "0.47873548", "0.47703046", "0.47696108", "0.47659346", "0.47645563", "0.47627604", "0.47617888", "0.4750368", "0.47498903", "0.4742167", "0.47378832", "0.47321358", "0.47317696", "0.47269976", "0.47260365", "0.47219142", "0.47218364", "0.471429", "0.47125453", "0.47111624", "0.4704347", "0.46965212", "0.46930036", "0.4690475", "0.4689405", "0.4686237", "0.4679039", "0.46789414", "0.46759167" ]
0.6830833
0
Creates Oauth token for the user
def create_o_auth_client_credential(self, user_id, create_o_auth2_client_credential_details, **kwargs):
    resource_path = "/users/{userId}/oauth2ClientCredentials"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_o_auth_client_credential got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "userId": user_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=create_o_auth2_client_credential_details,
            response_type="OAuth2ClientCredential")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=create_o_auth2_client_credential_details,
            response_type="OAuth2ClientCredential")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_auth_token(self, user=None):\n token, created = Token.objects.get_or_create(user=user)\n return token", "def token_auth(self):\n self.client = APIClient()\n self.user = User.objects.create_user(username='testuser', email='test@test.com', password='testpassword')\n self.token = Token.objects.create(user=self.user)\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)", "def create_token(request, user):\n\n key = get_random_string(100)\n data = {}\n ip = get_client_ip_address(request)\n\n return Token.objects.create(user=user, key=key, data=json.dumps(data), ip=ip)", "def create_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n expiration = int(data.get(\"expiration\"))\n\n pk = get_provider_private_key(use_universal_key=True)\n token = jwt.encode({\"exp\": expiration, \"address\": address}, pk, algorithm=\"HS256\")\n token = token.decode(\"utf-8\") if isinstance(token, bytes) else token\n\n valid, message = is_token_valid(token, address)\n if not valid:\n if message == \"Token is deleted.\":\n force_restore_token(token)\n else:\n return jsonify(error=message), 400\n\n return jsonify(token=token)", "def create_token(user):\n access_token = create_access_token(user)\n payload = jwt.decode(\n access_token,\n app.config['JWT_SECRET_KEY'],\n algorithms=app.config['JWT_DECODE_ALGORITHMS'])\n data = {\n 'token':access_token,\n 'username': user.username,\n }\n data.update(payload)\n data['exp'] = datetime.fromtimestamp(data['exp'])\n app.logger.debug(str(data))\n if app.config.get('KEEP_TOKEN'):\n # deletes old tokens\n tokens = app.data.driver.db[config.DOMAIN['token']['datasource']['source']]\n tokens.delete_many({'username': user.username})\n # insets new token\n result = app.data.insert('token', data)\n return access_token, str(result[0])\n\n return access_token, None", "def test_create_o_auth_access_token(self):\n pass", "def generate_token(user):\n if JWT_AUTH:\n payload = JWT_PAYLOAD_HANDLER(user)\n return JWT_ENCODE_HANDLER(payload)\n else:\n token = Token.objects.create(user=user)\n token.save()\n return token", "def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)", "def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)", "def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)", "def post(self, **kwargs):\n username: str = request.json.get(\"username\", None)\n password: str = request.json.get(\"password\", None)\n user = get_user_instance(username, password)\n return {\"access_token\": create_access_token(identity=user)}, 200", "def create_token():\n def token_helper():\n token = util.prompt_for_user_token(username=\"robbo1992\", scope='user-library-read playlist-modify-private playlist-modify',\n client_id=config[\"spotify\"][\"client_id\"], client_secret=config[\"spotify\"][\"secret_id\"],\n redirect_uri='http://localhost:8080', cache_path=spotify_cache)\n return token\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n if motley.internet:\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n log.error(\"Authentication error in create_token method.\")", "def create(self, request):\n token = AuthTokenClass().post(request)\n 
return token", "def post(self):\n _purge_expired_user_tokens()\n\n request_dict = get_json_and_verify_params({\n 'description': {'type': str, 'optional': True},\n 'expiration_date': {'optional': True},\n })\n\n expiration_date = request_dict.get('expiration_date')\n if expiration_date:\n expiration_date = parse_utc_datetime(\n expiration_date, timezone=\"UTC\")\n\n return current_user.create_auth_token(request_dict.get('description'),\n expiration_date)", "def generate_token(user):\n try:\n # generate the auth token\n auth_token = User.encode_auth_token(user.id)\n response_object = {\n \"status\": \"success\",\n \"message\": \"Successfully registered.\",\n \"Authorization\": auth_token.decode(),\n }\n return response_object, 201\n except Exception as e:\n response_object = {\n \"status\": \"fail\",\n \"message\": \"Some error occurred. Please try again.\",\n }\n return response_object, 401", "async def create_token(self, *args, **kwargs) -> OAuth2Token:\n token = await super().create_token(*args, **kwargs)\n # NOTE: Save data from token to db here.\n return token", "def create_auth_token(sender, instance=None, created=False, **kwargs): # pylint: disable=unused-argument\n if created:\n Token.objects.create(user=instance) # pylint: disable=no-member", "def create(self,request):\n return CustomAuthToken().post(request)", "def generate_token(usr):\n token = jwt.encode({\"user\":usr, \"exp\":datetime.datetime.utcnow()\n + datetime.timedelta(minutes=30)}, KEY)\n user = User.update(token=token).where(User.username == usr)\n user.execute()\n return token", "def test_create_token_for_user(self):\n\n credentials = {'email': 'testuser@gmail.com', 'password': 'Testpass12'}\n get_user_model().objects.create_user(**credentials)\n\n response = self.client.post(URL_TOKEN, credentials)\n\n # Check that the response is HTTP 200, and contains a token.\n self.assertIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def create(self, request):\n return ObtainAuthToken().post(request)", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n headers = self.get_success_headers(serializer.data)\n return Response(\n dict(serializer.data, token=str(user.auth_token)),\n status=status.HTTP_201_CREATED,\n headers=headers\n )", "def create_bearer_token(self):\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n\n data = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token,\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret,\n }\n\n r = requests.post(self.token_endpoint, headers=headers, data=data)\n\n if r.status_code == 200:\n logging.info(\"Successfully obtained bearer token\")\n self.bearer_token = r.json()[\"access_token\"]\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))", "def post(self, request, *args, **kwargs):\n self.create(request, *args, **kwargs)\n token, created = Token.objects.get_or_create(user=self.user)\n return Response({'token': token.key}, status=201)", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created 
= Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create(self, data):\n token, created = Token.objects.get_or_create(user=self.context['user'])\n return self.context['user'], token.key", "def create_auth_token(\n username: str,\n admin: t.Optional[bool] = False,\n spotify: t.Optional[bool] = False,\n) -> JWT:\n auth_token: JWT = auth_manager.auth_token(\n username, {\"admin\": admin, \"spotify\": spotify}\n )\n return auth_token", "def test_create_o_auth_authorize_token(self):\n pass", "def test_create_token_for_user(self):\n payload = {'email': 'test1@test1.ri',\n 'password': 'testPassWord',\n 'time_zone': 'Europe/Dublin'}\n create_user(**payload)\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def create(self, request):\n\n return ObtainAuthToken().post(request)", "def test_create_token_for_user(self):\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'abcd1234',\n }\n\n create_user(**payload)\n response = self.client.post(TOKEN_URL, payload)\n\n # We expect a token and should get a HTTP 200\n self.assertIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def test_create_token_for_user(self):\r\n payload = {\r\n 'email': 'test@max.net',\r\n 'password': 'Testpass123',\r\n 'name': 'Maks'\r\n }\r\n create_user(**payload)\r\n\r\n res = self.client.post(TOKEN_URL, payload)\r\n\r\n self.assertIn('token', res.data)\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def create_access_token(oauth):\n #create parameters for API authorization\n\tredirect_uri = 'oob'\n\tparams = {'client_secret': oauth.client_secret,\n\t\t\t 'redirect_uri': redirect_uri,\n\t\t\t 'response_type': 'code'}\n\t#store the access code\n\turl = oauth.get_authorize_url(**params)\n\n\t#open a web browser to get access token and then store it via manual input\n\twebbrowser.open(url)\n\tcode = input('Enter code: ')\n\t#create credentials item\n\tstart_time = time.time()\n\t#create dictionary to hold credentials and store beginning time\n\tcredentials = {'token_time': start_time}\n\n\t#NEED TO ADD IN 'REFRESH TOKEN' FUNCTION HERE SOMEWHERE\n\t#\n\t\n\t#create parameters\n\tdata = {'code': code,\n\t\t\t'redirect_uri': redirect_uri,\n\t\t\t'grant_type': 
'authorization_code'}\n\t#build the headers\n\theaders = oauth_headers(oauth)\n\t#create the raw access token\n\traw_access = oauth.get_raw_access_token(data=data, headers=headers)\n\t#parse the raw access token and add to credentials variable\n\tcredentials.update(access_parse(raw_access))\n\n\t#parse access token from credentials\n\taccess_token = credentials['access_token']\n\t#return access token\n\treturn access_token", "def build_access_token_normal_user():\n return do_build_access_token(tenant_id='intility_tenant_id', admin=False)", "def grant_token(request):\n\n grant_token_svc = request.find_service(name=\"grant_token\")\n h_user = request.lti_user.h_user\n\n return {\"grant_token\": grant_token_svc.generate_token(h_user)}", "def get_token(self):\n\t\tself.client.post('/api/v1/auth/signup', data=json.dumps(self.signup_user), content_type='application/json')\n\t\tresponse = self.client.post('/api/v1/auth/login', data=json.dumps(self.login_user), content_type='application/json')\n\t\tresp = json.loads(response.data.decode())\n\t\treturn 'Bearer ' + resp['access_token']", "def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))", "def test_create_token_for_user(self):\n payload = {'email': 'test@test.com', 'password': 'testpass'}\n create_user(**payload)\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def UserToken(self) -> object:", "def test_create_token_for_user(self):\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'testpass'\n }\n create_user(**payload)\n res = self.client.post(TOKEN_URI, payload)\n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def create_token(user):\n payload = {\n 'sub': user.id,\n 'iat': datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(days=1)\n }\n token = jwt.encode(payload, config.SECRET_KEY, algorithm='HS256')\n return token.decode('unicode_escape')", "def set_auth_token_header(self):\n\n username = 'test-user'\n passwd = 'testuserpass1234'\n user = User.objects.create(username=username)\n user.set_password(passwd)\n user.save()\n\n assert Account.objects.get(user=user) is not None\n url = reverse('token_obtain_pair')\n res = self.client.post(url,\n data={'username': username, 'password': passwd})\n self.client.credentials(HTTP_AUTHORIZATION=\n f\"Bearer {res.data['access']}\")\n return user", "def create_token_response(self):\n client = self.request.client\n scope = self.request.credential.get_scope()\n token = self.generate_token(\n user=self.request.user,\n scope=scope,\n include_refresh_token=client.check_grant_type('refresh_token'),\n )\n log.debug('Issue token %r to %r', token, client)\n self.save_token(token)\n self.execute_hook('process_token', token=token)\n return 200, token, self.TOKEN_RESPONSE_HEADER", "def create_token(self, consumer, token_type, timestamp, scope,\n user=None, callback=None, callback_confirmed=False):\n token = self.create(consumer=consumer, \n token_type=token_type, \n timestamp=timestamp,\n scope=scope,\n user=user,\n callback=callback,\n callback_confirmed=callback_confirmed,\n key=uuid.uuid4().hex,\n secret=get_random_string(length=SECRET_SIZE))\n\n return token", "def create_auth_token(sender, instance=None, created=False, 
**kwargs):\n\n if created:\n # Generate API token for user.\n api_token = Token.objects.create(user=instance)\n\n # Only create agent using username and API token for non-admin users.\n if instance.is_superuser is False:\n Agent.objects.create(scan_agent=instance, api_token=api_token)", "def get_user_token(username, expires_at=None):\n issued_at = datetime.utcnow()\n token = AccessToken()\n token.payload.update(\n {\n \"email\": f\"{username}@funmooc.fr\",\n \"exp\": expires_at or issued_at + timedelta(days=2),\n \"iat\": issued_at,\n \"language\": settings.LANGUAGE_CODE,\n \"username\": username,\n }\n )\n return token", "def token_gen_call(username, password, exp=None):\n #pdb.set_trace()\n \n #username_set = params['AUTH']['username_set']\n #password_set = params['AUTH']['password_set']\n username_set = username\n password_set = password\n \"\"\"\n Creates JWT Token\n :return:\n \"\"\"\n if exp is None:\n exp = datetime.utcnow() + timedelta(seconds=3600)\n _token = {\n 'aud': JWT_AUDIENCE,\n 'exp': exp,\n 'iss': JWT_ISSUER,\n 'user': username,\n 'role': 'admin',\n 'time':time.time()\n }\n _token.update(_token)\n \n if password_set == password and username_set == username: # example, don't do this in production\n return {\"token\" : jwt.encode(_token, SECRET_KEY, algorithm=JWT_OPTIONS_ALGORITHM).decode('utf-8') }\n return 'Invalid username and/or password for user: {0}'.format(username)", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def generate_token(email):\n access_token = create_access_token(email)\n return access_token", "def create_oauth(self, user):\r\n from oauth_provider.models import Consumer, Token, Resource\r\n\r\n # Necessary setup for ``oauth_provider``.\r\n resource, _ = Resource.objects.get_or_create(url='test', defaults={\r\n 'name': 'Test Resource'\r\n })\r\n consumer, _ = Consumer.objects.get_or_create(key='123', defaults={\r\n 'name': 'Test',\r\n 'description': 'Testing...'\r\n })\r\n token, _ = Token.objects.get_or_create(key='foo', token_type=Token.ACCESS, defaults={\r\n 'consumer': consumer,\r\n 'resource': resource,\r\n 'secret': '',\r\n 'user': user,\r\n })\r\n\r\n # Then generate the header.\r\n oauth_data = {\r\n 'oauth_consumer_key': '123',\r\n 'oauth_nonce': 'abc',\r\n 'oauth_signature': '&',\r\n 'oauth_signature_method': 'PLAINTEXT',\r\n 'oauth_timestamp': str(int(time.time())),\r\n 'oauth_token': 'foo',\r\n }\r\n return 'OAuth %s' % ','.join([key + '=' + value for key, value in oauth_data.items()])", "def post(self):\n current_user_id = get_jwt_identity()\n new_token = create_access_token(identity=current_user_id)\n response, status = {\n 'message': 'Access token was successfully refreshed',\n 'access_token': new_token\n }, 200\n return Response(dumps(response), status=status, mimetype='application/json')", "def test_create_token_to_user(self):\n data = {\n 'email': 'test@test.com', \n 'password': \"testtest\"\n }\n res = self.client.post(TOKEN_URL, data)\n\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def get_token():\n if not request.is_json:\n return jsonify({\"msg\": \"Missing JSON in request\"}), 400\n 
username = request.json.get('username', None)\n password = request.json.get('password', None)\n\n if not username:\n abort(400, \"Invalid username or password\")\n if not password:\n abort(400, \"Invalid username or password\")\n users = app.data.driver.db[config.DOMAIN['user']['datasource']['source']]\n user = users.find_one({'email':username})\n # validate the user in the user's service\n if not user:\n abort(401, \"Invalid username or password\")\n if not check_password_hash(user.get('password'), password):\n abort(401, \"Invalid username or password\")\n role = user.get('role', 'user')\n user_id = str(user.get('_id'))\n user = User(user_id, username, role)\n access_token, refresh_token = create_token(user)\n return jsonify(\n token=access_token,\n type='bearer',\n roles=role,\n user=username,\n refreshToken=refresh_token), 200", "def get_token(cls, user, full_result=False):\n if user is None:\n return EMPTY_KNOX_TOKEN\n result = AuthToken.objects.create(user=user)\n return result if full_result else result[1]", "def test_create_token(self):\n data = {'email': 'test@test.com', 'password': 'testtest'}\n sigin_in_user(**data)\n payload = {\n 'username': 'test@test.com',\n 'password': 'testtest'\n }\n res = self.client.post(TOKEN_URL, payload)\n \n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def create_access_token(identity: Union[str,int], type_token: str, fresh: Optional[bool] = False) -> bytes:\n return AuthJWT.create_token(\n identity=identity,\n type_token=type_token,\n fresh=fresh,\n exp_time=timedelta(minutes=AuthJWT._ACCESS_TOKEN_EXPIRES)\n )", "def generate_token_from_user(user, expires_at=None):\n issued_at = datetime.utcnow()\n token = AccessToken()\n token.payload.update(\n {\n \"email\": user.email,\n \"exp\": expires_at or issued_at + timedelta(days=2),\n \"iat\": issued_at,\n \"language\": user.language,\n \"username\": user.username,\n \"full_name\": user.get_full_name(),\n }\n )\n return token", "def auth_token_generate(identity_param_val, expires_delta=False):\n access_token = ''\n try:\n if expires_delta is not False:\n expires_delta = timedelta(minutes=expires_delta)\n access_token = create_access_token(identity=identity_param_val, expires_delta=expires_delta)\n except Exception as e:\n print(e)\n\n return access_token", "async def oauth2_token(\n request: Request, oauth2_request=Depends(_oauth2_request)\n):", "def create_access_token(self):\n\t\t# Wraper for also caching invalid results\n #def getMetadataRofs(path):\n #\ttry:\n # \treturn self.client.metadata(path)\n # except Exception, e:\n # log.write('Exception at getMetadataRofs for path '+ path + '\\n')\n # pprint(e, log)\n # return False\n\n\t\ttry:\n\t\t\trequest_token = self.session.obtain_request_token()\n\t\t\turl = self.session.build_authorize_url(request_token)\n\t\t\tprint url\n\t\t\traw_input()\n\t\t\taccess_token = self.session.obtain_access_token(request_token)\n\t\t\tself.client = client.DropboxClient(self.session)\n\t\t\t\n\t\t\t# Build cache for metadata querying\n\n\t\t\t# Wraper for also caching invalid results\n\t\t\tdef getMetadataRofs(path):\n\t\t\t\ttry:\n\t\t\t\t\treturn self.client.metadata(path)\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogger.error('Exception at getMetadataRofs for path '+ path + '\\n')\n\t\t logger.debug(sys.exc_info()[0])\n\t\t\t\t\treturn False\n\n\t\t\tself.cache_metadata = Cache(getMetadataRofs)\n\t\t\tself.cache_files = {}\n\n\t\texcept Exception, e:\n\t\t\tlogger.error('Exception %s at create_access_token' % 
(sys.exc_info()[0]))\n\t\t\tlogger.debug(pformat(sys.exc_info()))", "def _generate_jwt_token(self):\n import jwt\n from datetime import datetime, timedelta\n from django.conf import settings\n\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'username': self.username,\n 'exp': int(dt.strftime('%s')),\n }, settings.SECRET_KEY, algorithm='HS256')\n # print(token)\n return token", "def post(self):\n data = request.get_json()\n is_verified = actions.verify(data['username'], data['password'])\n if not is_verified:\n abort(404, message='A user with matching credentials does not exist.')\n else:\n token = actions.create_token(data['username'], data['password'])\n token = token.decode('utf-8')\n return{'token': token}, 200\n pass", "def get_auth_token():\n token = g.user.generate_auth_token(24*3600)\n return jsonify({'user_id': g.user.id, 'token': token.decode('ascii')})", "def _request_token(self):\n response = requests.post(\n \"%s/generateToken\" % self.root_uri.rstrip(\"/\"), {\n \"username\": self.username,\n \"password\": self.password,\n \"expiration\": '60',\n \"referer\": 'https://wsdot.maps.arcgis.com',\n \"f\": 'json'\n })\n\n token_info = response.json()\n if \"error\" in token_info:\n raise TokenError(token_info[\"error\"])\n self._token = token_info[\"token\"]\n self._expires = datetime.fromtimestamp(token_info[\"expires\"] / 1000)", "def test_create_token_for_user(setup_client):\n client = setup_client\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'testpass',\n }\n create_user(**payload, **{'role': 'Supplier'})\n res = client.post(TOKEN_URL, payload)\n assert \"token\" in res.data\n assert res.status_code == status.HTTP_200_OK", "def create_token(self, consumer, token_type, timestamp, user=None):\n token, created = self.first_or_create(consumer=consumer, \n token_type=token_type, \n timestamp=timestamp,\n user=user)\n\n if created:\n token.key, token.secret = self.generate_random_codes()\n token.save()\n\n return token", "def create_auth_token(self, create_auth_token_details, user_id, **kwargs):\n resource_path = \"/users/{userId}/authTokens\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_auth_token got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n 
body=create_auth_token_details,\n response_type=\"AuthToken\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_auth_token_details,\n response_type=\"AuthToken\")", "def get_access_token(self, path='/oauth/token', data={}):\n if data.keys():\n data.update(self.data)\n else:\n data = self.data.copy()\n data.update({\n 'grant_type': 'password',\n 'email': self.env.get('TESLA_EMAIL'),\n 'password': self.env.get('TESLA_PASSWORD')\n })\n try:\n req = requests.post(url='%s%s' % (self.url, path), data=data)\n # print(req.status_code)\n # print(req.content)\n self.token.update(req.json())\n except:\n raise 'invalid credentials'\n return self.token", "def _get_token(self, client):\n\n url = self._url('token')\n data = {'grant_type': 'password',\n 'username': self.user,\n 'password': self.password,\n 'scope': 'PRODUCTION'}\n client_data = self.clients[client]\n consumer_key = client_data['response']['consumerKey']\n consumer_secret = client_data['response']['consumerSecret']\n auth = requests.auth.HTTPBasicAuth(consumer_key, consumer_secret)\n return self.POST(url, data=data, auth=auth)", "def get_new_token(self):\n self.register_user(self.user_data2)\n result = self.login_user(self.login_data2)\n header_access_token = json.loads(result.data.decode())['header_access_token']\n return header_access_token", "def get_oauth_token():\n\n # make a request to goodreads authorization url, and pass in request tokens\n gr_session = goodreads.get_auth_session(session['request_token'],\n session['request_token_secret'])\n\n ACCESS_TOKEN = gr_session.access_token\n ACCESS_TOKEN_SECRET = gr_session.access_token_secret\n\n # add OAuth tokens to Account object.\n acct = Account.query.get(session[\"acct\"])\n acct.access_token = ACCESS_TOKEN\n acct.access_token_secret = ACCESS_TOKEN_SECRET\n # get goodreads ID and url for a user and assign to user record.\n gr_id, gr_url, name, image_url = get_acct_id(acct, GR_KEY, GR_SECRET)\n acct.user.gr_id = gr_id\n acct.user.gr_url = gr_url\n acct.user.gr_name = name\n acct.user.image_url = image_url\n # commit changes to db.\n db.session.commit()\n\n return redirect(\"/\")", "def _create_jwt(request, user, expires_in):\n oauth_application = _get_login_oauth_client()\n access_token = create_dot_access_token(\n # Note: Scopes for JWT cookies do not require additional permissions\n request, user, oauth_application, expires_in=expires_in, scopes=['user_id', 'email', 'profile'],\n )\n return create_jwt_from_token(access_token, DOTAdapter(), use_asymmetric_key=True)", "def generateAuthToken(self):\n try:\n payload = {\n 'exp': datetime.utcnow() + timedelta(days=0, minutes=30),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256').decode()\n except Exception as error:\n print(error)\n return error", "def post(self):\n current_user = get_jwt_identity()\n return {\n # Mark the token as un-fresh since we used the refresh token to regenerate this\n \"accessToken\": create_access_token(identity=current_user, fresh=False),\n \"userId\": current_user\n }", "def test_make_token(self):\n user = self.create_user()\n\n token_generator = EmailActivationTokenGenerator()\n token = token_generator.make_token(user)\n self.assertTrue(token_generator.check_token(user, token))", "def auth_token(self):", "def token(self):\n token = jwt.encode(\n {\n \"id\": self.pk,\n \"username\": self.get_full_name,\n \"email\": 
self.email,\n \"iat\": datetime.utcnow(),\n \"exp\": datetime.utcnow() + timedelta(minutes=int(os.getenv('TIME_DELTA')))\n },\n settings.SECRET_KEY, algorithm='HS256').decode()\n return token", "def createAccessTokenReplacement(self):\r\n\r\n url = self._config['OAUTH2ENDPOINT']['huddleAuthServer'] + \"request?response_type=code\" + \\\r\n \"&client_id=\" + self._config['OAUTH2']['clientID'] + \\\r\n \"&redirect_uri=\" + self._config['OAUTH2']['redirectUri']\r\n webbrowser.open_new(url)\r\n code = input('Please enter the code from your web browser:')\r\n\r\n response = self._oauth.obtainAccessTokenBy3LeggedOAuth(code)\r\n responseBody = json.loads(response['Body'])\r\n\r\n try:\r\n oauthToken = Token(responseBody)\r\n except TypeError as e:\r\n print (\"Bad response when requesting a token \" + str(response))\r\n sys.exit()\r\n\r\n return oauthToken", "def get_auth_token_teacher():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'STULOGINID': self.STULOGINID, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def create(self, user, token):\n\n session['user'] = {\n 'id': str(user.id),\n 'login': user.login,\n 'token': token\n }\n\n return UserSession.create(session['user'])", "def create_token_no_user(self):\n payload = {'email': 'test@test.com', 'password': 'testpass'}\n res = self.client.post(TOKEN_URL, payload)\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def for_user(cls, user):\n\n token = super().for_user(user)\n\n TokenMeta.objects.get_or_create(\n jti=token['jti'],\n token=str(token),\n )\n\n return token", "def create(self, validated_data):\n request = self._kwargs['context']['request']\n user = User.objects.create(**validated_data)\n user.set_password(validated_data[\"password\"])\n user.save()\n category_list = ['Fuel', 'Bill', 'Entertainment', 'Education', 'Food']\n for category in category_list:\n user.user_categories.create(name=category)\n login(request, user)\n token, created = Token.objects.get_or_create(user=user)\n validated_data[\"token\"] = token.key\n return validated_data", "def get_token():\n\n def token_helper():\n token = util.prompt_for_user_token(username=\"robbo1992\",\n scope='user-library-read playlist-modify-private playlist-modify',\n client_id=config[\"spotify\"][\"client_id\"],\n client_secret=config[\"spotify\"][\"secret_id\"],\n redirect_uri='http://localhost:8080', cache_path=spotify_cache)\n return token\n\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n if motley.internet:\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n log.error(\"Authentication error in create_token method.\")\n raise Exception", "def post(self):\r\n try:\r\n\r\n data = request.get_json()\r\n user = user_login.find_by_username(data['username'])\r\n if user and safe_str_cmp(user.password, data['password']):\r\n access_token = create_access_token(\r\n identity=user.id, fresh=True)\r\n return {\r\n 'access_token': \"Bearer \" + access_token,\r\n }, 200\r\n return {\"message\": \"Invalid Credentials!\"}, 401\r\n except Exception as e:\r\n return {\"message\": str(e)}", "def create_token(self, token_type=DEFAULT_TOKEN, extra_data='{}'):\n if token_type not 
in dict(TOKEN_TYPES).keys():\n raise ValueError(\"Unable to create token, unknown type\")\n\n value = calc_checksum(self.email, salt=randint(0, maxint))\n\n return LoginToken.objects.create(user=self, value=value, token_type=token_type, extra_data=extra_data)", "def create_user(self):\n username = \"\".join(choice(\n string.ascii_letters) for x in range (randint(7,10)))\n params = {\n \"first_name\":\"ugali\",\n \"last_name\":\"mayai\",\n \"email\":\"ugalimayai@gmail.com\",\n \"username\":username,\n \"password\":\"password\"\n }\n path = \"/api/v2/auth/signup\"\n user = self.client.post(path,\n data=json.dumps(params),\n content_type=\"application/json\")\n \n user_id = user.json['user_id']\n auth_token = user.json['AuthToken']\n return int(user_id), auth_token", "def create_token(self, token_id, data):\n raise exception.NotImplemented() # pragma: no cover", "async def login_access_token(\n form_data: OAuth2PasswordRequestForm = Depends()\n):\n user = await crud.user.authenticate(\n username=form_data.username, password=form_data.password\n )\n if not user:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=\"Incorrect credentials\")\n elif not user.is_active:\n raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail=\"Inactive user\")\n elif not user.is_email_verified:\n raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail=\"Please verify your account via email\")\n access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES)\n return {\n \"access_token\": create_access_token(\n data={\"user_id\": user.id}, expires_delta=access_token_expires\n ),\n \"token_type\": \"bearer\",\n }", "def create(self, validated_data):\n\t\tinstance = super(UserSerializer, self).create(validated_data)\n\t\tinstance.set_password(validated_data['password'])\n\t\ttoken = Token.objects.create(user=instance)\n\t\tinstance.token = token\n\t\treturn instance", "def make_token(self, user):\n return super()._make_token_with_timestamp(user, int(time.time()))", "def create_token(user, title, expiration=_default_expiration_duration_opt):\n if expiration == _default_expiration_duration_opt:\n duration = _default_expiration_duration()\n expiration = duration + datetime.now() if duration else None\n\n token_code = random_string_generator(TOKEN_NAME_PREFIX_LENGTH + MINIMUM_TOKEN_SUFFIX_LENGTH)()\n token_name = token_code[:TOKEN_NAME_PREFIX_LENGTH]\n token_secret = token_code[TOKEN_NAME_PREFIX_LENGTH:]\n\n assert token_name\n assert token_secret\n\n return AppSpecificAuthToken.create(\n user=user,\n title=title,\n expiration=expiration,\n token_name=token_name,\n token_secret=DecryptedValue(token_secret),\n )" ]
[ "0.7509521", "0.7463173", "0.7187646", "0.71259236", "0.70469594", "0.7042824", "0.70313114", "0.6959093", "0.6959093", "0.6959093", "0.6947535", "0.6934357", "0.6917978", "0.6896815", "0.68936986", "0.6844643", "0.68159", "0.6812227", "0.6790298", "0.67814326", "0.675172", "0.6751377", "0.67371583", "0.6735068", "0.67300373", "0.67291194", "0.67291194", "0.67291194", "0.67291194", "0.67291194", "0.67291194", "0.67079645", "0.6673444", "0.66593754", "0.664754", "0.66255367", "0.6624569", "0.6624569", "0.6607051", "0.6604992", "0.65988153", "0.6593991", "0.6593711", "0.65904385", "0.65883553", "0.6585978", "0.65778464", "0.65762115", "0.6552758", "0.65285766", "0.6519594", "0.65166324", "0.65114325", "0.65051717", "0.6500287", "0.6500287", "0.64830935", "0.6475816", "0.6475138", "0.6450183", "0.6444151", "0.6438238", "0.6417175", "0.6388555", "0.63820964", "0.6377309", "0.6376585", "0.63749474", "0.63616765", "0.6353155", "0.63450414", "0.63405067", "0.63392514", "0.6309068", "0.6281367", "0.6278989", "0.62733704", "0.62673265", "0.6257974", "0.6257464", "0.6252693", "0.62483406", "0.6236621", "0.62289774", "0.62230295", "0.6221835", "0.6210455", "0.6205603", "0.6204855", "0.6198846", "0.61822766", "0.6181796", "0.61816025", "0.617358", "0.6167889", "0.61660755", "0.61611605", "0.61531997", "0.6152631", "0.6149302", "0.61487377" ]
0.0
-1
Creates a new Console one-time password for the specified user. For more information about user credentials, see `User Credentials`__. Use this operation after creating a new user, or if a user forgets their password. The new one-time password is returned to you in the response, and you must securely deliver it to the user. They'll be prompted to change this password the next time they sign in to the Console. If they don't change it within 7 days, the password will expire and you'll need to create a new one-time password for the user.
def create_or_reset_ui_password(self, user_id, **kwargs): resource_path = "/users/{userId}/uiPassword" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_or_reset_ui_password got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="UIPassword") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="UIPassword")
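The method above appears to be the Oracle Cloud Infrastructure (OCI) Python SDK's IdentityClient.create_or_reset_ui_password. Assuming that SDK, a minimal call sketch follows; the config source and the user OCID are placeholder assumptions, not values taken from this record.

import oci

# Load credentials from the default ~/.oci/config profile (assumption).
config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Hypothetical user OCID; substitute the real one.
user_ocid = "ocid1.user.oc1..exampleuniqueid"

response = identity.create_or_reset_ui_password(user_ocid)

# response.data is the UIPassword model returned by the call; the generated
# one-time password must be delivered securely to the user, who is prompted
# to change it at the next Console sign-in.
print(response.data.password)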
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, user):\r\n\r\n if not (PasswordHistory.is_student_password_reuse_restricted() or\r\n PasswordHistory.is_staff_password_reuse_restricted() or\r\n PasswordHistory.is_password_reset_frequency_restricted() or\r\n PasswordHistory.is_staff_forced_password_reset_enabled() or\r\n PasswordHistory.is_student_forced_password_reset_enabled()):\r\n\r\n return\r\n\r\n self.user = user\r\n self.password = user.password\r\n self.save()", "def post(self, user_id):\n user = User.query.get(user_id)\n if user is None:\n return mk_response(\"User does not exist\", 422)\n password = user.generate_new_pass()\n return {'password': password}", "def post(self, user_id):\n user = User.query.get(user_id)\n if user is None:\n return abort(422, message=\"User does not exist\")\n password = user.generate_new_pass()\n return { 'password' : password }", "def update_password(self, user, password):\n user.password = hashers.make_password(password)", "def _change_password(self, user, password):\r\n user.set_password(password)\r\n user.save()\r\n history = PasswordHistory()\r\n history.create(user)", "def new_password(self):\n # create new password\n return password_generator.create_password()\n # have password reset", "def test_creation_with_password(self, user):\n user.password = \"is_god\"\n user.save()", "def reset_password(user: User) -> Result[Password]:\n passwd = Password.new()\n command([\"/usr/sbin/chpasswd\"], passwd.wrap(\"{}:{{}}\".format(user.pw_name)))\n return Result(State.success, passwd)", "def userPassword(self, password=None):\n\n\t\tdisplay = False\n\n\t\tif password is None:\n\t\t\tdisplay = True\n\t\t\tpassword = hlstr.generate_password(\n\t\t\t\t\t\t\t\tLMC.configuration.users.min_passwd_size)\n\t\telif password == '':\n\t\t\tlogging.warning(_(u'Setting an empty password for user {0}. '\n\t\t\t\t'This is dangerous and totally insecure!').format(\n\t\t\t\t\tstylize(ST_LOGIN, self.__login)))\n\n\t\twith self.lock:\n\t\t\tif self.__already_created:\n\t\t\t\tLicornEvent('user_pre_change_password', user=self.proxy, password=password).emit(synchronous=True)\n\n\t\t\tprefix = '!' 
if self.__locked else ''\n\n\t\t\tif password == '':\n\t\t\t\tself.__userPassword = prefix\n\t\t\telse:\n\t\t\t\tself.__userPassword = '%s%s' % (prefix,\n\t\t\t\t\t\t\t\t\tself.backend.compute_password(password))\n\n\t\t\t# 3600*24 get us to the number of days since epoch.\n\t\t\tself.__shadowLastChange = int(time.time() / 86400)\n\n\t\t\tif self.__already_created:\n\t\t\t\tself.serialize()\n\t\t\t\tLicornEvent('user_post_change_password', user=self.proxy, password=password).emit(synchronous=True)\n\n\t\t\t\tif self.__already_created:\n\t\t\t\t\t# don't forward this event on user creation, because we\n\t\t\t\t\t# already have the \"user_added\" for this case.\n\t\t\t\t\tLicornEvent('user_userPassword_changed', user=self.proxy).emit(priorities.LOW)\n\n\t\t\tif display:\n\t\t\t\tlogging.notice(_(u'Set password for user {0} to {1}.').format(\n\t\t\t\t\tstylize(ST_NAME, self.__login),\n\t\t\t\t\tstylize(ST_IMPORTANT, password)),\n\t\t\t\t\t# don't display the clear-text password in the daemon's log.\n\t\t\t\t\tto_local=False)\n\t\t\telse:\n\t\t\t\tif self.__already_created:\n\t\t\t\t\tlogging.notice(_(u'Changed password for user {0}.').format(\n\t\t\t\t\t\t\t\t\t\t\tstylize(ST_NAME, self.__login)))", "async def create_new_user(*, user: User):\n with Session(engine) as session:\n user.password = simple_hash(user.name, user.password) #Hashing password for security\n session.add(user)\n session.commit()\n return {\"message\": \"User {user_id} created\".format(user_id = user.id)}", "def add_user(self, user, pw):\n self.db.execute(\"INSERT INTO user_credentials VALUES (?, ?)\", [user, pw])\n self.db.commit()", "def one_time_password(self, delay_time: float = 30.0) -> str:\n\n secret_without_spaces = self.remove_spaces(self._secret)\n upper_case_secret = self.to_upper_case(secret_without_spaces)\n secret = self.decode_with_base32(upper_case_secret)\n input = self.current_timestamp() / delay_time\n hmac = self.create_hmac(secret, input)\n offset = ord(hmac[len(hmac) - 1]) & 0x0F\n hex_four_characters = binascii.hexlify(hmac[offset : offset + 4].encode())\n password = int(hex_four_characters, 32) % 1000000\n return password", "def user_created(self, user, password):\n\n if not self.check_prereqs():\n return False\n\n if self.has_user(user):\n return False\n\n hash = self.hash_method.generate_hash(user,password)\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n res=self.set_password(user,password,create_user=True)\n self.log.debug(\"sqlflexibleauthstore: user_created: %s, %s\" % (user,res))\n return res", "def reset_user_password_service(user: User, password: str) -> None:\n hashed_password = bcrypt.generate_password_hash(password).decode('UTF-8')\n user.password = hashed_password\n db.session.commit()", "def change_password(self, user):\n if not self.is_valid():\n return None\n password = self.clean_password2()\n user.set_password(password)\n user.save()\n return user", "def create_user(self, password=None, **data):\n\n user = self.model(**data)\n user.set_password(password)\n user.is_active = False\n user.save()\n\n return user", "def save(self, *args, **kwargs):\n kwargs[\"commit\"] = False\n user = super(JOSNewPasswordForm, self).save(*args, **kwargs)\n\n password = self.cleaned_data.get(\"password1\")\n\n user.set_password(password)\n user.save()\n\n return user", "def set_password(user_id):\n user = _get_user_or_404(user_id)\n\n form = SetPasswordForm(request.form)\n if not form.validate():\n return set_password_form(user.id, form)\n\n new_password = form.password.data\n initiator_id = 
g.user.id\n\n password_service.update_password_hash(user.id, new_password, initiator_id)\n\n flash_success(\n gettext(\n \"New password has been set for user '%(screen_name)s'.\",\n screen_name=user.screen_name,\n )\n )\n\n return redirect_to('.view', user_id=user.id)", "def generate_password(c, user=\"root\"):\n passw = subprocess.run(\n [\n \"nix\",\n \"run\",\n \"--inputs-from\",\n \".#\",\n \"nixpkgs#xkcdpass\",\n \"--\",\n \"-d-\",\n \"-n3\",\n \"-C\",\n \"capitalize\",\n ],\n text=True,\n check=True,\n stdout=subprocess.PIPE,\n ).stdout.strip()\n hash = subprocess.run(\n [\n \"nix\",\n \"run\",\n \"--inputs-from\",\n \".#\",\n \"nixpkgs#mkpasswd\",\n \"--\",\n \"-m\",\n \"sha-512\",\n \"-s\",\n ],\n text=True,\n check=True,\n stdout=subprocess.PIPE,\n input=passw,\n ).stdout.strip()\n print(\"# Add the following secrets\")\n print(f\"{user}-password: {passw}\")\n print(f\"{user}-password-hash: {hash}\")", "def _authorize_new_user(self, user_name, password):\n\n auth_params = {\n 'USERNAME': user_name,\n 'PASSWORD': password + 'temp'\n }\n\n challenges = {\n 'USERNAME': user_name,\n 'NEW_PASSWORD': password\n }\n\n # do a first time login to obtain the challenge session\n response = self.client.initiate_auth(AuthFlow='USER_PASSWORD_AUTH', AuthParameters=auth_params,\n ClientId=self.client_id)\n\n session = response['Session']\n\n # change user password to automate the status transitioning to CONFIRMED\n auth_response = self.client.respond_to_auth_challenge(ClientId=self.client_id,\n ChallengeName='NEW_PASSWORD_REQUIRED',\n Session=session, ChallengeResponses=challenges)\n\n # no need to return user information, its in the IdToken\n return auth_response", "def put_password():\n # pylint: disable=too-many-branches\n\n # get user\n user = g.user\n\n # prep regex\n re_password = re.compile(AdministratorAdminSchema.re_password)\n\n # validate data\n errors = {}\n if ('previous_password' not in request.json or\n not request.json['previous_password']):\n if 'previous_password' not in errors:\n errors['previous_password'] = []\n errors['previous_password'].append(\"Missing data for required field.\")\n elif ('previous_password' in request.json and\n not user.check_password(request.json['previous_password'])):\n if 'previous_password' not in errors:\n errors['previous_password'] = []\n errors['previous_password'].append(\"Incorrect password.\")\n\n if 'password1' not in request.json or not request.json['password1']:\n if 'password1' not in errors:\n errors['password1'] = []\n errors['password1'].append(\"Missing data for required field.\")\n if ('password1' in request.json and\n not re_password.match(request.json['password1'])):\n if 'password1' not in errors:\n errors['password1'] = []\n errors['password1'].append(\"Please choose a more complex password.\")\n\n if 'password2' not in request.json or not request.json['password2']:\n if 'password2' not in errors:\n errors['password2'] = []\n errors['password2'].append(\"Missing data for required field.\")\n if 'password1' in request.json and 'password2' in request.json:\n if request.json['password1'] != request.json['password2']:\n if 'password2' not in errors:\n errors['password2'] = []\n errors['password2'].append(\"New passwords must match.\")\n\n if errors:\n return jsonify({\"error\": errors}), 400\n\n # check previous passwords\n if user.roles[0].password_policy and user.roles[0].password_reuse_history:\n prev_passwords = AdministratorPasswordHistory.query.\\\n filter(AdministratorPasswordHistory.administrator_id == user.id).\\\n 
order_by(AdministratorPasswordHistory.set_date.desc()).\\\n limit(user.roles[0].password_reuse_history)\n for record in prev_passwords:\n print(\"TEST \", record.password)\n if bcrypt.checkpw(request.json.get('password1').encode('utf-8'),\n record.password.encode('utf-8')):\n errors['password1'] = [\"This password has recently been used.\"]\n break\n\n if errors:\n return jsonify({\"error\": errors}), 400\n\n # save user and password history\n user.password = request.json.get('password1')\n pass_history = AdministratorPasswordHistory(administrator=user,\n password=user.password,\n set_date=datetime.now())\n db.session.add(pass_history)\n db.session.commit()\n\n # response\n return jsonify({'success': 'true'}), 200", "def set_password(self, value):\n # Salt need to be generated before set password\n m = hashlib.sha256()\n m.update('-'.join([\n str(datetime.now()),\n config.get('security.password_salt')\n ]))\n self.salt = m.hexdigest()\n self.password_pending = False\n self.password = self.__encrypt(value)", "def update_user_password(self, user_id, password, original_password):\n update_user = {\n 'password': password,\n 'original_password': original_password\n }\n update_user = json.dumps({'user': update_user})\n resp, _ = self.post('users/%s/password' % user_id, update_user)\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp)", "def post(self):\n user_data = request.get_json()\n user = User.query.filter_by(email=user_data.get('email')).first()\n\n if user:\n new_password = uuid.uuid4().hex\n user.password = generate_password_hash(new_password)\n user.save()\n\n responseObject = {\n \"message\": \"Password reset successful!\",\n \"New password\": new_password}\n return make_response(jsonify(responseObject)), 200\n else:\n response = {\"error\": \"Email does not exist\"}\n return make_response(jsonify(response)), 401", "def change_password(self):\n self.test_user.set_password(self.create_user_data()['password1'])\n self.test_user.save()", "def create_new_credential(account,userName,password):\n new_credential = Credentials(account,userName,password)\n return new_credential", "def set_password(self, user, password):\n hashed_password = self.hash_password(password)\n server_name = self.get_server_name()\n hookenv.log(\"Storing hash: {}\".format(hashed_password), hookenv.DEBUG)\n result = self.pgsql_query(\n \"UPDATE users SET password_hash = '{}' WHERE name = '@{}:{}';\".format(\n hashed_password, user, server_name\n )\n )\n return result", "def create_service_credentials(user, new_roles=None):\n tenant = config('service-tenant')\n if not tenant:\n raise Exception(\"No service tenant provided in config\")\n\n domain = None\n if get_api_version() > 2:\n domain = DEFAULT_DOMAIN\n passwd = create_user_credentials(user, get_service_password,\n set_service_password,\n tenant=tenant, new_roles=new_roles,\n grants=[config('admin-role')],\n domain=domain)\n if get_api_version() > 2:\n # Create account in SERVICE_DOMAIN as well using same password\n domain = SERVICE_DOMAIN\n passwd = create_user_credentials(user, get_service_password,\n set_service_password,\n tenant=tenant, new_roles=new_roles,\n grants=[config('admin-role')],\n domain=domain)\n return passwd", "def new_password():\n new_pass = generate_password()\n entry_pass.delete(0, END)\n entry_pass.insert(0, new_pass)", "def generate_password_hash(event=None, user_id=None):\n\n suffix_key = f'password{event}'\n hexkey = str.encode(f'{user_id}{suffix_key}')\n\n # md5 value[1:10] + 1\n passwd = 
'{0}{1}'.format(hashlib.md5(hexkey).hexdigest()[1:10], 1)\n\n return passwd", "def set_password(self, user, password, create_user=True):\n\n if not self.check_prereqs():\n return False\n\n hash = self.hash_method.generate_hash(user,password)\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_update_password_query,{'username_field':self.sql_username_field,'password_field':self.sql_password_field,'username':user,'password':hash})\n self.log.debug(\"sqlflexibleauthstore: set_password: %s\" % (query,))\n cursor.execute(query)\n\n if cursor.rowcount > 0:\n self.log.debug('sqlflexibleauthstore: set_password: an existing user was updated')\n db.commit()\n if create_user:\n '''only return False when a user was updated, and create_user is true, because a user was not created'''\n return False\n else:\n '''the user was succesfully updated and no user should be created'''\n return True\n elif not create_user:\n self.log.debug('sqlflexibleauthstore: set_password: user doesnt exist, and none should be created')\n '''no existing user was updated, an none should be created either'''\n return False\n query=self.create_query(self.sql_create_user_query,{'username_field':self.sql_username_field,'password_field':self.sql_password_field,'username':user,'password':hash})\n self.log.debug(\"sqlflexibleauthstore: set_password: %s\" % (query,))\n cursor.execute(query)\n\n db.commit()\n return True", "def change_user_password(self, user, new_pass):\n return self.update(user, password=new_pass)", "def change_user_password(self, instance, user, new_pass):\n return instance.change_user_password(user, new_pass)", "def on_POST(self, request, target_user_id):\n UserID.from_string(target_user_id)\n requester = yield self.auth.get_user_by_req(request)\n is_admin = yield self.auth.is_server_admin(requester.user)\n\n if not is_admin:\n raise AuthError(403, \"You are not a server admin\")\n\n params = parse_json_object_from_request(request)\n new_password = params['new_password']\n if not new_password:\n raise SynapseError(400, \"Missing 'new_password' arg\")\n\n logger.info(\"new_password: %r\", new_password)\n\n yield self.auth_handler.set_password(\n target_user_id, new_password, requester\n )\n defer.returnValue((200, {}))", "def _set_user_password(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..40']}), is_leaf=True, yang_name=\"user-password\", rest_name=\"password\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Password of the user', u'alt-name': u'password'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='user-passwd', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"user_password must be of a type compatible with user-passwd\"\"\",\n 'defined-type': \"brocade-aaa:user-passwd\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..40']}), is_leaf=True, yang_name=\"user-password\", rest_name=\"password\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Password of the user', u'alt-name': u'password'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='user-passwd', is_config=True)\"\"\",\n })\n\n 
self.__user_password = t\n if hasattr(self, '_set'):\n self._set()", "def view_update_user(self, user, username, password):\r\n user.realm._checker.passwd(username, password, True)", "async def password(self, ctx):\n pass", "def post(self):\n\n user_data, error = user_schema.load(api.payload)\n user_data[\"public_id\"] = uuid.uuid4()\n\n try:\n pswd = user_data[\"password\"]\n except KeyError as e:\n return {\"msg\": \"Password required.\"}, 400\n else:\n user_data[\"password\"] = bcrypt.generate_password_hash(pswd).decode('utf-8')\n\n try:\n new_user = User(**user_data).save()\n except Exception as e:\n return str(e), 400\n \n return user_schema.dump(new_user), 200", "def _create_user(self, password, **extra_fields):\n try:\n user = self.model(**extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n except:\n raise ValueError('ValueError: Cannot create new user')", "def post(self):\n try:\n identity = get_jwt_identity()\n body = request.get_json()\n if identity:\n user = User.objects.get(id=identity['user_id'])\n user.modify(password=body.get('password'))\n user.hash_password()\n user.save()\n res = make_response({\n \"response\": \"You have changed your password successfully.\",\n 'status': 200\n }, 200)\n return res\n except SchemaValidationError:\n raise SchemaValidationError\n except ExpiredSignatureError:\n raise ExpiredTokenError\n except (DecodeError, InvalidTokenError):\n raise BadTokenError\n except Exception as e:\n raise InternalServerError", "def timestamp_server_user_password(self, timestamp_server_user_password):\n\n self._timestamp_server_user_password = timestamp_server_user_password", "def create(self, validated_data):\n\n obj = super(UserSerializer, self).create(validated_data=validated_data)\n obj.set_password(obj.password)\n obj.save()\n\n return obj", "def new_user(cls, user):\n pass", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def new_user(cls, user):\r\n pass", "def create_passlocker(username, userpasslock, email):\n new_passlocker = passlocker(username, userpasslock, email)", "def set_password(ctx, new_password, remember):\n ensure_validated(ctx, prompt='Enter your current password')\n if not new_password:\n new_password = click.prompt(\n 'Enter your new password',\n hide_input=True,\n confirmation_prompt=True,\n err=True)\n\n controller = ctx.obj['controller']\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n key = controller.set_password(new_password)\n click.echo('Password updated.')\n if remember:\n keys[controller.id] = b2a_hex(key).decode()\n settings.write()\n click.echo('Password remembered')\n elif controller.id in keys:\n del keys[controller.id]\n settings.write()", "def create_user(user_id, password_16char, public_key_32char):\n headers = {'Content-type': 'application/json'}\n payload = {'user_id': user_id\n , 'user_password': password_16char\n , 'public_key': public_key_32char}\n response = requests.post(\"http://localhost:5000/user/createUser\", data=json.dumps(payload), headers=headers)\n return response.text", "def create_new_user(self, userName, password):\n if serverUtils.validate_password(password):\n database = self.read_database()\n users = database['users']\n\n if userName not in users:\n salt = utils.random(pwhash.argon2id.SALTBYTES)\n hashed_password = pwhash.argon2id.kdf(secret.SecretBox.KEY_SIZE, password.encode('utf-8'), salt)\n users[userName] = dict(hashedPassword=hashed_password.hex(),\n salt=salt.hex(),\n 
masterKey=self.generate_master_key().decode('cp855'))\n\n self.write_database(database)\n\n else:\n raise UserAlreadyExistsException(\"User already exists\")\n else:\n raise PasswordTooWeakException(\"supply a stronger password\")", "def create_user_credentials(user, passwd_get_callback, passwd_set_callback,\n tenant=None, new_roles=None,\n grants=None, domain=None):\n passwd = passwd_get_callback(user)\n if not passwd:\n log(\"Unable to retrieve password for user '{}'\".format(user),\n level=INFO)\n return\n\n log(\"Creating service credentials for '%s'\" % user, level=DEBUG)\n if user_exists(user, domain=domain):\n log(\"User '%s' already exists\" % (user), level=DEBUG)\n # NOTE(dosaboy): see LP #1648677\n if is_password_changed(user, passwd):\n update_user_password(user, passwd, domain)\n else:\n create_user(user, passwd, tenant=tenant, domain=domain)\n\n passwd_set_callback(passwd, user=user)\n\n if grants:\n for role in grants:\n # grant role on project\n grant_role(user, role, tenant=tenant, user_domain=domain,\n project_domain=domain)\n else:\n log(\"No role grants requested for user '%s'\" % (user), level=DEBUG)\n\n if new_roles:\n # Allow the remote service to request creation of any additional roles.\n # Currently used by Swift and Ceilometer.\n for role in new_roles:\n log(\"Creating requested role '%s'\" % role, level=DEBUG)\n create_role(role, user=user, tenant=tenant, domain=domain)\n\n return passwd", "def password(self, value):\n self.password_hashed = func.crypt(value, func.gen_salt('bf'))", "def set_password(self, password):\n self.password = generate_password_hash(password, method='pbkdf2:sha256')", "def view_update_user(self, user, new_pw, old_pw):\r\n user.realm._checker.passwd(user.userID, new_pw, old_pw)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def get_new_password(self, user):\r\n print (_NEW_PASS_PROMPT)\r\n msg_pw = \"Enter a password for the user '{0}': \".format(user)\r\n msg_cf = \"Please confirm the password for the user '{0}': \".format(user)\r\n\r\n while True:\r\n passwd = raw_input(msg_pw).strip()\r\n if passwd == raw_input(msg_cf).strip():\r\n if ' ' not in passwd and self.pass_validator(passwd):\r\n return passwd\r\n else:\r\n print('Password does not contain appropriate characters.')\r\n else:\r\n print('Passwords do not match.')", "def create(self, validated_data):\n user = super(UserSerializer, self).create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user", "def change_user():\n _ = db.change_password(auth.username(), generate_password_hash(request.json['password']))\n return str(_)", "def post(self):\n # userId is retrieved from jwt identity\n userId = get_jwt_identity()\n data = ChangePasswordInputSchema().load(request.json)\n UserLoginService.change_password(userId,\n existing_password=data[\"existingPassword\"],\n new_password=data[\"newPassword\"])\n return {}, 200", "def password_builder():\n password = Credentials.password_buidler()\n return password", "def change_password(host, username, password):\r\n # type: (Docker, str, str) -> None\r\n host.cmd(\"echo '%s:%s' | chpasswd\" % (username, password))", "def new_user(request):\r\n rdict = request.params\r\n\r\n u = User()\r\n\r\n u.username = unicode(rdict.get('username'))\r\n if u.username:\r\n u.username = u.username.lower()\r\n u.email = unicode(rdict.get('email')).lower()\r\n passwd = get_random_word(8)\r\n u.password = passwd\r\n u.activated = True\r\n u.is_admin = False\r\n u.api_key = 
User.gen_api_key()\r\n\r\n try:\r\n DBSession.add(u)\r\n DBSession.flush()\r\n # We need to return the password since the admin added the user\r\n # manually. This is only time we should have/give the original\r\n # password.\r\n ret = dict(u)\r\n ret['random_pass'] = passwd\r\n return _api_response(request, ret)\r\n\r\n except IntegrityError, exc:\r\n # We might try to add a user that already exists.\r\n LOG.error(exc)\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: User exists.',\r\n })", "def create_user(**kwargs):\n User = apps.get_model(settings.AUTH_USER_MODEL)\n user = G(User, **kwargs)\n user.set_password(kwargs.get(\"password\", \"test\"))\n user.save()\n return user", "def _update_password(self, email, new_password):\r\n user = User.objects.get(email=email)\r\n user.set_password(new_password)\r\n user.save()\r\n history = PasswordHistory()\r\n history.create(user)", "def create():\n user_dict = request.json\n\n try:\n user = UserController.create(user_dict)\n except UserController as e:\n return jsonify(error=e.error), 500\n\n return jsonify(id=user.id, pw_hash=user.pw_hash)", "def step_impl(context):\n\n from django.contrib.auth.models import User\n u = User(username='test_user', email='testuser@test.com')\n u.set_password('admin')", "def CreateNewSmtpUser(s):\n payload = ['adduser %s %s\\n' % (FLAGS.exploit_user, FLAGS.exploit_password),\n 'quit\\n']\n SendPayload(s, payload)\n logging.info('Created new user %s/%s' % (\n FLAGS.exploit_user, FLAGS.exploit_password))\n s.close()", "def create_user(self):\n # TODO-ROB: This is used ONLY when the user registers in flask\n # TODO-ROB: Create the cookiecutter.json file\n # extra_context overrides user and default configs\n cookiecutter(self.user_cookie, no_input=True, extra_context={\"user_name\": self.user}, output_dir=self.users)", "def add_user(host, username, password=None):\r\n # type: (Docker, str, str) -> None\r\n\r\n host.cmd(\"adduser --disabled-password --gecos \\\"\\\"\", username)\r\n if password:\r\n change_password(host, username, password)", "def register_new_user(user):\n user.is_active = False\n user.set_unusable_password()\n user.save()\n\n url = generate_url_reset(user)\n #TODO: mettere un body decente per l'email\n send_email(user.email, url, 'aMUX Registration Confirm')", "def write_pass(service, password, user_id):\r\n global sql_cursor\r\n global database\r\n global passwords\r\n\r\n query = f'INSERT INTO passwords(service,pass,user_id) values(\"{service}\",\"{password}\",\"{user_id}\");'\r\n sql_cursor.execute(query)\r\n print(\"Saving ...\")\r\n database.commit()\r\n\r\n passwords = fetch_data(sql_cursor, \"passwords\")\r\n\r\n print(\"Password saved successfully\\n\")", "def passwd(self, plaintext):\n self._password = bcrypt.generate_password_hash(plaintext.encode('utf8')).decode('utf8')", "def test_mod_password(self, mapp, existing_user_id):\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\")\n mapp.modify_user(user = existing_user_id, password = id(self))\n # Verify that the password was indeed changed.\n mapp.logoff()\n mapp.login(user=existing_user_id,\n password=\"1234\", code = 401)\n mapp.login(user=existing_user_id, password=id(self))", "def generate_token(user, expire_time=86400):\n session = Session()\n token = session.query(PasswordRecoveryToken)\\\n .filter(PasswordRecoveryToken.user_id == user.user_id)\\\n .first()\n\n if token is not None:\n self.expire(token)\n \n token = PasswordRecoveryToken()\n token.user_id = 
user.user_id\n session.add(token)\n \n token.expiration = datetime.now() + timedelta(seconds=expire_time)\n \n sha_token = hashlib.sha224(user.login)\n sha_token.update(user.password)\n sha_token.update(str(token.expiration))\n \n token.token = sha_token.hexdigest()\n print token.token\n return token", "def create_password(self):\r\n alphabet = string.ascii_letters + string.digits\r\n password = ''.join(secrets.choice(alphabet) for i in range(30))\r\n\r\n QtWidgets.QMessageBox.information(self, \"Password generated\", \r\n \"{}\".format(password))", "def create(self, validated_data):\n password = validated_data.pop('password')\n new_user = User.objects.create(**validated_data)\n new_user.set_password(password)\n new_user.save()\n return new_user", "def test_010_change_user_password(self):\n\n testflow.step(\"Resetting password for user %s\", TEST_USER1)\n assert USER_CLI.run(\n 'password-reset',\n TEST_USER1,\n password='pass:%s' % self.user_password,\n password_valid_to='2100-01-01 11:11:11Z',\n )[0], \"Failed to change user's '%s' password\" % TEST_USER1", "def _create_user_Api(self,password,username, **extra_fields):\r\n if not username:\r\n raise ValueError('The given username must be set')\r\n user = self.model(email=username,username=str.strip(username), **extra_fields)\r\n user.set_password(password)\r\n user.save(using=self._db)", "def new_credentials(site_name, user_name, password):\n new_credentials = Credentials(site_name, user_name, password)\n return new_credentials", "def store_passwd(self, clr_passwd):\n aes_cipher = AESCipher()\n self.__aes_key = aes_cipher.AES_KEY\n self.__password = aes_cipher.encrypt(clr_passwd)", "def sipserver_user_add(self, user: str, password: str = None) -> None:\n self.add_endpoint_to_sipserver(endpoint=user, password=password)", "def updateWebAppUserPwd( self, username, password ):\n try:\n crypt_pass = crypt(password, username)\n con = self.getMetadataDatabaseConnection()\n user_data = con.cursor()\n con.cursor().callproc('update_web_app_user_password', [username, crypt_pass])\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def _create_user(self, new_user):\n new_user = User(user_name=new_user['user_name'], pin=new_user['pin'], user_type='customer')\n self.session.output(new_user.get_user_info(), '\\n[ New user created ]')", "def create_user(self):\n u = USER.objects.create(username='test_user1',\n email='test_email@example.com', )\n u.set_password('test_password')\n u.save()\n self.user = u\n return u", "def save_password():\n title = core.get_value(TITLE_ID)\n identifier = core.get_value(IDENTIFIER_ID)\n password = core.get_value(PASSWORD_ID)\n note = core.get_value(NOTE_ID)\n\n is_valid = True\n if not title:\n logger.add_error_message('Title is required. Please set the Title.')\n is_valid = False\n if not identifier:\n logger.add_error_message('Identifier is required. Please set the Identifier.')\n is_valid = False\n if not password:\n logger.add_error_message('Password is required. 
Please set the Password')\n is_valid = False\n\n if not is_valid:\n return\n\n password_info = model.PasswordInfo(\n title=title,\n identifier=identifier,\n password=encrypt(password),\n note=note\n )\n\n try:\n model.insert_one_item(password_info)\n except Exception:\n core.add_error_message('Failed to save password.')\n return\n\n logger.add_info_message('Password was saved successfully.')\n table.update_password_table()", "def set_user_passwd(self, sUserName, sUserPasswd, nFlags = 0):\n\t\treturn Job(SDK.PrlVm_SetUserPasswd(self.handle, sUserName, sUserPasswd, nFlags)[0])", "def set_password(self, request, pk=None):\n user = User.objects.get(id=pk)\n serializer = PasswordSerializer(data=request.data)\n\n if serializer.is_valid():\n if not user.check_password(serializer.data.get('old_password')):\n return Response({'old_password': ['Wrong password.']},\n status=status.HTTP_400_BAD_REQUEST)\n # set_password also hashes the password that the user will get\n user.set_password(serializer.data.get('new_password'))\n user.save()\n return Response({'status': 'password set'}, status=status.HTTP_200_OK)\n\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)", "def create_payment_password(self, header, body):\n\n req_dir = \"payment_passwd\"\n method = self.__client.do_post\n req_params = self.__set_params(\n header,\n req_dir,\n body=body\n )\n return self.__client.do_request(\n req_params,\n method,\n )", "def create(self, data):\n # Make User\n code = (random.randint(1000, 9999))\n user = User.objects.get(pk=self.context['user'].pk)\n new = str(code).strip()\n hs = hashlib.sha1(new.encode()).hexdigest()\n user.password = hs\n user.save()\n send_verification_email.delay(email=data['email'], code=code)\n return user", "def set_password(self, service, username, password):\n segments = range(0, len(password), self._max_password_size)\n password_parts = [password[i : i + self._max_password_size] for i in segments]\n for i, password_part in enumerate(password_parts):\n curr_username = username\n if i > 0:\n curr_username += '{{part_%d}}' % i\n self._keyring.set_password(service, curr_username, password_part)", "def create_user():\n\n username = str(request.parsed_json['username'])\n email = str(request.parsed_json['email'])\n password = str(request.parsed_json['password'])\n\n res = auth.create_user(username, email, password)\n if not res:\n return create_error(400, str(res))\n\n globalopts.appdata[username] = {\n 'user': username,\n 'Appdata': {'Total': globalopts.DEFAULT_WEEKLY_TIMES},\n 'Goals': [globalopts.DEFAULT_GOALS]\n }\n\n print(globalopts.appdata[username])\n\n return \"\", 200", "def create_admin(password: Optional[str] = None):\n if password is None:\n password = \"\".join(random.choice(string.ascii_lowercase) for i in range(20))\n is_temp_password = True\n else:\n is_temp_password = False\n\n admin_user = m.User(\n username=\"admin\",\n password=password,\n role=m.UserRole.ADMIN,\n token=m.get_uuid(),\n temp_password=is_temp_password,\n )\n db.session.add(admin_user)\n print(f\"User {admin_user.username!r} added with password: {password!r}\")\n db.session.commit()", "def create_user(self) -> None:\n # update when the account was created\n self.account_created = datetime.now().date()\n self.insert_to_db()\n log(f\"An account for User:{self.id} has been created.\")", "def create_user(username):\n\n password = getpass.getpass('Password for {0}: '.format(username))\n confirm = getpass.getpass('Again: ')\n\n if password != confirm:\n print >> sys.stderr, \"Passwords 
don't match\"\n\n sys.exit(1)\n\n with transaction.manager:\n Users(username, password).save()", "def new_user(client, username, password, apikey=None, docs=None):\n if apikey is None:\n apikey = str(uuid.uuid4())\n passhash = generate_password_hash(password, method='sha1')\n user = User(username, passhash, apikey, docs=docs)\n user.create(client)\n return user", "def change_user(self, username, password):\n self.creds['username'] = username\n self.creds['password'] = password", "def update_user_password(self, username):\n parser_password.add_argument('password',\n type=validate_password, required=True,\n nullable=False,\n help=\"Password must be at least 6 characters\"\n )\n args = parser_password.parse_args()\n password = self.set_password(request.json.get('password'))\n\n query = \"\"\"UPDATE users SET password=%s WHERE username=%s\"\"\"\n values = password, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return True", "def sipserver_user_update(self, user: str, password: str) -> None:\n self.update_endpoint_in_sipserver(endpoint=user, password=password)", "def update(self, instance, data):\n password = data.pop('password', None)\n user = super().update(instance, data)\n\n if password:\n user.set_password(password)\n user.save()\n\n return user", "def test_32_oauth_password(self):\r\n user = User(email_addr=\"johndoe@johndoe.com\",\r\n name=self.user.username,\r\n passwd_hash=None,\r\n fullname=self.user.fullname,\r\n api_key=\"api-key\")\r\n db.session.add(user)\r\n db.session.commit()\r\n res = self.signin()\r\n assert \"Ooops, we didn't find you in the system\" in res.data, res.data", "def bdev_opal_new_user(client, bdev_name, admin_password, user_id, user_password):\n params = {\n 'bdev_name': bdev_name,\n 'admin_password': admin_password,\n 'user_id': user_id,\n 'user_password': user_password,\n }\n\n return client.call('bdev_opal_new_user', params)" ]
[ "0.66686237", "0.66496706", "0.6624422", "0.64673924", "0.6430282", "0.6305761", "0.62900466", "0.6217141", "0.6100756", "0.6072967", "0.60237443", "0.6004952", "0.5996487", "0.5996053", "0.5969086", "0.5958626", "0.5951579", "0.59395665", "0.5936133", "0.59078705", "0.5879257", "0.58717823", "0.5867035", "0.5851138", "0.5850592", "0.58361447", "0.5831033", "0.5787276", "0.5783835", "0.5771635", "0.5764702", "0.5762029", "0.5743209", "0.57342327", "0.5713106", "0.5707482", "0.5698017", "0.5691204", "0.56881136", "0.5667963", "0.56480926", "0.5636962", "0.5633815", "0.56255156", "0.56254756", "0.5620371", "0.562024", "0.5608728", "0.5598861", "0.55982774", "0.559658", "0.5596549", "0.5584258", "0.5583768", "0.5581083", "0.5577156", "0.5576488", "0.55666465", "0.5558797", "0.5555007", "0.55524063", "0.55505604", "0.5548052", "0.5547974", "0.5525661", "0.5523991", "0.551847", "0.5516853", "0.5511607", "0.54996794", "0.5495528", "0.5493339", "0.54923946", "0.5476963", "0.5467769", "0.5463037", "0.545592", "0.5452305", "0.54520446", "0.5450681", "0.54434985", "0.544314", "0.5441745", "0.5437715", "0.5428153", "0.5421198", "0.5416183", "0.54146814", "0.5394425", "0.53930223", "0.5392472", "0.53916353", "0.5388182", "0.5385731", "0.5381995", "0.53728676", "0.536629", "0.53581756", "0.5356976", "0.53501105" ]
0.54996353
70
Creates a new policy in the specified compartment (either the tenancy or another of your compartments). If you're new to policies, see `Getting Started with Policies`__. You must specify a name for the policy, which must be unique across all policies in your tenancy and cannot be changed. You must also specify a description for the policy (although it can be an empty string). It does not have to be unique, and you can change it anytime with UpdatePolicy.
def create_policy(self, create_policy_details, **kwargs): resource_path = "/policies" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_policy got unknown kwargs: {!r}".format(extra_kwargs)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, header_params=header_params, body=create_policy_details, response_type="Policy") else: return self.base_client.call_api( resource_path=resource_path, method=method, header_params=header_params, body=create_policy_details, response_type="Policy")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, policy_name, data):\n path = self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n logging.info(\"Adding the policy: %s\", address)\n payload = json.dumps({\"policy\": data})\n response = self.vault.requests_request(\n \"POST\", address, headers=self.vault.token_header, data=payload\n )", "def create_policy(policystore_url, create_policy_request, verbose):\n\n if verbose:\n logging.info('Creating policy')\n pprint.pprint(create_policy_request)\n\n create_url = policystore_url + POLICYSTORE_PREFIX + 'CreateEntitlementPolicy'\n\n r = requests.post(\n create_url, headers=headers(), json=create_policy_request)\n if r.status_code != 200:\n logging.error(f'ERROR: Unexpected response: {r.status_code}')\n pprint.pprint(r.json())\n\n sys.exit('Failed to create policy')\n\n resp = r.json()\n\n logging.info(\n f'SUCCESS: Created policy - ID: {resp[\"policy_id\"]}, Token: {resp[\"token\"]}'\n )\n\n return resp", "def create_policy(self, policy_name, policy_document, delete=True, **kwargs):\n try:\n Oprint.info('Creating IAM policy {}'.format(policy_name), 'iam')\n \n policy = self.get_policy(policy_name=policy_name)\n if policy and policy.get('Policy'):\n if not delete:\n Oprint.info('Found existing IAM policy {}'.format(policy_name), 'iam')\n return policy\n else:\n # Can not delete a policy if it has been attached\n if policy.get('Policy').get('AttachmentCount') > 0:\n Oprint.warn('Policy {} already exists and has been attached to a role. Cannot delete'.format(policy.get('Policy').get('PolicyName')), 'iam')\n return policy\n\n self._client.delete_policy(PolicyArn=self.get_policy_arn(policy_name))\n \n policy = self._client.create_policy(PolicyName=policy_name, PolicyDocument=policy_document, **kwargs)\n\n Oprint.info('IAM policy {} has been created'.format(policy_name), 'iam')\n except Exception as e:\n Oprint.err(e, 'iam')\n\n return policy", "def add_policy(self, policy_name, policy_text): \n self.policies.add(policy_name, policy_text)\n self.policies = set()", "def policy_create(request, **kwargs):\n body = {'policy': kwargs}\n policy = neutronclient(request).create_qos_policy(body=body).get('policy')\n return QoSPolicy(policy)", "def set_policy(self, name, policy):\n client = self.connect(VAULT_TOKEN)\n client.set_policy(name, policy)", "def create_default_policy(self, role_name, policy_name, policy_file):\n try:\n template = get_template(policy_file)\n if not template:\n return False\n\n with open(template, 'r') as outfile:\n policy_doc = outfile.read()\n\n return self._client.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy_doc)\n except Exception as e:\n Oprint.err(e, 'iam')", "def create_ikepolicy(self, body=None):\r\n return self.post(self.ikepolicies_path, body=body)", "def create_policy(env, policy_type, policy_weights_file=None):\n input_size = env.observation_space.shape[0]\n output_size = env.action_space.shape[0]\n action_low = env.action_space.low\n action_high = env.action_space.high\n policy = policy_type(input_size=input_size,\n output_size=output_size,\n action_high=action_high,\n action_low=action_low)\n if policy_weights_file:\n policy.load_model(policy_weights_file)\n return policy", "def create_policy(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method 
create_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_policy`\")\n\n resource_path = '/oapi/v1/policies'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Policy',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_create_namespaced_policy(self):\n pass", "def put_container_policy(ContainerName=None, Policy=None):\n pass", "def _add_policy(self, policy):\n self.by_name[policy.name.upper()] = policy\n self.by_index[int(policy)] = policy", "def put_metric_policy(ContainerName=None, MetricPolicy=None):\n pass", "def role_policy_create(ctx, role, policy):\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.attach_role_policy\n res = IAM.client().attach_role_policy(\n RoleName=role,\n PolicyArn=policy,\n )\n click.echo(J(res))", "def create_ipsecpolicy(self, body=None):\r\n return self.post(self.ipsecpolicies_path, body=body)", "def Create(self,\n firewall_policy=None,\n parent_id=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [self._MakeCreateRequestTuple(firewall_policy, parent_id)]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.Insert(\n self._MakeCreateRequestTuple(firewall_policy, parent_id)[2])\n return self.WaitOperation(\n op_res, message='Creating the organization firewall policy.')", "def post(self, nodepool_policy):\n context = pecan.request.context\n nodepool_policy_dict = nodepool_policy.as_dict()\n\n print 'aaaaaa'\n print context.project_id\n print context.user_id\n print 'aaaaaaa'\n nodepool_policy_dict['project_id'] = context.project_id\n nodepool_policy_dict['user_id'] = context.user_id\n\n nodepool_policy = objects.NodePoolPolicy(context, **nodepool_policy_dict)\n nodepool_policy.create()\n\n # Set the HTTP Location Header\n # pecan.response.location = link.build_url('nodepool_policies', nodepool_policy.id)\n return NodePoolPolicy.convert_with_links(nodepool_policy)\n\n # res_nodepool_policy = pecan.request.rpcapi.nodepool_policy_create(nodepool_policy,\n # nodepool_policy.nodepool_policy_create_timeout)\n\n # # Set the HTTP Location Header\n # pecan.response.location = link.build_url('nodepool_policies', res_nodepool_policy.uuid)\n # return NodePoolPolicy.convert_with_links(res_nodepool_policy)", "def rbac_policy_create(request, **kwargs):\n body = {'rbac_policy': kwargs}\n rbac_policy = neutronclient(request).create_rbac_policy(\n body=body).get('rbac_policy')\n 
return RBACPolicy(rbac_policy)", "def post_network_policy_create(self, resource_dict):\n pass", "def create_firewall_policy(self, body=None):\r\n return self.post(self.firewall_policies_path, body=body)", "def register_policy(cls, to_register=None, *, name: Optional[str] = None):\n # from habitat_baselines.common.base_trainer import BaseTrainer\n from pointnav_vo.rl.policies.policy import Policy\n\n return cls._register_impl(\"policy\", to_register, name, assert_type=Policy)", "def test_create_success(self, mock_post):\n self.policies.create(\n name=self.policy_single_response['policy']['name'],\n incident_preference=self.policy_single_response['policy']['incident_preference']\n )\n\n mock_post.assert_called_once_with(\n url='https://api.newrelic.com/v2/alerts_policies.json',\n headers=self.policies.headers,\n data=json.dumps({\n \"policy\": {\n \"name\": self.policy_single_response['policy']['name'],\n \"incident_preference\": self.policy_single_response['policy']['incident_preference']\n }\n })\n )", "def __publish_policy(conn:str, ledger_conn:str, policy:dict, auth:tuple=(), timeout:int=30)->bool:\n status = True\n\n headers = {\n 'command': 'blockchain push !new_policy',\n 'User-Agent': 'AnyLog/1.23',\n 'destination': ledger_conn\n }\n\n if isinstance(policy, dict): # convert policy to str if dict\n policy = json.dumps(policy)\n raw_policy = \"<new_policy=%s>\" % policy\n\n try:\n r = requests.post(url='http://%s' % conn, headers=headers, data=raw_policy, auth=auth, timeout=timeout)\n except Exception as e:\n print('Failed to POST policy against %s (Error; %s)' % (conn, e))\n status = False\n else:\n if int(r.status_code) != 200:\n print('Failed to POST policy against %s (Network Error: %s)' % (conn, r.status_code))\n status = False\n\n return status", "def put_lifecycle_policy(ContainerName=None, LifecyclePolicy=None):\n pass", "def add_policy(self, sec, ptype, rule):\n self._save_policy_line(ptype, rule)", "def create_acl_policy(client, container_name, policy_name, start=None, expiry=None,\n permission=None, **kwargs):\n acl = _get_acl(client, container_name, **kwargs)\n acl[policy_name] = AccessPolicy(permission, expiry, start)\n if hasattr(acl, 'public_access'):\n kwargs['public_access'] = getattr(acl, 'public_access')\n\n return _set_acl(client, container_name, acl, **kwargs)", "def create_policy(self, fn_inputs):\n\n # determine if the policy is already in place\n response, err_msg = self._get_policy_by_sha256(fn_inputs.get('reaqta_sha256'))\n if err_msg:\n return {}, err_msg\n\n policy_info = response.json()\n if policy_info.get('result'):\n return {}, 'A policy already exists for this file hash: {0}. 
<a href=\"{1}\" target=\"blank\">{1}</a>'.format(\n fn_inputs.get('reaqta_sha256'),\n self.make_linkback_url(policy_info['result'][0]['id'], POLICY_DETAILS))\n\n params = {\n \"sha256\": fn_inputs.get('reaqta_sha256'),\n \"title\": fn_inputs.get('reaqta_policy_title', ''),\n \"description\": fn_inputs.get('reaqta_policy_description', ''),\n \"disable\": not fn_inputs.get('reaqta_policy_enabled', True),\n \"block\": fn_inputs.get('reaqta_policy_block', False),\n \"enabledGroups\": [],\n \"disabledGroups\": []\n }\n\n # collect all the group names and find the groupIds\n if fn_inputs.get('reaqta_policy_included_groups'):\n group_name_list = [ group.strip() for group in fn_inputs.get('reaqta_policy_included_groups', \"\").split(',') ]\n group_id_list = self.get_group_ids(group_name_list)\n if group_id_list:\n params['enabledGroups'] = group_id_list\n\n if fn_inputs.get('reaqta_policy_excluded_groups'):\n group_name_list = [ group.strip() for group in fn_inputs.get('reaqta_policy_excluded_groups', \"\").split(',') ]\n group_id_list = self.get_group_ids(group_name_list)\n if group_id_list:\n params['disabledGroups'] = group_id_list\n\n LOG.debug(\"create_policy: %s\", params)\n url = urljoin(POLICY_URI, \"trigger-on-process-hash\")\n return self.api_call(\"POST\", url, params)", "def __create_policy_def(self):\n\n self.logger.info(f\"Creating policy definition {self.policy_id}\")\n policy_definition_res = self.interactor.put_policy_definition(\n self.policy_id, self.policy_json\n )\n\n # definition was not created, report and abort\n if policy_definition_res.status_code != 201:\n self.output_res[\"result\"][\"status\"] = \"ERROR\"\n self.output_res[\"result\"][\n \"message\"\n ] = f\"Policy definition {self.policy_id} could not be created - {policy_definition_res.status_code}: {policy_definition_res.text}\"\n\n self.running_evaluations[self.eval_id] = self.output_res\n return False\n\n return True", "def put_bucket_policy(self, bucket_name, policy):\n self._client.put_bucket_policy(Bucket=bucket_name, Policy=policy)", "def test_create_namespaced_policy_binding(self):\n pass", "def add_policy(self, sec, ptype, rule):\r\n self._save_policy_line(ptype, rule)\r\n return True", "def test_create_firewall_policy_with_mandatory_params(self):\r\n resource = 'firewall_policy'\r\n cmd = firewallpolicy.CreateFirewallPolicy(test_cli20.MyApp(sys.stdout),\r\n None)\r\n tenant_id = 'my-tenant'\r\n name = 'my-name'\r\n my_id = 'myid'\r\n args = ['--tenant-id', tenant_id,\r\n '--admin-state_up',\r\n name, ]\r\n position_names = ['name', ]\r\n position_values = [name, ]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n admin_state_up=True, tenant_id=tenant_id)", "def policy_name(self, policy_name):\n\n self._policy_name = policy_name", "def policy_name(self, policy_name):\n\n self._policy_name = policy_name", "def create_namespaced_policy(self, body, namespace, **kwargs):\n\n all_params = ['body', 'namespace', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_policy`\")\n # verify the required parameter 'namespace' is 
set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_policy`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/policies'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Policy',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def attach(profile, role, policy):\n # Make sure the role exists.\n if not exists(profile, role):\n msg = \"No role '\" + str(role) + \"'.\"\n raise ResourceDoesNotExist(msg)\n\n # Make sure the policy exists.\n policy_data = policy_jobs.fetch_by_name(profile, policy)\n if not policy_data:\n msg = \"No policy '\" + str(policy) + \"'.\"\n raise ResourceDoesNotExist(msg)\n\n # Get the policy's ARN.\n policy_arn = policy_data[0][\"Arn\"]\n \n # Attach the policy to the role.\n params = {}\n params[\"profile\"] = profile\n params[\"role\"] = role\n params[\"policy\"] = policy_arn\n utils.do_request(role_lib, \"attach_policy\", params)", "def __init__(__self__, *,\n policy: pulumi.Input[str],\n resource_arn: pulumi.Input[str]):\n pulumi.set(__self__, \"policy\", policy)\n pulumi.set(__self__, \"resource_arn\", resource_arn)", "def cleanup_policy_create(ctx: click.Context, **kwargs):\n # TODO: use a click type for this check?\n criteria_keys = {'downloaded', 'updated', 'regex'}\n util.move_to_key(kwargs, 'criteria', criteria_keys)\n\n util.rename_keys(kwargs['criteria'], {\n 'downloaded': 'lastDownloaded',\n 'updated': 'lastBlobUpdated',\n })\n\n subcommand_cleanup_policy.cmd_create(ctx.obj, **kwargs)", "def provision(self, policy):\n client = self.connect(VAULT_TOKEN)\n token = client.create_token(policies = [policy])\n return token[\"auth\"][\"client_token\"]", "def test_resource_policy(self):\n expected_actions = sorted(['rt:get', 'rt:put', 'rt:update', 'rt:delete'])\n test_resource = ResourceTypeName.get()\n test_policy_name = 'test_policy'\n test_policy = create_test_ResourcePolicy('tp{i}', actions=expected_actions)\n self.app.post(\n f'/v1/resource/{test_resource}',\n data=json.dumps({'actions': expected_actions}),\n headers=admin_headers)\n\n # 400 returned when creating a policy with invalid actions\n test_policy = create_test_ResourcePolicy('tp{i}', actions=['invalid:actions'])\n resp = self.app.post(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n data=json.dumps({'policy': test_policy}),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 400)\n resp = self.app.get(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 404)\n\n # 
201 return when creating a valid resource policy\n test_policy = create_test_ResourcePolicy('tp{i}', actions=expected_actions)\n resp = self.app.post(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n data=json.dumps({'policy': test_policy}),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 201)\n resp = self.app.get(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n headers=admin_headers\n )\n self.assertJSONEqual(json.loads(resp.body)['policy'], test_policy)\n\n # 200 returned when modifying the policy with valid actions\n test_policy = create_test_ResourcePolicy('tp{i}', actions=expected_actions[:2])\n resp = self.app.put(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n data=json.dumps({'policy': test_policy}),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 200)\n resp = self.app.get(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n headers=admin_headers\n )\n self.assertJSONEqual(json.loads(resp.body)['policy'], test_policy)\n\n # 400 returned when modifying the policy with invalid actions\n test_policy2 = create_test_ResourcePolicy('tp{i}', actions=['invalid:actions'])\n resp = self.app.put(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n data=json.dumps({'policy': test_policy2}),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 400)\n resp = self.app.get(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n headers=admin_headers\n )\n self.assertJSONEqual(json.loads(resp.body)['policy'], test_policy)\n\n # delete the policy\n resp = self.app.delete(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 200)\n resp = self.app.get(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 404)", "def pre_network_policy_create(self, resource_dict):\n pass", "def create_policy_request():\n return {\n 'public_key':\n r'BBLewg4VqLR38b38daE7Fj\\/uhr543uGrEpyoPFgmFZK6EZ9g2XdK\\/i65RrSJ6sJ96aXD3DJHY3Me2GJQO9\\/ifjE=',\n 'label':\n 'Integration Test Policy',\n 'operations': [{\n 'sensor_id': 10,\n 'action': 'SHARE',\n }, {\n 'sensor_id': 53,\n 'action': 'BIN',\n 'bins': [30.0, 60.0, 90.0]\n }, {\n 'sensor_id': 55,\n 'action': 'MOVING_AVG',\n 'interval': 300\n }]\n }", "def test_waf_policy_basic(self, resource_group):\n # multi-line comment below\n subscription = self.current_subscription()\n blockpolicy = self.create_random_name(prefix='cli', length=24)\n ruleName = self.create_random_name(prefix='cli', length=24)\n cmd = 'az network front-door waf-policy create -g {resource_group} -n {blockpolicy} --mode prevention'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], blockpolicy)\n self.assertEqual(result['policySettings']['mode'], \"Prevention\")\n self.assertEqual(result['policySettings']['requestBodyCheck'], \"Enabled\")\n self.assertIn('customRules', result)\n self.assertIn('managedRules', result)\n self.assertIn('id', result)\n self.assertEqual(result['sku']['name'], \"Classic_AzureFrontDoor\")\n\n standardskupolicy = self.create_random_name(prefix='cli', length=24)\n cmd = 'az network front-door waf-policy create -g {resource_group} -n {standardskupolicy} --mode prevention --sku Standard_AzureFrontDoor'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], standardskupolicy)\n 
self.assertEqual(result['policySettings']['mode'], \"Prevention\")\n self.assertEqual(result['policySettings']['requestBodyCheck'], \"Enabled\")\n self.assertIn('customRules', result)\n self.assertIn('managedRules', result)\n self.assertIn('id', result)\n self.assertEqual(result['sku']['name'], \"Standard_AzureFrontDoor\")\n\n detectionredirectpolicy = self.create_random_name(prefix='cli', length=24)\n cmd = 'az network front-door waf-policy create -g {resource_group} -n {detectionredirectpolicy} --mode Detection --redirect-url http://www.microsoft.com --sku Premium_AzureFrontDoor'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], detectionredirectpolicy)\n self.assertEqual(result['policySettings']['mode'], \"Detection\")\n self.assertEqual(result['policySettings']['redirectUrl'], \"http://www.microsoft.com\")\n self.assertEqual(result['policySettings']['requestBodyCheck'], \"Enabled\")\n self.assertIn('customRules', result)\n self.assertIn('managedRules', result)\n self.assertIn('id', result)\n self.assertEqual(result['sku']['name'], \"Premium_AzureFrontDoor\")\n\n detectioncbcpolicy = self.create_random_name(prefix='cli', length=24)\n cmd = 'az network front-door waf-policy create -g {resource_group} -n {detectioncbcpolicy} --mode Detection --redirect-url http://www.microsoft.com --custom-block-response-status-code 406 --sku Classic_AzureFrontDoor'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], detectioncbcpolicy)\n self.assertEqual(result['policySettings']['mode'], \"Detection\")\n self.assertEqual(result['policySettings']['redirectUrl'], \"http://www.microsoft.com\")\n self.assertEqual(result['policySettings']['customBlockResponseStatusCode'], 406)\n self.assertEqual(result['policySettings']['requestBodyCheck'], \"Enabled\")\n self.assertIn('customRules', result)\n self.assertIn('managedRules', result)\n self.assertIn('id', result)\n self.assertEqual(result['sku']['name'], \"Classic_AzureFrontDoor\")\n\n detectioncbbpolicy = self.create_random_name(prefix='cli', length=24)\n cmd = 'az network front-door waf-policy create -g {resource_group} -n {detectioncbbpolicy} --mode Detection --redirect-url http://www.microsoft.com --custom-block-response-status-code 406 --custom-block-response-body YiBvZHk='.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], detectioncbbpolicy)\n self.assertEqual(result['policySettings']['mode'], \"Detection\")\n self.assertEqual(result['policySettings']['enabledState'], \"Enabled\")\n self.assertEqual(result['policySettings']['redirectUrl'], \"http://www.microsoft.com\")\n self.assertEqual(result['policySettings']['customBlockResponseStatusCode'], 406)\n self.assertEqual(result['policySettings']['customBlockResponseBody'], \"YiBvZHk=\")\n self.assertEqual(result['policySettings']['requestBodyCheck'], \"Enabled\")\n self.assertIn('customRules', result)\n self.assertIn('managedRules', result)\n self.assertIn('id', result)\n self.assertEqual(result['sku']['name'], \"Classic_AzureFrontDoor\")\n\n detectiondisabledpolicy = self.create_random_name(prefix='cli', length=24)\n cmd = 'az network front-door waf-policy create -g {resource_group} -n {detectiondisabledpolicy} --mode Detection --disabled'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], detectiondisabledpolicy)\n self.assertEqual(result['policySettings']['mode'], \"Detection\")\n 
self.assertEqual(result['policySettings']['enabledState'], \"Disabled\")\n self.assertEqual(result['policySettings']['redirectUrl'], None)\n self.assertEqual(result['policySettings']['customBlockResponseStatusCode'], None)\n self.assertEqual(result['policySettings']['customBlockResponseBody'], None)\n self.assertEqual(result['policySettings']['requestBodyCheck'], \"Enabled\")\n self.assertIn('customRules', result)\n self.assertIn('managedRules', result)\n self.assertIn('id', result)\n self.assertEqual(result['sku']['name'], \"Classic_AzureFrontDoor\")\n\n cmd = 'az network front-door waf-policy update -g {resource_group} -n {detectiondisabledpolicy} --mode Detection'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertIn('customRules', result)\n self.assertIn('managedRules', result)\n self.assertIn('id', result)\n self.assertEqual(result['policySettings']['enabledState'], \"Enabled\")\n self.assertEqual(result['policySettings']['requestBodyCheck'], \"Enabled\")\n self.assertEqual(result['sku']['name'], \"Classic_AzureFrontDoor\")\n\n cmd = 'az network front-door waf-policy update -g {resource_group} -n {blockpolicy} --tags test=best'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], blockpolicy)\n self.assertEqual(result['policySettings']['mode'], \"Prevention\")\n # TODO uncomment once API support for updating tags is fixed :-O\n # self.assertEqual(result['tags'], { 'test': 'best' })\n self.assertIn('customRules', result)\n self.assertIn('managedRules', result)\n self.assertIn('id', result)\n self.assertEqual(result['sku']['name'], \"Classic_AzureFrontDoor\")\n\n cmd = 'az network front-door waf-policy update -g {resource_group} -n {blockpolicy} --mode detection --sku Classic_AzureFrontDoor'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], blockpolicy)\n self.assertEqual(result['policySettings']['mode'], \"Detection\")\n self.assertEqual(result['policySettings']['requestBodyCheck'], \"Enabled\")\n self.assertEqual(result['sku']['name'], \"Classic_AzureFrontDoor\")\n\n cmd = 'az network front-door waf-policy update -g {resource_group} -n {blockpolicy} --mode prevention --redirect-url http://www.microsoft.com'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], blockpolicy)\n self.assertEqual(result['policySettings']['mode'], \"Prevention\")\n self.assertEqual(result['policySettings']['redirectUrl'], 'http://www.microsoft.com')\n self.assertEqual(result['sku']['name'], \"Classic_AzureFrontDoor\")\n\n cmd = 'az network front-door waf-policy update -g {resource_group} -n {blockpolicy} --custom-block-response-status-code 406'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], blockpolicy)\n self.assertEqual(result['policySettings']['mode'], \"Prevention\")\n self.assertEqual(result['policySettings']['customBlockResponseStatusCode'], 406)\n self.assertEqual(result['policySettings']['requestBodyCheck'], \"Enabled\")\n self.assertEqual(result['sku']['name'], \"Classic_AzureFrontDoor\")\n\n cmd = 'az network front-door waf-policy update -g {resource_group} -n {blockpolicy} --custom-block-response-status-code 405 --custom-block-response-body YiBvZHk='.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], blockpolicy)\n self.assertEqual(result['policySettings']['mode'], \"Prevention\")\n 
self.assertEqual(result['policySettings']['customBlockResponseStatusCode'], 405)\n self.assertEqual(result['policySettings']['customBlockResponseBody'], \"YiBvZHk=\")\n self.assertEqual(result['policySettings']['requestBodyCheck'], \"Enabled\")\n self.assertEqual(result['sku']['name'], \"Classic_AzureFrontDoor\")\n\n cmd = 'az network front-door waf-policy update -g {resource_group} -n {blockpolicy} --disabled'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], blockpolicy)\n self.assertEqual(result['policySettings']['enabledState'], \"Disabled\")\n self.assertEqual(result['policySettings']['requestBodyCheck'], \"Enabled\")\n self.assertEqual(result['sku']['name'], \"Classic_AzureFrontDoor\")\n\n cmd = 'az network front-door waf-policy show -g {resource_group} -n {blockpolicy}'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(result['name'], blockpolicy)\n # spot check\n self.assertEqual(result['policySettings']['enabledState'], \"Disabled\")\n self.assertEqual(result['policySettings']['customBlockResponseStatusCode'], 405)\n self.assertEqual(result['policySettings']['requestBodyCheck'], \"Enabled\")\n self.assertEqual(result['sku']['name'], \"Classic_AzureFrontDoor\")\n\n cmd = 'az network front-door waf-policy list -g {resource_group}'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(len(result), 6)\n blockPolicyObject = [policy for policy in result if policy['name'] == blockpolicy][0]\n self.assertEqual(blockPolicyObject['name'], blockpolicy)\n\n cmd = 'az network front-door waf-policy delete -g {resource_group} -n {blockpolicy}'.format(**locals())\n result = self.cmd(cmd)\n\n cmd = 'az network front-door waf-policy list -g {resource_group}'.format(**locals())\n result = self.cmd(cmd).get_output_in_json()\n self.assertEqual(len(result), 5)\n self.assertEqual(len([policy for policy in result if policy['name'] == blockpolicy]), 0)", "def adapter_policy_create(handle, name, descr=\"\", parent_dn=\"org-root\"):\n\n from ucsmsdk.mometa.adaptor.AdaptorHostEthIfProfile import \\\n AdaptorHostEthIfProfile\n\n obj = handle.query_dn(parent_dn)\n if not obj:\n raise ValueError(\"org '%s' does not exist\" % parent_dn)\n\n mo = AdaptorHostEthIfProfile(parent_mo_or_dn=obj, name=name, descr=descr)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def test_create_ikepolicy_with_limited_params(self):\r\n resource = 'ikepolicy'\r\n cmd = ikepolicy.CreateIKEPolicy(test_cli20.MyApp(sys.stdout), None)\r\n name = 'ikepolicy1'\r\n auth_algorithm = 'sha1'\r\n encryption_algorithm = 'aes-128'\r\n ike_version = 'v1'\r\n phase1_negotiation_mode = 'main'\r\n pfs = 'group5'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n\r\n args = [name,\r\n '--tenant-id', tenant_id]\r\n\r\n position_names = ['name',\r\n 'auth_algorithm', 'encryption_algorithm',\r\n 'phase1_negotiation_mode',\r\n 'ike_version', 'pfs',\r\n 'tenant_id']\r\n\r\n position_values = [name,\r\n auth_algorithm, encryption_algorithm,\r\n phase1_negotiation_mode,\r\n ike_version, pfs,\r\n tenant_id]\r\n\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)", "def setPolicy(self, value):\n return self._set(policy=value)", "def put_group_policy(self, group_name, policy_name, policy_json):\r\n params = {'GroupName' : group_name,\r\n 'PolicyName' : policy_name,\r\n 'PolicyDocument' : policy_json}\r\n return self.get_response('PutGroupPolicy', params, verb='POST')", "def 
create_bucket(self, bucket_name, headers=None,\r\n location=Location.DEFAULT, policy=None):\r\n check_lowercase_bucketname(bucket_name)\r\n\r\n if policy:\r\n if headers:\r\n headers[self.provider.acl_header] = policy\r\n else:\r\n headers = {self.provider.acl_header : policy}\r\n if location == Location.DEFAULT:\r\n data = ''\r\n else:\r\n data = '<CreateBucketConstraint><LocationConstraint>' + \\\r\n location + '</LocationConstraint></CreateBucketConstraint>'\r\n response = self.make_request('PUT', bucket_name, headers=headers,\r\n data=data)\r\n body = response.read()\r\n if response.status == 409:\r\n raise self.provider.storage_create_error(\r\n response.status, response.reason, body)\r\n if response.status == 200:\r\n return self.bucket_class(self, bucket_name)\r\n else:\r\n raise self.provider.storage_response_error(\r\n response.status, response.reason, body)", "def policy(cls):\n return relationship.many_to_one(cls, 'policy')", "def put_bucket_policy(Bucket=None, ConfirmRemoveSelfBucketAccess=None, Policy=None):\n pass", "def __init__(__self__, *,\n policy: Optional[pulumi.Input[str]] = None,\n resource_arn: Optional[pulumi.Input[str]] = None):\n if policy is not None:\n pulumi.set(__self__, \"policy\", policy)\n if resource_arn is not None:\n pulumi.set(__self__, \"resource_arn\", resource_arn)", "def create(self, params):\n return self.make_client_call('create_load_balancer_policy', params)", "def create_policy_name(self, role_name, postfix):\n return '{}-{}-{}'.format(role_name, 'policy', postfix)", "def test_create_cluster_policy_binding(self):\n pass", "def create_role(self, role_name, policy):\n try:\n Oprint.info('Creating role {}'.format(role_name), 'iam')\n response = self._client.create_role(RoleName=role_name, AssumeRolePolicyDocument=policy)\n Oprint.info('Complete creating role {}'.format(role_name), 'iam')\n except Exception as e:\n Oprint.err(str(e.response['Error']['Message']), 'iam')\n\n return response", "def test_create_policy_for_all_namespaces(self):\n pass", "def dynamic_vnic_conn_policy_create(handle, name, descr=None, dynamic_eth=\"54\",\n adaptor_profile_name=None,\n protection=\"protected\",\n parent_dn=\"org-root\", **kwargs):\n\n from ucscsdk.mometa.vnic.VnicDynamicConPolicy import VnicDynamicConPolicy\n\n obj = handle.query_dn(parent_dn)\n if not obj:\n raise UcscOperationError(\"dynamic_vnic_conn_policy_create\",\n \"Org %s does not exist\" % parent_dn)\n\n mo = VnicDynamicConPolicy(parent_mo_or_dn=obj,\n name=name,\n descr=descr,\n dynamic_eth=dynamic_eth,\n protection=protection,\n adaptor_profile_name=adaptor_profile_name)\n\n mo.set_prop_multiple(**kwargs)\n\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def create_policy(\n self,\n trainer_module: ReAgentLightningModule,\n serving: bool = False,\n normalization_data_map: Optional[Dict[str, NormalizationData]] = None,\n ) -> Policy:\n assert isinstance(trainer_module, DiscreteCRRTrainer)\n if serving:\n assert normalization_data_map\n return create_predictor_policy_from_model(\n self.build_actor_module(trainer_module, normalization_data_map)\n )\n else:\n return ActorPolicyWrapper(trainer_module.actor_network)", "def test_create_policy_type(mock_send_message):\n A1sim.create_policy_type(BASE_URL, \"test_id\", {})\n mock_send_message.assert_called_once_with('PUT',\n 'Create Policy Type',\n (f\"{BASE_URL}/policytype?id=test_id\"),\n data={},\n headers=HEADER)", "def test_create_cluster_policy(self):\n pass", "def post_set_iam_policy(self, response: policy_pb2.Policy) -> 
policy_pb2.Policy:\n return response", "def test_create_dispatch_policy(self):\n pass", "def __init__(__self__,\n resource_name: str,\n args: PolicyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def set_policyname(self, policyname):\n self.options[\"policyname\"] = policyname", "def make_PPO2(env_name):\n env = util.make_vec_env(env_name, 8)\n # Didn't look at rl-baselines-zoo for this, but these hyperparameters\n # seem ok. They aren't great though.\n policy = stable_baselines.PPO2(util.FeedForward32Policy, env,\n verbose=0, tensorboard_log=\"output/\",\n learning_rate=3e-3,\n nminibatches=32,\n noptepochs=10,\n n_steps=2048)\n return policy", "def add_policy(\n self,\n policy_id: PolicyID,\n policy_cls: Optional[Type[Policy]] = None,\n policy: Optional[Policy] = None,\n *,\n observation_space: Optional[gym.spaces.Space] = None,\n action_space: Optional[gym.spaces.Space] = None,\n config: Optional[Union[AlgorithmConfig, PartialAlgorithmConfigDict]] = None,\n policy_state: Optional[PolicyState] = None,\n policy_mapping_fn: Optional[Callable[[AgentID, EpisodeID], PolicyID]] = None,\n policies_to_train: Optional[\n Union[\n Container[PolicyID],\n Callable[[PolicyID, Optional[SampleBatchType]], bool],\n ]\n ] = None,\n evaluation_workers: bool = True,\n module_spec: Optional[SingleAgentRLModuleSpec] = None,\n ) -> Optional[Policy]:\n validate_policy_id(policy_id, error=True)\n\n self.workers.add_policy(\n policy_id,\n policy_cls,\n policy,\n observation_space=observation_space,\n action_space=action_space,\n config=config,\n policy_state=policy_state,\n policy_mapping_fn=policy_mapping_fn,\n policies_to_train=policies_to_train,\n module_spec=module_spec,\n )\n\n # If learner API is enabled, we need to also add the underlying module\n # to the learner group.\n if self.config._enable_learner_api:\n policy = self.get_policy(policy_id)\n module = policy.model\n self.learner_group.add_module(\n module_id=policy_id,\n module_spec=SingleAgentRLModuleSpec.from_module(module),\n )\n\n weights = policy.get_weights()\n self.learner_group.set_weights({policy_id: weights})\n\n # Add to evaluation workers, if necessary.\n if evaluation_workers is True and self.evaluation_workers is not None:\n self.evaluation_workers.add_policy(\n policy_id,\n policy_cls,\n policy,\n observation_space=observation_space,\n action_space=action_space,\n config=config,\n policy_state=policy_state,\n policy_mapping_fn=policy_mapping_fn,\n policies_to_train=policies_to_train,\n module_spec=module_spec,\n )\n\n # Return newly added policy (from the local rollout worker).\n return self.get_policy(policy_id)", "def set_policyname(self, policyname):\n self.options['policyname'] = policyname", "def reproduce(policy_name, *pars , **kpars):\n global amount\n global current_policy\n global _alarm\n # new_amount = amount + linear_add()\n current_policy = policy_name\n new_amount = 0\n track_resource() # tracking before adding\n # track_heuristics()\n \n if policy_name == 'linear_add':\n #=======================================================================\n # x = linear_add()\n # if amount + x > resource_limit:\n # new_amount = resource_limit\n # else:\n # new_amount = amount + x\n #=======================================================================\n new_amount = linear_add()\n \n elif policy_name == 'double':\n x = double(amount)\n if amount + x > resource_limit:\n new_amount = resource_limit\n else: \n new_amount = amount + x\n\n elif policy_name == 'flat':\n new_amount = initial_amount\n \n else:\n print 
\"Unknown resource reproduction policy: %s\" % policy_name\n \n amount = new_amount \n if amount >= initial_amount * 0.3:\n _alarm = False", "def put_user_policy(self, user_name, policy_name, policy_json):\r\n params = {'UserName' : user_name,\r\n 'PolicyName' : policy_name,\r\n 'PolicyDocument' : policy_json}\r\n return self.get_response('PutUserPolicy', params, verb='POST')", "def Create(self,\n firewall_policy=None,\n firewall_policy_rule=None,\n batch_mode=False,\n only_generate_request=False):\n\n if batch_mode:\n requests = [\n self._MakeCreateRuleRequestTuple(\n firewall_policy=firewall_policy,\n firewall_policy_rule=firewall_policy_rule)\n ]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n op_res = self._service.AddRule(\n self._MakeCreateRuleRequestTuple(\n firewall_policy=firewall_policy,\n firewall_policy_rule=firewall_policy_rule)[2])\n return self.WaitOperation(\n op_res, message='Adding a rule to the organization firewall policy.')", "def test_create_namespaced_pod_security_policy_review(self):\n pass", "def test_create_ipsecpolicy_with_limited_params(self):\r\n resource = 'ipsecpolicy'\r\n cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)\r\n name = 'ipsecpolicy1'\r\n auth_algorithm = 'sha1'\r\n encryption_algorithm = 'aes-128'\r\n encapsulation_mode = 'tunnel'\r\n pfs = 'group5'\r\n transform_protocol = 'esp'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n\r\n args = [name,\r\n '--tenant-id', tenant_id]\r\n\r\n position_names = ['name', 'auth_algorithm', 'encryption_algorithm',\r\n 'encapsulation_mode',\r\n 'transform_protocol', 'pfs',\r\n 'tenant_id']\r\n\r\n position_values = [name, auth_algorithm, encryption_algorithm,\r\n encapsulation_mode,\r\n transform_protocol, pfs,\r\n tenant_id]\r\n\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values)", "def __assign_policy_def(self):\n\n self.logger.info(\n f\"Creating policy assignment of definition {self.policy_id} to assignment {self.assignment_id}\"\n )\n policy_assignment_res = self.interactor.put_policy_assignment(\n self.policy_id, self.assignment_id\n )\n\n if policy_assignment_res.status_code != 201:\n self.output_res[\"result\"][\"status\"] = \"ERROR\"\n self.output_res[\"result\"][\n \"message\"\n ] = f\"Policy assignment {self.assignment_id} could not be created - {policy_assignment_res.status_code}: {policy_assignment_res.text}\"\n\n self.running_evaluations[self.eval_id] = self.output_res\n return False\n\n return True", "def attach_policy(\n role,\n policy,\n profile=None,\n access_key_id=None,\n access_key_secret=None):\n aws_profile = utils.get_profile(profile, access_key_id, access_key_secret)\n\n try:\n role_jobs.attach(aws_profile, role, policy)\n except PermissionDenied:\n msg = \"You don't have permission to attach policies.\"\n raise click.ClickException(msg)\n except (MissingKey, Non200Response) as error:\n raise click.ClickException(str(error))\n except AwsError as error:\n raise click.ClickException(str(error))\n except (ResourceDoesNotExist, ResourceNotDeleted) as error:\n raise click.ClickException(str(error))", "def _setup_policy_object(policy_type,\n policy_entitlement_type,\n service_target_type,\n policy_membership_type,\n container_object,\n name: str,\n priority: int,\n description: Optional[str] = None,\n keywords: Optional[str] = None,\n caption: Optional[str] = None,\n available_to_subunits: Optional[bool] = None,\n enabled: Optional[bool] = None,\n 
membership_type: Optional[str] = None,\n membership_role_dns: Optional[List[str]] = None,\n entitlements: List[Dict] = []):\n\n policy_object = policy_type()\n\n if description is not None:\n policy_object['description'] = description\n policy_object['name'] = name\n\n if keywords is not None:\n policy_object['keywords'] = keywords\n\n if caption is not None:\n policy_object['caption'] = caption\n\n entitlement_list = []\n\n # Iterate through the entitlements argument and add each one to the request\n for entitlement in entitlements:\n entitlement_object = policy_entitlement_type()\n service_target_object = service_target_type()\n\n # Set type 0 for a service type (specify the name of the service profile in the name. MAKE SURE IT IS EXACT-\n # IT IS CASE_SENSITIVE).\n # Set type 1 for a specific service (specify it's DN in the name).\n # Set type 2 for all services (specify * as the name).\n # Set type 3 for a service selection policy (specify the name of the service profile in the name. MAKE SURE IT\n # IS EXACT- IT IS CASE_SENSITIVE). The service selection policy will be automatically selected based on the\n # service profile selected.\n\n if entitlement['target_type'] is not None:\n if entitlement['target_type'] == 'all':\n service_target_object['name'] = '*'\n service_target_object['type'] = '2'\n elif entitlement['target_type'] == 'type':\n service_target_object['name'] = entitlement['service_type']\n service_target_object['type'] = '0'\n elif entitlement['target_type'] == 'policy':\n service_target_object['name'] = entitlement['service_type']\n service_target_object['type'] = '3'\n elif entitlement['target_type'] == 'specific':\n service_target_object['name'] = entitlement['service_dn']\n service_target_object['type'] = '1'\n else:\n raise ValueError(\"Invalid target_type value in entitlement. Valid values are 'all', 'type', 'policy', \"\n \"or 'specific'.\")\n\n entitlement_object['serviceTarget'] = service_target_object\n\n if entitlement['automatic'] is not None:\n # The type value should be set to 0 for manual provisioning, or 1 for automatic provisioning\n if entitlement['automatic']:\n entitlement_object['type'] = 1\n else:\n entitlement_object['type'] = 0\n\n if entitlement['workflow_dn'] is not None:\n entitlement_object['processDN'] = str(entitlement['workflow_dn'])\n\n if entitlement['ownership_type'] is not None:\n if entitlement['ownership_type'].lower() == 'all':\n entitlement_object['ownershipType'] = '*'\n elif entitlement['ownership_type'].lower() == 'device':\n entitlement_object['ownershipType'] = 'Device'\n elif entitlement['ownership_type'].lower() == 'individual':\n entitlement_object['ownershipType'] = 'Individual'\n elif entitlement['ownership_type'].lower() == 'system':\n entitlement_object['ownershipType'] = 'System'\n elif entitlement['ownership_type'].lower() == 'vendor':\n entitlement_object['ownershipType'] = 'Vendor'\n else:\n raise ValueError(\"Invalid value for entitlement ownership_type. Valid values are 'all', 'device', \"\n \"'individual', 'system', or 'vendor'.\")\n\n entitlement_list.append(entitlement_object)\n\n policy_object['entitlements'] = {'item': entitlement_list}\n\n # Add membership information to the request\n membership_list = []\n membership_object = policy_membership_type()\n\n if membership_type is not None:\n # Set type 2 for all users in the organization. Specify '*' as the name.\n # Set type 3 to specify a specific role. Specify the role DN as the name. 
Create more membership objects for\n # more roles.\n # Set type 4 for all other users who are not granted to the entitlement(s) defined by this provisioning policy\n # via other policies. Specify '*' as the name.\n if membership_type == 'all':\n membership_object['name'] = '*'\n membership_object['type'] = '2'\n membership_list.append(membership_object)\n elif membership_type == 'other':\n membership_object['name'] = '*'\n membership_object['type'] = '4'\n membership_list.append(membership_object)\n elif membership_type == 'roles':\n for role in membership_role_dns:\n membership_object = policy_membership_type()\n membership_object['name'] = str(role)\n membership_object['type'] = '3'\n membership_list.append(membership_object)\n else:\n raise ValueError(\"Invalid value for membership_type. Valid values are 'all', 'other', or 'roles'.\")\n\n policy_object['membership'] = {'item': membership_list}\n\n if priority is not None:\n if priority < 1:\n raise ValueError(\"Invalid priority value. Priority must be an integer greater than 0.\")\n policy_object['priority'] = priority\n\n if available_to_subunits is not None:\n # Scope should be set to 1 for 'this business unit only' and 2 for 'this business unit and its subunits'\n if available_to_subunits:\n policy_object['scope'] = 2\n else:\n policy_object['scope'] = 1\n\n if container_object is not None:\n policy_object['organizationalContainer'] = container_object\n\n if enabled is not None:\n policy_object['enabled'] = enabled\n\n return policy_object", "def from_json(data: dict) -> \"Policy\":\n try:\n return PolicySchema().load(data)\n except ValidationError as err:\n raise PolicyCreateError(*err.args)", "def CreatePolicyForExternalPolicyData(self, policy_key):\n settings = ep.ExternalPolicyData()\n data = self.server.ReadPolicyDataFromDataDir(policy_key)\n if data:\n settings.download_url = urlparse.urljoin(\n self.server.GetBaseURL(), 'externalpolicydata?key=%s' % policy_key)\n settings.secure_hash = hashlib.sha256(data).digest()\n return settings.SerializeToString()\n else:\n return None", "def translate_policy(policy: dict):\n if 'PolicyName' in policy:\n # This is a normal policy that should not be expanded\n return policy\n template_name = next(iter(policy))\n template_parameters = policy[template_name]\n try:\n # 'convert' will return a list of policy statements\n policy_document = processor.convert(template_name, template_parameters)\n except InsufficientParameterValues as e:\n # Exception's message will give lot of specific details\n raise ValueError(str(e))\n except InvalidParameterValues:\n raise ValueError(\"Must specify valid parameter values for policy template '{}'\".format(template_name))\n return {\n \"PolicyName\": template_name + '-' + str(uuid.uuid4()),\n \"PolicyDocument\": policy_document\n }", "def post(self, consumer_key):\n consumer = Consumer.query.filter(\n Consumer.key == consumer_key\n ).first_or_404()\n\n missing_fields = []\n payload = json.loads(request.data)\n for required_field in (\"rid\", \"actions\"):\n if required_field not in payload:\n missing_fields.append(required_field)\n\n if missing_fields:\n abort(400, \"Missing required fields: %s\" % (\n \", \".join(missing_fields)))\n\n policy = Policy(\n consumer_key=consumer_key,\n rid=payload[\"rid\"],\n actions=set(payload[\"actions\"]))\n policy.save()\n return self.jsonify(self._serialize(policy), status_code=201)", "def __init__(__self__, *,\n policy_id: pulumi.Input[str],\n policy_parameters: Optional[pulumi.Input['PolicyParametersArgs']] = None):\n 
pulumi.set(__self__, \"policy_id\", policy_id)\n if policy_parameters is not None:\n pulumi.set(__self__, \"policy_parameters\", policy_parameters)", "def __init__(__self__, *,\n policy_id: pulumi.Input[str],\n policy_parameters: Optional[pulumi.Input['PolicyParametersArgs']] = None):\n pulumi.set(__self__, \"policy_id\", policy_id)\n if policy_parameters is not None:\n pulumi.set(__self__, \"policy_parameters\", policy_parameters)", "def test_createPolicy_with_folder(self):\n\n statements = [{\n 'Sid': 'WriteAccess',\n 'Effect': 'Allow',\n 'Action': ['s3:PutObject'] \n }]\n\n expName = 'ndingest_test_tile_bucket_policy'\n folder = 'some/folder'\n\n actual = self.tile_bucket.createPolicy(statements, expName, folder)\n\n try:\n assert(expName == actual.policy_name)\n assert(settings.IAM_POLICY_PATH == actual.path)\n assert(actual.default_version is not None)\n\n # Test that the statements' resource set to this bucket and folder.\n statements = actual.default_version.document['Statement']\n bucket_name = TileBucket.getBucketName()\n arn = 'arn:aws:s3:::{}/{}/*'.format(bucket_name, folder)\n for stmt in statements:\n assert(stmt['Resource'] == arn)\n finally:\n actual.delete()", "def policy_type_name(self, policy_type_name):\n allowed_values = [\"patch\", \"custom\", \"required_software\"] # noqa: E501\n if policy_type_name not in allowed_values:\n raise ValueError(\n \"Invalid value for `policy_type_name` ({0}), must be one of {1}\" # noqa: E501\n .format(policy_type_name, allowed_values)\n )\n\n self._policy_type_name = policy_type_name", "def test_create_bios_policy(self):\n pass", "def create_bucket(self, bucket_name, headers=None,\r\n location=Location.DEFAULT, policy=None):\r\n check_lowercase_bucketname(bucket_name)\r\n\r\n if policy:\r\n if headers:\r\n headers[self.provider.acl_header] = policy\r\n else:\r\n headers = {self.provider.acl_header : policy}\r\n if not location:\r\n data = ''\r\n else:\r\n data = ('<CreateBucketConfiguration>'\r\n '<LocationConstraint>%s</LocationConstraint>'\r\n '</CreateBucketConfiguration>' % location)\r\n response = self.make_request('PUT', bucket_name, headers=headers,\r\n data=data)\r\n body = response.read()\r\n if response.status == 409:\r\n raise self.provider.storage_create_error(\r\n response.status, response.reason, body)\r\n if response.status == 200:\r\n return self.bucket_class(self, bucket_name)\r\n else:\r\n raise self.provider.storage_response_error(\r\n response.status, response.reason, body)", "def update_policy(self):\n pass", "def test_create_firewall_policy_with_all_params(self):\r\n resource = 'firewall_policy'\r\n cmd = firewallpolicy.CreateFirewallPolicy(test_cli20.MyApp(sys.stdout),\r\n None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n firewall_rules_arg = 'rule_id1 rule_id2'\r\n firewall_rules_res = ['rule_id1', 'rule_id2']\r\n tenant_id = 'my-tenant'\r\n my_id = 'myid'\r\n args = ['--description', description,\r\n '--shared',\r\n '--firewall-rules', firewall_rules_arg,\r\n '--audited',\r\n '--tenant-id', tenant_id,\r\n '--admin-state_up',\r\n name]\r\n position_names = ['name', ]\r\n position_values = [name, ]\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n description=description, shared=True,\r\n firewall_rules=firewall_rules_res,\r\n audited=True, admin_state_up=True,\r\n tenant_id=tenant_id)", "def add(nitro, policypatset):\r\n __policypatset = NSPatset()\r\n __policypatset.set_name(policypatset.get_name())\r\n return __policypatset.add_resource(nitro)", 
"def update_policy(self, *args, **kwargs):\r\n pass", "def __init__(__self__,\n resource_name: str,\n args: AssessmentPolicyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def initialize_policies(self, policy_collection, options):", "def delete(self, policy_name):\n path = self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n # Actually run vault\n logging.info(\"Deleting the policy: %s\", address)\n self.vault.requests_request(\"DELETE\", address, headers=self.vault.token_header)", "def add_to_resource_policy(self, permission: aws_cdk.aws_iam.PolicyStatement) -> None:\n ...", "def CreateSecurityPolicy(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateSecurityPolicy\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateSecurityPolicyResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def __init__(self, eid: str, name: str, weekly_salary: int, commission: int):\n pay.CommissionPolicy.__init__(self, weekly_salary, commission)\n super().__init__(eid, name)", "def _custom_policy(resource, expires=None, valid_after=None, ip_address=None):\r\n condition = {}\r\n if expires:\r\n condition[\"DateLessThan\"] = {\"AWS:EpochTime\": expires}\r\n if valid_after:\r\n condition[\"DateGreaterThan\"] = {\"AWS:EpochTime\": valid_after}\r\n if ip_address:\r\n if '/' not in ip_address:\r\n ip_address += \"/32\"\r\n condition[\"IpAddress\"] = {\"AWS:SourceIp\": ip_address}\r\n policy = {\"Statement\": [{\r\n \"Resource\": resource,\r\n \"Condition\": condition}]}\r\n return json.dumps(policy, separators=(\",\", \":\"))", "def create_vpc_if_policy_group(self, name, aep_name):\n policy_group_mo = AccBndlGrp('uni/infra/funcprof/', name, lagT='node')\n self.commit(policy_group_mo)\n # if attachable entity profile does not exists, creates a new one\n class_query = ClassQuery('infraAttEntityP')\n class_query.propFilter = 'eq(infraAttEntityP.name, \"' + AEP_PREFIX + aep_name + '\")'\n pd_list = self.moDir.query(class_query)\n if len(pd_list) == 0:\n vlan_pool_mo = self.create_vlan_pool(VLAN_POOL_PREFIX + aep_name, 'static')\n DomP_mo = self.create_physical_domain(PD_PREFIX + aep_name, str(vlan_pool_mo.dn))\n AttEntityP_mo = self.create_attachable_entity_profile(AEP_PREFIX + aep_name, str(DomP_mo.dn))\n else:\n AttEntityP_mo = pd_list[0]\n # Assign attached entity profile\n self.commit(\n RsAttEntP(policy_group_mo.dn, tDn=str(AttEntityP_mo.dn))\n )\n # Assign interface policies. For non-defaults, check if is already created. 
If not, the system will create them\n IfPolmo = self.moDir.lookupByDn('uni/infra/cdpIfP-CDP-ON')\n if not IfPolmo:\n IfPolmo = IfPol('uni/infra','CDP-ON',adminSt='enabled')\n self.commit(IfPolmo)\n self.commit(\n RsCdpIfPol(policy_group_mo.dn, tnCdpIfPolName=IfPolmo.name)\n )\n self.commit(\n RsHIfPol(policy_group_mo.dn, tnFabricHIfPolName='default')\n )\n self.commit(\n RsL2IfPol(policy_group_mo.dn, tnL2IfPolName='default')\n )\n LagPolmo = self.moDir.lookupByDn('uni/infra/lacplagp-LACP')\n if not LagPolmo:\n LagPolmo = LagPol('uni/infra', 'LACP', mode='active')\n self.commit(LagPolmo)\n self.commit(\n RsLacpPol(policy_group_mo.dn, tnLacpLagPolName=LagPolmo.name)\n )\n self.commit(\n RsLldpIfPol(policy_group_mo.dn, tnLldpIfPolName='default')\n )\n self.commit(\n RsMcpIfPol(policy_group_mo.dn, tnMcpIfPolName='default')\n )\n self.commit(\n RsMonIfInfraPol(policy_group_mo.dn, tnMonInfraPolName='default')\n )\n self.commit(\n RsStormctrlIfPol(policy_group_mo.dn, tnStormctrlIfPolName='default')\n )\n self.commit(\n RsStpIfPol(policy_group_mo.dn, tnStpIfPolName='default')\n )\n return policy_group_mo", "def _set_restricted_policy(environ, bag):\n username = environ['tiddlyweb.usersign']['name']\n if username == 'GUEST':\n return\n bag.policy.owner = username\n # accept does not matter here\n for constraint in ['read', 'write', 'create', 'delete', 'manage']:\n setattr(bag.policy, constraint, [username])\n return", "def idle_blockchain_policy(blockchain_alice, blockchain_bob):\n random_label = b'label://' + os.urandom(32)\n policy = blockchain_alice.create_policy(blockchain_bob, label=random_label, m=2, n=3)\n return policy" ]
[ "0.69018525", "0.6894368", "0.65139604", "0.6491852", "0.6477794", "0.63088816", "0.6284742", "0.62571865", "0.62571806", "0.6177285", "0.6173109", "0.6170149", "0.6067969", "0.60238564", "0.59549844", "0.5883088", "0.58814156", "0.58473706", "0.58184075", "0.5796063", "0.57722455", "0.57471603", "0.57362765", "0.5713319", "0.57090175", "0.56990856", "0.56546146", "0.5627509", "0.5626936", "0.5598759", "0.5597521", "0.55584294", "0.55499977", "0.55325997", "0.55325997", "0.55248874", "0.5519052", "0.55089474", "0.55054796", "0.54517585", "0.5433395", "0.54170465", "0.541502", "0.54112375", "0.5406207", "0.5379803", "0.53543746", "0.53528816", "0.53496337", "0.5338841", "0.5331283", "0.5311001", "0.5294338", "0.52621955", "0.5233963", "0.52208394", "0.52105176", "0.52057374", "0.52051646", "0.51818144", "0.5180329", "0.51801825", "0.51711315", "0.51613706", "0.5155746", "0.5139749", "0.5139183", "0.5137825", "0.51331604", "0.5128421", "0.5120713", "0.511003", "0.51072806", "0.5096977", "0.5074047", "0.50724226", "0.5068912", "0.5064765", "0.5061268", "0.50567293", "0.5056669", "0.5056669", "0.5033657", "0.5020115", "0.501315", "0.5011721", "0.5009706", "0.50082237", "0.50020254", "0.49976635", "0.4991622", "0.4979632", "0.4974872", "0.4966805", "0.49620032", "0.49606735", "0.4957861", "0.49480036", "0.49443802", "0.49400082" ]
0.600187
14
Creates a subscription to a region for a tenancy.
def create_region_subscription(self, create_region_subscription_details, tenancy_id, **kwargs):
    resource_path = "/tenancies/{tenancyId}/regionSubscriptions"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_region_subscription got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "tenancyId": tenancy_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=create_region_subscription_details,
            response_type="RegionSubscription")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=create_region_subscription_details,
            response_type="RegionSubscription")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def test_create_subscription(self):\n pass", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def _create_subscription(self):\n try:\n self.client.create_subscription(\n name=self.subscription_path, topic=self.topic_path\n )\n except NotFound:\n # suitable topic does not exist in the Pitt-Google project\n raise ValueError(\n (\n f\"A subscription named {self.subscription_name} does not exist\"\n \"in the Google Cloud Platform project \"\n f\"{settings.GOOGLE_CLOUD_PROJECT}, \"\n \"and one cannot be automatically create because Pitt-Google \"\n \"does not publish a public topic with the same name.\"\n )\n )\n else:\n self._log_and_print(f\"Created subscription: {self.subscription_path}\")", "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def create_subscription(connection, project_id, body, fields=None, error_msg=None):\n return connection.post(\n url=f'{connection.base_url}/api/subscriptions',\n params={'fields': fields},\n headers={'X-MSTR-ProjectID': project_id},\n json=body,\n )", "def list_region_subscriptions(self, tenancy_id, **kwargs):\n resource_path = \"/tenancies/{tenancyId}/regionSubscriptions\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_region_subscriptions got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tenancyId\": tenancy_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[RegionSubscription]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[RegionSubscription]\")", "def do_create_subscription(csp: CloudProviderInterface, environment_id=None):\n environment = Environments.get(environment_id)\n payload = build_subscription_payload(environment)\n try:\n csp.create_subscription(payload)\n except 
GeneralCSPException as e:\n app.logger.warning(\n \"Unable to create subscription for environment %s.\", environment.id,\n )\n raise e", "def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}", "def create_subscription(self, client_URI_endpoint, event_destination_id,\n name, subscription_context):\n self.client_URI_endpoints[client_URI_endpoint] = \\\n Event(event_destination_id, name, subscription_context)\n self.write_subscriptions_to_tmp(self.client_URI_endpoints)", "def create_subscription_in_snuba(query_subscription_id, **kwargs):\n try:\n subscription = QuerySubscription.objects.get(id=query_subscription_id)\n except QuerySubscription.DoesNotExist:\n metrics.incr(\"snuba.subscriptions.create.subscription_does_not_exist\")\n return\n if subscription.status != QuerySubscription.Status.CREATING.value:\n metrics.incr(\"snuba.subscriptions.create.incorrect_status\")\n return\n if subscription.subscription_id is not None:\n metrics.incr(\"snuba.subscriptions.create.already_created_in_snuba\")\n # This mostly shouldn't happen, but it's possible that a subscription can get\n # into this state. Just attempt to delete the existing subscription and then\n # create a new one.\n try:\n _delete_from_snuba(\n QueryDatasets(subscription.snuba_query.dataset), subscription.subscription_id\n )\n except SnubaError:\n logger.exception(\"Failed to delete subscription\")\n\n subscription_id = _create_in_snuba(subscription)\n subscription.update(\n status=QuerySubscription.Status.ACTIVE.value, subscription_id=subscription_id\n )", "def test_create_subscription_template(self):\n pass", "def subscribe(request):\n address = request.POST.get('address')\n\n new_sub = Subscription(**{\n \"address\": address\n })\n new_sub.save()\n\n return HttpResponse(json.dumps({\n \"status\": \"success\"\n }, default=helpers.json_custom_parser), content_type='application/json')", "def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)", "def create_subscription(self, user, standard):\r\n\r\n subscription = self.create(\r\n user=user,\r\n standard=standard,\r\n )\r\n\r\n return subscription", "def create_subscription(self, organization, collaborations, contractors):\r\n\r\n subscription = self.create(\r\n organization=organization,\r\n collaborations=collaborations,\r\n contractors=contractors,\r\n partner_discovery=partner_discovery,\r\n )\r\n return subscription", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def CreateSubscribeTransaction(self, dest, once=False):\n c = Subscribe(dest, self.node_id, once)\n self.connections.append((\"REACTIVE\", c))\n return c", "def subscription(self):\r\n return SubscriptionResource(self)", "def subscribePost() -> object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n 
subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED", "def post(self):\n data = request.json\n return new_subscription(data=data)", "def post(self, orgname):\n permission = AdministerOrganizationPermission(orgname)\n request_data = request.get_json()\n subscription_id = request_data[\"subscription_id\"]\n if permission.can():\n organization = model.organization.get_organization(orgname)\n user = get_authenticated_user()\n account_number = marketplace_users.get_account_number(user)\n subscriptions = marketplace_subscriptions.get_list_of_subscriptions(account_number)\n\n if subscriptions is None:\n abort(401, message=\"no valid subscriptions present\")\n\n user_subscription_ids = [int(subscription[\"id\"]) for subscription in subscriptions]\n if int(subscription_id) in user_subscription_ids:\n try:\n model.organization_skus.bind_subscription_to_org(\n user_id=user.id, subscription_id=subscription_id, org_id=organization.id\n )\n return \"Okay\", 201\n except model.OrgSubscriptionBindingAlreadyExists:\n abort(400, message=\"subscription is already bound to an org\")\n else:\n abort(401, message=f\"subscription does not belong to {user.username}\")\n\n abort(401)", "def create_subscription(chid, use_time=False, use_ctrl=False,\n mask=None, callback=None):\n mask = mask or DEFAULT_SUBSCRIPTION_MASK\n\n ftype = promote_type(chid, use_ctrl=use_ctrl, use_time=use_time)\n\n uarg = ctypes.py_object(callback)\n evid = ctypes.c_void_p()\n poll()\n ret = libca.ca_create_subscription(ftype, 0, chid, mask,\n _CB_EVENT, uarg, ctypes.byref(evid))\n PySEVCHK('create_subscription', ret)\n\n poll()\n return (_CB_EVENT, uarg, evid)", "def createTenant(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise 
the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp", "def create_subscription(self, device_type):\n url = '{}/v2/subscriptions'.format(self.url)\n device_type = device_type.split('.')[0]\n device_pattern = \"urn:ngsi-ld:{}:*\".format(device_type)\n description = \"Notify QuantumLeap with {}\".format(device_type)\n data = {\n \"description\": description,\n \"subject\": {\n \"entities\": [\n {\n \"idPattern\": device_pattern\n }\n ]\n },\n \"notification\": {\n \"http\": {\n \"url\": \"http://quantumleap:8668/v2/notify\"\n },\n \"metadata\": [\"dateCreated\", \"dateModified\"]\n },\n \"throttling\": 1\n }\n return self.post(url, data=json.dumps(data), headers=self.headers_json)", "def create_pubsub_subscription(client, project, topic, name):\n topic_name = pubsub.topic_name(project, topic)\n full_name = pubsub.subscription_name(project, name)\n if client.get_subscription(full_name):\n return\n\n client.create_subscription(full_name, topic_name)", "def register(self, region=None, payload=None):\n return self._put_response_body([], payload=payload)", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_create_subscription(self):\n try:\n self.arb.create_subscription(\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n bill_first_name=u\"Michael\",\n bill_last_name=u\"Pool\"\n )\n except KeyError:\n pass\n self.arb.create_subscription(\n trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n bill_first_name=u\"Michael\",\n bill_last_name=u\"Pool\"\n )\n self.arb.create_subscription(\n trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n ship_first_name=u\"valentino\",\n first_name=u\"valentino\",\n bill_first_name=u\"valentino\",\n bill_last_name=u\"Pool\",\n driver_number=u\"55555\",\n driver_state=u\"CA\",\n driver_birth=u\"1990-09-09\"\n )", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def subscribe(self, request):\n email = self.cleaned_data.get('email')\n\n email_name, domain_part = email.rsplit('@', 1)\n domain_name = '@' + domain_part\n email_domain, created = Domain.objects.get_or_create(name=domain_name)\n\n subscriber, created = Subscriber.objects.get_or_create(email=email, mailing_list=self.mailing_list, defaults={\n 'domain': email_domain\n })\n subscriber.status = Status.PENDING\n subscriber.optin_ip_address = get_client_ip(request)\n subscriber.optin_date = timezone.now()\n subscriber.save()\n\n if not created:\n subscriber.tokens.filter(description='confirm_subscription').delete()\n\n token = subscriber.tokens.create(description='confirm_subscription')\n current_site = get_current_site(request)\n protocol = 'https' if request.is_secure() else 'http'\n 
domain = current_site.domain\n path = reverse('subscribers:confirm_double_optin_token', kwargs={\n 'mailing_list_uuid': self.mailing_list.uuid,\n 'token': token.text\n })\n confirm_link = '%s://%s%s' % (protocol, domain, path)\n\n confirm_email = self.mailing_list.get_confirm_email_template()\n confirm_email.send(subscriber.get_email(), {\n 'confirm_link': confirm_link\n })\n\n return subscriber", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def __init__(__self__,\n resource_name: str,\n args: EventSubscriptionArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def PostSubscription(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def subscription_factory_fixture():\n def _factory(capability):\n sub = Subscription()\n sub.capability = capability\n return sub\n return _factory", "def get_subscription(self):\n if not hasattr(self, '_subscription'):\n self._subscription = self.admin.subscriptions.select_related('plan').get_overlapping(\n self.admin_id, DateRange(self.period, self.period_end, bounds='[]'))\n return self._subscription", "def public_subscribe_subscription(\n user_id: str,\n body: Optional[SubscribeRequest] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = PublicSubscribeSubscription.create(\n user_id=user_id,\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def subscribe(config, accounts, region, merge, debug):\n config = validate.callback(config)\n subscription = config.get('subscription')\n\n if subscription is None:\n log.error(\"config file: logs subscription missing\")\n sys.exit(1)\n\n def converge_destination_policy(client, config):\n destination_name = subscription['destination-arn'].rsplit(':', 1)[-1]\n try:\n extant_destinations = client.describe_destinations(\n DestinationNamePrefix=destination_name).get('destinations')\n except ClientError:\n log.error(\"Log group destination not found: %s\",\n subscription['destination-arn'])\n sys.exit(1)\n\n account_ids = set()\n for a in accounts:\n if isinstance(a['role'], list):\n account_ids.add(a['role'][-1].split(':')[4])\n else:\n account_ids.add(a['role'].split(':')[4])\n\n if merge:\n for d in extant_destinations:\n if d['destinationName'] == destination_name:\n for s in json.loads(d['accessPolicy']):\n if s['Sid'] == 'CrossAccountDelivery':\n account_ids.update(s['Principal']['AWS'])\n\n client.put_destination_policy(\n destinationName=destination_name,\n accessPolicy=json.dumps({\n 'Statement': [{\n 'Action': 'logs:PutSubscriptionFilter',\n 'Effect': 'Allow',\n 'Principal': {'AWS': list(account_ids)},\n 'Resource': subscription['destination-arn'],\n 'Sid': 'CrossAccountDelivery'}]}))\n\n def subscribe_account(t_account, subscription, region):\n session = get_session(t_account['role'], region)\n client = session.client('logs')\n 
distribution = subscription.get('distribution', 'ByLogStream')\n role_arn = account.get('subscription-role')\n\n for g in account.get('groups'):\n if (g.endswith('*')):\n g = g.replace('*', '')\n paginator = client.get_paginator('describe_log_groups')\n allLogGroups = paginator.paginate(logGroupNamePrefix=g).build_full_result()\n for l in allLogGroups['logGroups']:\n _process_subscribe_group(\n client, l['logGroupName'], subscription, distribution, role_arn)\n else:\n _process_subscribe_group(client, g, subscription, distribution, role_arn)\n\n if subscription.get('managed-policy'):\n if subscription.get('destination-role'):\n session = get_session(subscription['destination-role'], region)\n else:\n session = boto3.Session()\n converge_destination_policy(session.client('logs'), config)\n\n executor = debug and MainThreadExecutor or ThreadPoolExecutor\n\n with executor(max_workers=32) as w:\n futures = {}\n for account in config.get('accounts', ()):\n if accounts and account['name'] not in accounts:\n continue\n futures[w.submit(subscribe_account, account, subscription, region)] = account\n\n for f in as_completed(futures):\n account = futures[f]\n if f.exception():\n log.error(\"Error on account %s err: %s\",\n account['name'], f.exception())\n log.info(\"Completed %s\", account['name'])", "def _InsertSubscription(self,\n id='python.gcal.test%40gmail.com'):\n print 'Subscribing to the calendar with ID: %s' % id\n calendar = gdata.calendar.data.CalendarEntry()\n calendar.id = atom.data.Id(text=id)\n returned_calendar = self.cal_client.InsertCalendarSubscription(calendar)\n return returned_calendar", "def test_aws_service_api_validate_subscription_post(self):\n pass", "async def public_subscribe_subscription_async(\n user_id: str,\n body: Optional[SubscribeRequest] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = PublicSubscribeSubscription.create(\n user_id=user_id,\n body=body,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def setup_test_tenant(self):\n self.test_tenant = rand_name('test_tenant_')\n self.test_description = rand_name('desc_')\n resp, self.tenant = self.client.create_tenant(\n name=self.test_tenant,\n description=self.test_description)\n self.tenants.append(self.tenant)", "def set_endpoint_subscription_id(\n *, login_manager: LoginManager, endpoint_id: str, subscription_id: Optional[str]\n) -> None:\n transfer_client = login_manager.get_transfer_client()\n\n res = transfer_client.put(\n f\"/endpoint/{endpoint_id}/subscription\",\n data={\"subscription_id\": subscription_id},\n )\n formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key=\"message\")", "def test_issue_add_subscription(self):\n pass", "def create_topic ( sns_conn, topicname, subscription_email ) :\n t_result = sns_conn.create_topic( topicname )\n topic = t_result[ 'CreateTopicResponse' ][ 'CreateTopicResult' ][ 'TopicArn' ]\n sns_conn.subscribe( topic, 'email', subscription_email )\n\n return topic", "def activate_subscription(**kwargs):\n sub, created = Subscription.objects.get_or_create(**kwargs)\n # check if it already existed and was deactivated\n if not created and not sub.active:\n sub.active = True\n sub.save()\n created = True\n return sub, 
created", "def create(self, validated_data):\n subscription = super().create(validated_data)\n subscription.send_verification_email()\n return subscription", "def email_subscription(self):\n from hubspot3.email_subscription import EmailSubscriptionClient\n\n return EmailSubscriptionClient(**self.auth, **self.options)", "async def create_subscription(user: int, redis: RedisDB):\n subscription_data = {\n \"subscriber_id\": user.id,\n \"cost\": str(os.getenv(\"AMOUNT\")),\n \"currency\": \"NANO\",\n \"period\": int(os.getenv(\"PERIOD\"))\n }\n json_data = json.dumps(subscription_data)\n r = requests.post(f\"{os.getenv('API_ENDPOINT')}create_subscription?token={os.getenv('NR_TOKEN')}\", json_data)\n rx = r.json()\n await redis.set(user.id, rx['subscription_id'])\n return r.json()", "async def create_subscription(self, installed_app_id: str, data: dict) -> dict:\r\n return await self.post(\r\n API_SUBSCRIPTIONS.format(installed_app_id=installed_app_id), data\r\n )", "def create_region(self, region_ref):\n raise exception.NotImplemented() # pragma: no cover", "def test_get_subscription(self):\n pass", "def subscribe(self, request: Request) -> Response:\n ids = request.data.get(\"ids\", None)\n session_id = request.data.get(\"session_id\")\n content_type = ContentType.objects.get_for_model(self.get_queryset().model)\n user = request.user if request.user.is_authenticated else get_anonymous_user()\n subscription = Subscription.objects.create(user=user, session_id=session_id)\n\n if ids is None:\n # Subscribe to the whole table.\n subscription.subscribe(\n content_type, [Observer.ALL_IDS], (ChangeType.CREATE, ChangeType.DELETE)\n )\n else:\n # Verify all ids exists and user has permissions to view them.\n for id in ids:\n if not self.user_has_permission(id, request.user):\n raise NotFound(f\"Item {id} does not exist\")\n\n change_types = (ChangeType.UPDATE, ChangeType.DELETE, ChangeType.CREATE)\n subscription.subscribe(content_type, ids, change_types)\n\n resp = {\"subscription_id\": subscription.subscription_id}\n return Response(resp)", "def subscribe(self, namespace, sub_strings=None):\n req = JSONRPCRequest('subscribe', [namespace, sub_strings])\n result = yield self._send(req)\n self._cache_jsonrpc_request(req)\n raise tornado.gen.Return(result)", "def create_tenant(tenant_name, description, enabled, auth_admin_url, admin_token):\n keystone = get_client(auth_admin_url, admin_token)\n tenant = keystone.tenants.create(tenant_name=tenant_name, description=description, enabled=enabled)\n print tenant\n return tenant.to_dict()", "def platform_subscribe_subscription(\n user_id: str,\n body: Optional[PlatformSubscribeRequest] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = PlatformSubscribeSubscription.create(\n user_id=user_id,\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "async def create_and_subscribe(user_id):\n client = gql(\n query=Query,\n mutation=Mutation,\n subscription=Subscription,\n consumer_attrs={\"strict_ordering\": True, \"confirm_subscriptions\": True},\n )\n await client.connect_and_init()\n\n sub_id = await client.send(\n msg_type=\"start\",\n payload={\n \"query\": textwrap.dedent(\n \"\"\"\n subscription op_name($user_id: UserId) {\n on_chat_message_sent(user_id: $user_id) { event }\n }\n \"\"\"\n ),\n \"variables\": 
{\"user_id\": user_id},\n \"operationName\": \"op_name\",\n },\n )\n\n # Receive the subscription confirmation message.\n resp = await client.receive(assert_id=sub_id, assert_type=\"data\")\n assert resp == {\"data\": None}\n\n return sub_id, client", "def create_subscription(post, user, sub_type=None, update=False):\n subs = Subscription.objects.filter(post=post.root, user=user)\n sub = subs.first()\n\n default = Subscription.TYPE_MAP.get(user.profile.message_prefs,\n Subscription.LOCAL_MESSAGE)\n\n empty = sub_type is None\n # Get the current sub type from what's given or the existing sub\n sub_type = None if empty else sub_type\n # No type has been given so default\n sub_type = sub_type or default\n\n # Ensure the sub type is not set to something wrote\n if sub and update:\n # Update an existing subscription\n sub.type = sub_type\n sub.save()\n else:\n # Drop all existing subscriptions for the user by default.\n subs.delete()\n Subscription.objects.create(post=post.root, user=user, type=sub_type)\n\n # Recompute subscription count\n subs_count = Subscription.objects.filter(post=post.root).exclude(type=Subscription.NO_MESSAGES).count()\n\n # Update root subscription counts.\n Post.objects.filter(pk=post.root.pk).update(subs_count=subs_count)", "def pre_create_subscription(\n self, request: pubsub.Subscription, metadata: Sequence[Tuple[str, str]]\n ) -> Tuple[pubsub.Subscription, Sequence[Tuple[str, str]]]:\n return request, metadata", "def __init__(__self__, *,\n cognitive_service_region: Optional[pulumi.Input[str]] = None,\n cognitive_service_resource_id: Optional[pulumi.Input[str]] = None,\n cognitive_service_subscription_key: Optional[pulumi.Input[str]] = None,\n default_locale: Optional[pulumi.Input[str]] = None,\n id: Optional[pulumi.Input[str]] = None,\n provider_name: Optional[pulumi.Input[str]] = None):\n if cognitive_service_region is not None:\n pulumi.set(__self__, \"cognitive_service_region\", cognitive_service_region)\n if cognitive_service_resource_id is not None:\n pulumi.set(__self__, \"cognitive_service_resource_id\", cognitive_service_resource_id)\n if cognitive_service_subscription_key is not None:\n pulumi.set(__self__, \"cognitive_service_subscription_key\", cognitive_service_subscription_key)\n if default_locale is not None:\n pulumi.set(__self__, \"default_locale\", default_locale)\n if id is not None:\n pulumi.set(__self__, \"id\", id)\n if provider_name is not None:\n pulumi.set(__self__, \"provider_name\", provider_name)", "def create_hosted_office(sub, pw): \r\n s1 = ims.hostedOfficeSubscriber(sub)\r\n session = {}\r\n session['emaSession'] = ema.emaLogin()\r\n session['sub_pw'] = pw # Get password from xls sheet and put here\r\n\r\n result = s1.subscriberCreate(session)\r\n ema.ema_logout(session['emaSession'])\r\n return result", "def test_get_template_subscription(self):\n pass", "def create(self, validated_data):\n\n region = CourierRegions.objects.create(\n courier_id=Courier.objects.get(courier_id=validated_data['courier_id']),\n region=validated_data['region']\n )\n return region", "def test_get_subscription_template(self):\n pass", "def event_create(tenant_id, user_id=None):", "def get_tenant_id_resource_client(self):\n\n __logger__.info(\"Creating TenantIdResource\")\n\n return TenantIdResourceClient(protocol=self.api_protocol, host=self.api_host,\n port=self.api_port, resource=self.api_resource, headers=self.headers)", "def create_tenant(tenant):\n exists = identity.Tenant.query.filter_by(name=tenant.name).first()\n if exists:\n abort(409, \"Tenant 
Already Exists\")\n db.session.add(tenant)\n db.session.commit()\n return tenant.id", "async def platform_subscribe_subscription_async(\n user_id: str,\n body: Optional[PlatformSubscribeRequest] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = PlatformSubscribeSubscription.create(\n user_id=user_id,\n body=body,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)", "def copy_to_region(self, region, name=None):\r\n if region.name == self.region:\r\n raise BotoClientError('Unable to copy to the same Region')\r\n conn_params = self.connection.get_params()\r\n rconn = region.connect(**conn_params)\r\n sg = rconn.create_security_group(name or self.name, self.description)\r\n source_groups = []\r\n for rule in self.rules:\r\n grant = rule.grants[0]\r\n for grant in rule.grants:\r\n if grant.name:\r\n if grant.name not in source_groups:\r\n source_groups.append(grant.name)\r\n sg.authorize(None, None, None, None, grant)\r\n else:\r\n sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port,\r\n grant.cidr_ip)\r\n return sg", "def fixture_make_bucket(request):\n def _make_bucket(resource, bucket_name, region_name=None):\n if not region_name:\n region_name = resource.meta.client.meta.region_name\n\n bucket = resource.create_bucket(\n Bucket=bucket_name,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n }\n )\n\n def fin():\n bucket.objects.delete()\n bucket.delete()\n request.addfinalizer(fin)\n\n return bucket\n\n return _make_bucket", "def create_spot_datafeed_subscription(self, bucket, prefix):\r\n params = {'Bucket' : bucket}\r\n if prefix:\r\n params['Prefix'] = prefix\r\n return self.get_object('CreateSpotDatafeedSubscription',\r\n params, SpotDatafeedSubscription, verb='POST')", "def test_register_subscription_existing_type(self):\n mock_type = Mock()\n bus = event_bus._event_bus\n bus._subscriptions[mock_type] = [\n EventSubscription(mock_type, lambda _: None)]\n new_subscription = EventSubscription(mock_type, lambda _: True)\n\n reg_id = event_bus.register_subscription(new_subscription)\n\n self.assertTrue(new_subscription in bus._subscriptions[mock_type])\n self.assertTrue(reg_id in bus._registration_id_map.keys())", "def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: {self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")", "def test_update_subscription_template(self):\n pass", "def subnet_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.create_subnet(**kwargs)", "def create(self):\n\n if self.call(method='addSubdomain', args=[self.domainname, self.subdomain]):\n return self", "def request_subset_create(self, request):\n user_id = request['user_id']\n workspace_uuid = request['workspace_uuid']\n subset_uuid = 
request['subset_uuid']\n new_alias = request['alias']\n \n return_dict = self.copy_subset(user_id, \n workspace_uuid=workspace_uuid, \n subset_source_uuid=subset_uuid, \n subset_target_alias=new_alias)\n if return_dict:\n subset_uuid = return_dict['uuid']\n else:\n uuid_mapping = self._get_uuid_mapping_object(user_id)\n subset_uuid = uuid_mapping.get_uuid(alias=new_alias, user_id=user_id)\n response = self.dict_subset(workspace_unique_id=workspace_uuid, \n subset_unique_id=subset_uuid)\n \n return response", "def putregion(self, *args, **kwargs):\n return _image.image_putregion(self, *args, **kwargs)", "def recurring_charge_subscription(\n subscription_id: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = RecurringChargeSubscription.create(\n subscription_id=subscription_id,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def __init__(__self__,\n resource_name: str,\n args: InterRegionTrafficQosPolicyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def create_subscriber_for_development_events_v0(self, create_subscriber_request, **kwargs):\n # type: (CreateSubscriberRequest_a96d53b9, **Any) -> Union[ApiResponse, object, BadRequestError_a8ac8b44, Error_d660d58]\n operation_name = \"create_subscriber_for_development_events_v0\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'create_subscriber_request' is set\n if ('create_subscriber_request' not in params) or (params['create_subscriber_request'] is None):\n raise ValueError(\n \"Missing the required parameter `create_subscriber_request` when calling `\" + operation_name + \"`\")\n\n resource_path = '/v0/developmentEvents/subscribers'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n if 'create_subscriber_request' in params:\n body_params = params['create_subscriber_request']\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=None, status_code=201, message=\"Created. Returns a URL to retrieve the subscriber in &#39;Location&#39; header.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.bad_request_error.BadRequestError\", status_code=400, message=\"Server cannot process the request due to a client error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=401, message=\"The auth token is invalid/expired or doesn&#39;t have access to the resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=429, message=\"Exceed the permitted request limit. 
Throttling criteria includes total requests, per API, ClientId, and CustomerId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"POST\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=None)\n\n if full_response:\n return api_response\n \n return None", "def subscribe(self, subscription_type, callback):\n if subscription_type in self._subscriptions.keys():\n self._subscriptions[subscription_type].append(callback)", "def sns_conn ( self ) :\n if not self.sns :\n sns_regions = boto.sns.regions( )\n for reginfo in sns_regions :\n if reginfo.name == self.aws_region_name :\n sns_region = reginfo\n\n self.sns = boto.sns.SNSConnection( aws_access_key_id = self.access_key,\n aws_secret_access_key = self.access_key_secret,\n region = sns_region )\n return self.sns", "def createOrcaTenant(self,payload):\n response = None\n # Check if tenant with that name already exists\n systemObj = self.getSystemByUid(payload[\"system\"])\n try:\n # Systemname and tenant description always determine a specific tenant\n response = self.getTenantByName(systemObj[\"name\"],payload[\"description\"].upper())\n except KeyError as e:\n if e.args[1] == \"CIC_TENANT_LOOKUP_ERROR\":\n response = None\n pass\n else:\n raise\n try:\n # TMS delivers always a non-empty body if something was found\n if response:\n if response[\"description\"] == payload[\"description\"].upper():\n raise RuntimeError(\"*** INFO *** Tenant already exists\",\"CIC_CREATE_TENANT_ERROR\")\n # TMS delivers an empty body if nothing is found\n elif response is None:\n print \"*** INFO *** Starting tenant creation\"\n response = self.httpHandler.sendHttpRequest(CIC_TENANT_ENDPOINT,payload,\"POST\")\n status = response.getcode()\n if status == 202:\n print \"*** INFO *** Tenant creation successfully triggered\"\n\n except RuntimeError as e:\n print e.args[0]\n except AttributeError as e:\n print \"*** INFO *** Discarding request.Please wait until tenant creation finishes before sending another request\"", "def add_subscription(self, device, cb, event_type=None):\n device.subscribe(cb, event_type=event_type, run=False)\n self.subs[cb] = device", "def subscribe():\n form = SubscribeForm()\n if form.validate_on_submit():\n subscription = Subscription(email=form.email.data)\n db.session.add(subscription)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n return render_template('subscribes.html', form=form)", "def send_subscription(\n connection, subscription_id, project_id, body, fields=None, error_msg=None\n):\n return connection.get(\n url=f'{connection.base_url}/api/subscriptions/{subscription_id}/send',\n params={'fields': fields},\n headers={'X-MSTR-ProjectID': project_id},\n json=body,\n )", "def from_json(name, subscription_json):\n return CaseSubscription(name, subscriptions=subscription_json)", "def create_subnet ( vpc_conn,\n ec2_conn,\n vpc_id,\n subnet_cidr,\n zone_name,\n subnet_basename ) :\n subnet = vpc_conn.create_subnet( vpc_id, subnet_cidr, zone_name )\n aws_cmd( ec2_conn.create_tags, [ subnet.id,\n { \"Name\": subnet_basename + \"-\" + zone_name[-1].upper( ) + 
\"-Subnet\" } ] )\n return subnet", "def create_subscription_if_not_exists(self):\n create_subscription_if_not_exists(self.project_id, self.topic_name, self.subscription_name)", "def select_subscription(profile=None, sub_name_or_id=None):\n if profile is None:\n profile = subscription_profile()\n\n if sub_name_or_id is None:\n sub_name_or_id = _prompt_sub_id_selection(profile)\n\n profile.set_active_subscription(sub_name_or_id)\n return profile", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def create_subscription_for_development_events_v0(self, **kwargs):\n # type: (**Any) -> Union[ApiResponse, object, BadRequestError_a8ac8b44, Error_d660d58]\n operation_name = \"create_subscription_for_development_events_v0\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n\n resource_path = '/v0/developmentEvents/subscriptions'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n if 'create_subscription_request' in params:\n body_params = params['create_subscription_request']\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=None, status_code=201, message=\"Created; Returns a URL to retrieve the subscription in &#39;Location&#39; header.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.bad_request_error.BadRequestError\", status_code=400, message=\"Server cannot process the request due to a client error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=401, message=\"The auth token is invalid/expired or doesn&#39;t have access to the resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.bad_request_error.BadRequestError\", status_code=403, message=\"The operation being requested is not allowed.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=404, message=\"The resource being requested is not found.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=429, message=\"Exceed the permitted request limit. 
Throttling criteria includes total requests, per API, ClientId, and CustomerId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"POST\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=None)\n\n if full_response:\n return api_response\n \n return None", "def customer_registration(request):\n \n data = request.data \n phone_number = generate_phn_number()\n \n try: \n try: \n # Create stripe account\n stripe_customer = stripe.Customer.create(\n email = data['email']\n )\n\n # Set a default card for account\n s_card = stripe.Customer.create_source(\n stripe_customer.id,\n source=\"tok_amex\",\n )\n \n plan_id = \"price_1JsHMxSDkRo5FXlkOsq2QHSV\"\n\n # if data[\"subscription_plan\"]== \"Globalnet Silver\":\n # plan_id = \"price_1JsHOJSDkRo5FXlkQmfEQzhN\"\n \n # if data[\"subscription_plan\"]== \"Globalnet Gold\":\n # plan_id = \"price_1JsHPFSDkRo5FXlk9VSl41rV\"\n\n # Create subscription for customer\n subscription = stripe.Subscription.create(\n customer = stripe_customer.id,\n items = [{'plan':plan_id}]\n )\n \n # Create User account\n user = User.objects.create(\n email = data['email'],\n password = make_password(data['password'] ) \n\n )\n\n start_date = datetime.datetime.now().strftime(\"%c\")\n end_date = (datetime.datetime.now() + datetime.timedelta(30)).strftime(\"%x\")\n\n subscription_plan = SubscriptionPlan.objects.get(subscription_plan_name=\"Globalnet Bronze\")\n \n # Create customer data\n customer_data = Customer.objects.create(\n user = user,\n primary_number = phone_number,\n subscription_plan = subscription_plan,\n stripe_id = stripe_customer.id,\n start_date = start_date,\n end_date = end_date,\n subscription_id = subscription.id\n \n )\n \n # Entry Subscription data\n SubscriptionData.objects.create(\n subscriber = phone_number,\n subscription = subscription_plan.subscription_plan_name,\n subscription_start = start_date,\n subscription_end = end_date \n \n ) \n \n serializer= CustomerSerializer(customer_data,many=False)\n return Response(serializer.data)\n\n except Exception as e:\n # delete user if any functionality fails\n u = User.objects.get(username = data['email'])\n u.delete()\n raise Exception(e)\n \n\n except Exception as e:\n message = {\"detail\":str(e)}\n print(e)\n return Response(message)", "def setup_subscription(subscription, info: GraphQLResolveInfo, variables, complete_on_error=False):\n excluded_field_nodes = filter_selection_set(info)\n variables = frappe._dict(variables)\n subscription_id = frappe.generate_hash(f\"{subscription}-{frappe.session.user}\", length=8)\n\n subscription_data = frappe._dict(\n subscribed_at=now_datetime(),\n last_ping=now_datetime(),\n variables=variables,\n subscription_id=subscription_id,\n selection_set=excluded_field_nodes,\n user=frappe.session.user,\n complete_on_error=complete_on_error\n )\n\n frappe.cache().hset(\n get_subscription_redis_key(subscription), subscription_id, subscription_data)\n\n return frappe._dict(\n subscription_id=subscription_id\n )" ]
[ "0.61023337", "0.6050946", "0.60171896", "0.59941614", "0.5989991", "0.5985926", "0.59581876", "0.59402776", "0.5903215", "0.5853864", "0.5773465", "0.57645184", "0.56852347", "0.5659827", "0.56107926", "0.56008047", "0.55303335", "0.55104107", "0.5472032", "0.5425874", "0.54192394", "0.53581506", "0.53247344", "0.5322388", "0.53061765", "0.5292069", "0.5279004", "0.52674073", "0.5249765", "0.52019763", "0.5198017", "0.5175966", "0.5171832", "0.51689374", "0.5168146", "0.51438904", "0.5142959", "0.5137592", "0.5134189", "0.5100409", "0.50891477", "0.50731575", "0.5070964", "0.50676805", "0.50479513", "0.50363374", "0.5032048", "0.50319517", "0.5016633", "0.50107664", "0.49994668", "0.49715486", "0.49703076", "0.49533305", "0.4940842", "0.49406266", "0.49120396", "0.48976743", "0.48892942", "0.487961", "0.48618424", "0.48618406", "0.48500064", "0.48414382", "0.4838828", "0.4836289", "0.48341388", "0.4825017", "0.4816161", "0.48020372", "0.47964674", "0.47782236", "0.47767514", "0.4769817", "0.47661552", "0.47659084", "0.47610092", "0.47461936", "0.47456816", "0.47409838", "0.47351706", "0.4727323", "0.4723183", "0.47202155", "0.47175145", "0.47173515", "0.4708658", "0.4704728", "0.47017816", "0.4684261", "0.46739313", "0.46681038", "0.46678844", "0.46656835", "0.466539", "0.46623746", "0.46597543", "0.4659075", "0.46526897", "0.4650086" ]
0.74824893
0
Creates a new SMTP credential for the specified user. An SMTP credential has an SMTP user name and an SMTP password. You must specify a description for the SMTP credential (although it can be an empty string). It does not have to be unique, and you can change it anytime with
def create_smtp_credential(self, create_smtp_credential_details, user_id, **kwargs): resource_path = "/users/{userId}/smtpCredentials" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_smtp_credential got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_smtp_credential_details, response_type="SmtpCredential") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_smtp_credential_details, response_type="SmtpCredential")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_new_credential(account,userName,password):\n new_credential = Credentials(account,userName,password)\n return new_credential", "def CreateNewSmtpUser(s):\n payload = ['adduser %s %s\\n' % (FLAGS.exploit_user, FLAGS.exploit_password),\n 'quit\\n']\n SendPayload(s, payload)\n logging.info('Created new user %s/%s' % (\n FLAGS.exploit_user, FLAGS.exploit_password))\n s.close()", "def create_credential(self, body=None):\r\n return self.post(self.credentials_path, body=body)", "def create_user(BrokerId=None, ConsoleAccess=None, Groups=None, Password=None, Username=None):\n pass", "def create_user(UserName=None, MessageAction=None, FirstName=None, LastName=None, AuthenticationType=None):\n pass", "def Create( profile_name,\r\n host,\r\n username=None,\r\n password=None,\r\n port=26,\r\n from_name=None,\r\n from_email=None,\r\n ssl=False,\r\n output_stream=sys.stdout,\r\n ):\r\n\r\n if not from_name and not from_email:\r\n raise CommandLine.UsageException(\"'from_name' or 'from_email' must be provided\")\r\n\r\n mailer = SmtpMailer( host,\r\n username=username,\r\n password=password,\r\n port=port,\r\n from_name=from_name,\r\n from_email=from_email,\r\n ssl=ssl,\r\n )\r\n mailer.Save(profile_name)\r\n\r\n output_stream.write(\"The profile '{}' has been created.\\n\".format(profile_name))", "def create_user_credentials(storage_type, storage_id, space_name, client_ip,\n user_details):\n user_id = user_details[\"id\"]\n if user_id == \"0\":\n return PosixCredentials(0, 0)\n\n uid = gid = gen_storage_id(user_id)\n return PosixCredentials(uid, gid)", "def send_new_credentials(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website):\n\n logger.info(\"in send lead mail task\")\n return send_lead_generate(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website)", "def create_user(email, password, f_name, l_name):\n pass", "def create_email(user):\n if 'research' in user.get_domains():\n domain = 'research'\n else: domain = 'academic'\n subject = \"ECE/CIS Account Created\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n \n message = \"Your ECE/CIS %s account has been created with the username: %s\\n\\n\" % (domain, user.username)\n message += \"Please do not reply to this message. 
If you need assistance with your account, please visit:\\n\"\n message += \"%s\\n\\n\" % helprequest\n message += \"-- EE/CIS Labstaff\\n\"\n\n send('account@eecis.udel.edu', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)", "def create(self, username, password, email):\n pass", "def create_user(context, params):\n form_user = dict()\n # form_user['edited_by'] = context.user\n if params.get('username'):\n form_user['username'] = params.get('username')\n else:\n form_user['username'] = create_username(params) # 'email_user{}'.format(MISUser.objects.latest('id').id + 1\n form_user['first_name'] = params.get('first_name')\n form_user['last_name'] = params.get('last_name')\n form_person = create_person(params)\n form_user.update(form_person)\n user = User.objects.create(**form_user)\n user.set_password(params.get('password'))\n\n email = {'label': 'Work', 'val': params.get('email'), 'person': user, 'is_main': True}\n create_email(context, email)\n\n user.save()\n return user", "def create_user(self, conn, name, password, group):\n user = conn.user.allocate(name, password, \"\", [group])\n return user", "def m_credential_create(node_name, credential_hash, participantDID):\n pass", "def create_server(host, port, uid, pwd):\r\n s = smtplib.SMTP(host, port)\r\n s.starttls()\r\n s.login(\r\n uid,\r\n pwd\r\n )\r\n return s", "def create_service_credentials(user, new_roles=None):\n tenant = config('service-tenant')\n if not tenant:\n raise Exception(\"No service tenant provided in config\")\n\n domain = None\n if get_api_version() > 2:\n domain = DEFAULT_DOMAIN\n passwd = create_user_credentials(user, get_service_password,\n set_service_password,\n tenant=tenant, new_roles=new_roles,\n grants=[config('admin-role')],\n domain=domain)\n if get_api_version() > 2:\n # Create account in SERVICE_DOMAIN as well using same password\n domain = SERVICE_DOMAIN\n passwd = create_user_credentials(user, get_service_password,\n set_service_password,\n tenant=tenant, new_roles=new_roles,\n grants=[config('admin-role')],\n domain=domain)\n return passwd", "async def create_new_user(*, user: User):\n with Session(engine) as session:\n user.password = simple_hash(user.name, user.password) #Hashing password for security\n session.add(user)\n session.commit()\n return {\"message\": \"User {user_id} created\".format(user_id = user.id)}", "def add_user(self, user, pw):\n self.db.execute(\"INSERT INTO user_credentials VALUES (?, ?)\", [user, pw])\n self.db.commit()", "def create(self, credentials):\n return User.objects.create_user(\n credentials['username'],\n credentials['email'],\n credentials['password']\n )", "def newuser(lp, creds, username=None):\n\n names = guess_names_from_smbconf(lp, None, None)\n db = Ldb(url=get_ldb_url(lp, creds, names), session_info=system_session(), \n credentials=creds, lp=lp)\n user_dn = get_user_dn(db, \"CN=Users,%s\" % names.domaindn, username)\n if user_dn:\n extended_user = \"\"\"\ndn: %(user_dn)s\nchangetype: modify\nadd: mailNickName\nmailNickname: %(username)s\nadd: homeMDB\nhomeMDB: CN=Mailbox Store (%(netbiosname)s),CN=First Storage Group,CN=InformationStore,CN=%(netbiosname)s,CN=Servers,CN=First Administrative Group,CN=Administrative Groups,CN=%(firstorg)s,CN=Microsoft Exchange,CN=Services,CN=Configuration,%(domaindn)s\nadd: homeMTA\nhomeMTA: CN=Mailbox Store (%(netbiosname)s),CN=First Storage Group,CN=InformationStore,CN=%(netbiosname)s,CN=Servers,CN=First Administrative Group,CN=Administrative Groups,CN=%(firstorg)s,CN=Microsoft 
Exchange,CN=Services,CN=Configuration,%(domaindn)s\nadd: legacyExchangeDN\nlegacyExchangeDN: /o=%(firstorg)s/ou=First Administrative Group/cn=Recipients/cn=%(username)s\nadd: proxyAddresses\nproxyAddresses: =EX:/o=%(firstorg)s/ou=First Administrative Group/cn=Recipients/cn=%(username)s\nproxyAddresses: smtp:postmaster@%(dnsdomain)s\nproxyAddresses: X400:c=US;a= ;p=First Organizati;o=Exchange;s=%(username)s\nproxyAddresses: SMTP:%(username)s@%(dnsdomain)s\nreplace: msExchUserAccountControl\nmsExchUserAccountControl: 0\n\"\"\"\n ldif_value = extended_user % {\"user_dn\": user_dn,\n \"username\": username,\n \"netbiosname\": names.netbiosname,\n \"firstorg\": names.firstorg,\n \"domaindn\": names.domaindn,\n \"dnsdomain\": names.dnsdomain}\n db.modify_ldif(ldif_value)\n\n res = db.search(base=user_dn, scope=SCOPE_BASE, attrs=[\"*\"])\n if len(res) == 1:\n record = res[0]\n else:\n raise Exception, \\\n \"this should never happen as we just modified the record...\"\n record_keys = map(lambda x: x.lower(), record.keys())\n\n if \"displayname\" not in record_keys:\n extended_user = \"dn: %s\\nadd: displayName\\ndisplayName: %s\\n\" % (user_dn, username)\n db.modify_ldif(extended_user)\n\n if \"mail\" not in record_keys:\n extended_user = \"dn: %s\\nadd: mail\\nmail: %s@%s\\n\" % (user_dn, username, names.dnsdomain)\n db.modify_ldif(extended_user)\n\n print \"[+] User %s extended and enabled\" % username\n else:\n print \"[!] User '%s' not found\" % username", "def new_credentials(site_name, user_name, password):\n new_credentials = Credentials(site_name, user_name, password)\n return new_credentials", "def create_user_credentials(user, passwd_get_callback, passwd_set_callback,\n tenant=None, new_roles=None,\n grants=None, domain=None):\n passwd = passwd_get_callback(user)\n if not passwd:\n log(\"Unable to retrieve password for user '{}'\".format(user),\n level=INFO)\n return\n\n log(\"Creating service credentials for '%s'\" % user, level=DEBUG)\n if user_exists(user, domain=domain):\n log(\"User '%s' already exists\" % (user), level=DEBUG)\n # NOTE(dosaboy): see LP #1648677\n if is_password_changed(user, passwd):\n update_user_password(user, passwd, domain)\n else:\n create_user(user, passwd, tenant=tenant, domain=domain)\n\n passwd_set_callback(passwd, user=user)\n\n if grants:\n for role in grants:\n # grant role on project\n grant_role(user, role, tenant=tenant, user_domain=domain,\n project_domain=domain)\n else:\n log(\"No role grants requested for user '%s'\" % (user), level=DEBUG)\n\n if new_roles:\n # Allow the remote service to request creation of any additional roles.\n # Currently used by Swift and Ceilometer.\n for role in new_roles:\n log(\"Creating requested role '%s'\" % role, level=DEBUG)\n create_role(role, user=user, tenant=tenant, domain=domain)\n\n return passwd", "def _create_user(self, username, password, domain_id, project_id):\n request = {\n \"user\": {\n \"name\": username,\n \"password\": password,\n \"domain_id\": domain_id,\n \"default_project_id\": project_id,\n \"description\": \"description\",\n \"email\": \"test@example.com\",\n \"enabled\": True,\n }\n }\n response = self.client.post(USER_PATH, data=json.dumps(request),\n headers=HEADERS)\n if response.status_code == 409:\n return\n elif response.status_code == 201:\n return response.json()\n else:\n raise SystemExit(\"Failed to create test user.\")", "def create_user(self):\n u = USER.objects.create(username='test_user1',\n email='test_email@example.com', )\n u.set_password('test_password')\n u.save()\n 
self.user = u\n return u", "def create_user(\n screen_name: str,\n email_address: str,\n password: str,\n first_names: Optional[str],\n last_name: Optional[str],\n site_id: SiteID,\n *,\n consents: Optional[Set[Consent]] = None,\n) -> Tuple[User, UserAccountCreated]:\n # user with details, password, and roles\n user, event = create_basic_user(\n screen_name,\n email_address,\n password,\n first_names=first_names,\n last_name=last_name,\n )\n\n # consents\n if consents:\n for consent in consents:\n # Insert missing user ID.\n consent = consent_service.build_consent(\n user.id,\n consent.subject_id,\n consent.expressed_at,\n )\n db.session.add(consent)\n\n db.session.commit()\n\n request_email_address_confirmation(user, email_address, site_id)\n\n return user, event", "def createNewUser(name, account, auth, email, pwd, group, expiry, node):\n \n #Check if the user creation was succesful\n if hl.createUser(name, account, auth, email = email, passwd = pwd, group = group, expiry = expiry, node = node):\n user = hl.getUser(\"Email\", email)\n\n if(auth == \"Email\"):\n subjectTitle = \"OneGroup account keys\"\n recipientEmail =[email]\n bodyMessage = \"here are your keys\"\n attachmentName = user['Keys'] + '.ovpn'\n filename = \"{}/{}\".format(keys_dir,attachmentName)\n attachmentFilePath = filename\n emailMessage(subjectTitle, recipientEmail, bodyMessage,attachmentName, attachmentFilePath)\n\n elif(auth == \"Passphrase\"):\n subjectTitle = \"OneGroup account details\"\n recipientEmail = [email]\n bodyMessage = \"Your login details are\\n Email :\" + str(email) + \"\\nPassword :\" + str(pwd)\n emailMessage(subjectTitle, recipientEmail, bodyMessage)\n return True\n else:\n return False", "def create_user(self, uname, name, password=None):\r\n\r\n if not uname:\r\n return _('Must provide username')\r\n if not name:\r\n return _('Must provide full name')\r\n\r\n email_domain = getattr(settings, 'SSL_AUTH_EMAIL_DOMAIN', 'MIT.EDU')\r\n\r\n msg = u''\r\n if settings.FEATURES['AUTH_USE_CERTIFICATES']:\r\n if not '@' in uname:\r\n email = '{0}@{1}'.format(uname, email_domain)\r\n else:\r\n email = uname\r\n if not email.endswith('@{0}'.format(email_domain)):\r\n msg += u'{0} @{1}'.format(_('email must end in'), email_domain)\r\n return msg\r\n mit_domain = 'ssl:MIT'\r\n if ExternalAuthMap.objects.filter(external_id=email,\r\n external_domain=mit_domain):\r\n msg += _('Failed - email {0} already exists as '\r\n 'external_id').format(email)\r\n return msg\r\n new_password = generate_password()\r\n else:\r\n if not password:\r\n return _('Password must be supplied if not using certificates')\r\n\r\n email = uname\r\n\r\n if not '@' in email:\r\n msg += _('email address required (not username)')\r\n return msg\r\n new_password = password\r\n\r\n user = User(username=uname, email=email, is_active=True)\r\n user.set_password(new_password)\r\n try:\r\n user.save()\r\n except IntegrityError:\r\n msg += _('Oops, failed to create user {0}, '\r\n 'IntegrityError').format(user)\r\n return msg\r\n\r\n reg = Registration()\r\n reg.register(user)\r\n\r\n profile = UserProfile(user=user)\r\n profile.name = name\r\n profile.save()\r\n\r\n if settings.FEATURES['AUTH_USE_CERTIFICATES']:\r\n credential_string = getattr(settings, 'SSL_AUTH_DN_FORMAT_STRING',\r\n '/C=US/ST=Massachusetts/O=Massachusetts Institute of Technology/OU=Client CA v1/CN={0}/emailAddress={1}')\r\n credentials = credential_string.format(name, email)\r\n eamap = ExternalAuthMap(\r\n external_id=email,\r\n external_email=email,\r\n 
external_domain=mit_domain,\r\n external_name=name,\r\n internal_password=new_password,\r\n external_credentials=json.dumps(credentials),\r\n )\r\n eamap.user = user\r\n eamap.dtsignup = timezone.now()\r\n eamap.save()\r\n\r\n msg += _('User {0} created successfully!').format(user)\r\n return msg", "def setupUser(con, options, dbName, userName, userInfo):\n if checkUsername(userName):\n trace(\"For dbName='%s', create user '%s'\" % (dbName, userName))\n userPassword = userInfo[\"password\"]\n optionalDbExecute(con, options, \"create user %s with password '%s'\" % (userName, userPassword))", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def send_email( user, password ):\n \n mail = Mailer( host = EMAIL['host'], \n port = EMAIL['port'],\n use_tls = EMAIL['use_tls'], \n usr = EMAIL['user'], \n pwd = EMAIL['password']\n )\n \n message = Message( From = 'help@rxmedaccess.com',\n To = [user.email],\n Subject = \"Password Reset\"\n )\n \n body = \"\"\"Your new password for {} is {}\n You can reset it to what you like on your settings page once you log in with\n this password\n \"\"\".format(__name__, password )\n\n message.Body = body\n try:\n mail.send(message)\n except Exception as e:\n log.error( 'Send mail error: {}'.format( str(e) ) )", "def create_user(ctx, db_username, db_password, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n user = cmd.ensure_admin_user(\n client=ctx.obj, project_id=project.id, username=db_username,\n password=db_password)\n pprint(user)", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def create_user(self, user, user_opt={}):\n assert user.id is None\n if self.dryrun:\n self.logger.debug(\"Would create user %s in zabbix\", user.alias)\n return\n\n random_passwd = ''.join(random.sample(string.ascii_letters + string.digits, 32))\n\n user_req = {\n 'autologin': 0,\n 'type': 1,\n 'usrgrps': [{'usrgrpid': str(id)} for id in user.groups],\n 'passwd': random_passwd,\n \"alias\": user.alias,\n \"name\": user.name,\n \"surname\": user.surname,\n }\n user_req.update(user_opt)\n\n result = self.conn.user.create(user_req)\n user.id = result[\"userids\"][0]\n self.logger.debug(\"Created user %s in zabbix, id: %s\", user.alias, user.id)", "def create_user(self):\n User.objects.create_user('test', 'testing@test.com', 'testing')", "def create_user(open_ldap, smtp, entries):\n try:\n if open_ldap.ldap_insert(entries):\n smtp.send_email(entries)\n return True\n else:\n return False\n except Exception as e:\n print('ERROR - ', e)\n return", "def set_new_user(mailer: Mailer, user_data: Dict[str, Any], password: str, _tn: Translator) -> Dict[str, Any]:\n # copy the dict to not change the mutable structure\n temporary_user = dict(user_data)\n\n if DBDiscussionSession.query(User).filter(User.nickname == temporary_user['nickname']).first():\n LOG.debug(\"User already exists\")\n return {'success': False, 'error': _tn.get(Keywords.nickIsTaken), 'user': None}\n\n temporary_user['password'] = password\n temporary_user['db_group'] = Group.USER\n\n success, info, db_new_user = __create_new_user(temporary_user, _tn.get_lang())\n\n if db_new_user:\n # sending an email and message\n subject = _tn.get(Keywords.accountRegistration)\n body = _tn.get(Keywords.accountWasRegistered).format(temporary_user['firstname'], temporary_user['lastname'],\n temporary_user['email'])\n send_mail(mailer, subject, body, 
temporary_user['email'], _tn.get_lang())\n send_welcome_notification(db_new_user, _tn)\n\n LOG.debug(\"Set new user in db\")\n return {'success': success, 'error': '', 'user': db_new_user}\n\n LOG.debug(\"New user not found in db\")\n return {\n 'success': False,\n 'error': _tn.get(Keywords.errorTryLateOrContant),\n 'user': None\n }", "def create(self, username, password):\n pass", "def create_passlocker(username, userpasslock, email):\n new_passlocker = passlocker(username, userpasslock, email)", "def create_user(email, password='test', **kwargs):\n user = get_user_model().objects.create(email=email, **kwargs)\n user.set_password(password)\n user.save()\n return user", "def send_password_mail(user_name, password):\n from databoard.db_tools import send_password_mail\n send_password_mail(user_name, password)", "def createUser(self, name, password, pwencoded=False, email=None):\n # Create user\n self.user = user.User(self.request)\n self.user.name = name\n self.user.email = email\n if not pwencoded:\n password = user.encodePassword(password)\n self.user.enc_password = password\n\n # Validate that we are not modifying existing user data file!\n if self.user.exists():\n self.user = None\n py.test.skip(\"Test user exists, will not override existing user data file!\")\n\n # Save test user\n self.user.save()\n\n # Validate user creation\n if not self.user.exists():\n self.user = None\n py.test.skip(\"Can't create test user\")", "def create_email(username, provider):\n print(f\"Your new email is {username}@{provider}.com\")", "def create_user(email='user@example.com', password='testpass123'):\n return get_user_model().objects.create_user(email=email, password=password)", "def create_new_user(cls, user_email, user_password, user_phone):\n\n new_user = User(email=user_email, password=user_password, mobile_phone=user_phone)\n\n db.session.add(new_user)\n db.session.commit()\n\n print \"Successfully added new user with the email: %s\" % user_email", "def create_keystone_v3_project_user(self, domain_name, domain_role,\n project_details, set_context=True):\n domain_id = self.get_keystone_v3_domain_id(domain_name)\n if not isinstance(domain_id, unicode):\n err_msg = (\"Get domain id is failed with reason %s\" % domain_id)\n LOG_OBJ.error(err_msg)\n return err_msg\n\n # Creation of project\n kwargs = {\"name\": project_details['project_name'],\n \"domain_id\": domain_id}\n project_id = self.create_keystone_v3_project(**kwargs)\n if not isinstance(project_id, unicode):\n err_msg = (\"Project creation failed with reason %s\" % project_id)\n LOG_OBJ.error(err_msg)\n return err_msg\n\n # creation of user with adding roles.\n user_id = self.create_keystone_v3_user_and_add_roles(\n project_details, domain_id, domain_role, project_id)\n if not isinstance(user_id, unicode):\n err_msg = (\"Problem while creating user and assigning role.\"\n \"Reason %s\" % user_id)\n LOG_OBJ.error(err_msg)\n return err_msg\n\n # Set the context to that of this new user of the tenant.\n if set_context:\n tokens = []\n for token_scope in [\"domain\", \"project\"]:\n token = self.get_keystone_v3_token(\n project_details['project_name'], domain_name,\n project_details['user_name'], project_details['password'],\n scope=token_scope)\n # NOTE: The token id is of type str not unicode, in v3 case.\n if not isinstance(token, str):\n err_msg = (\"Get v3 user token is failed with \"\n \"reason %s\" % token)\n LOG_OBJ.error(err_msg)\n return err_msg\n tokens.append(token)\n # Set the token\n 
self.set_tenant_info(project_details['project_name'], tokens[0],\n tokens[1], project_id)\n return project_id", "def create(self, name, login, password, email, address=\"\", vat=\"\", jobguid=\"\", executionparams=None):", "def create_user(**kwargs):\n User = apps.get_model(settings.AUTH_USER_MODEL)\n user = G(User, **kwargs)\n user.set_password(kwargs.get(\"password\", \"test\"))\n user.save()\n return user", "def sample_user(email=user_v['email'], password=user_v['password']):\n return get_user_model().objects.create_user(email, password)", "def create_user(self, username=None, email=None, password=None):\n\t\treturn self._create_user(username, email, password)", "def task_create_user(task: Task, username, password, userclass='super-user', crypt_m=crypt.METHOD_MD5):\n logger = logging.getLogger(__name__)\n logger.debug('Create user')\n hash_password = crypt.crypt(password, crypt_m)\n config_commands = ['set system login user {} class {}'.format(username, userclass),\n 'set system login user {} authentication encrypted-password {}'.format(username, hash_password)]\n logger.debug(config_commands)\n result = {'completed': [], 'failed': []}\n out = task.run(task=netmiko_send_config, config_commands=config_commands)\n print_result(out)\n if out.failed:\n for host in out.failed_hosts.keys():\n logger.warning(f'Failed task on device {host}')\n task.inventory.hosts[host]['error'] = True\n result['failed'].append(host)\n for host, res in out.items():\n if not res.failed:\n logger.debug('Create user {} with device {} '.format(\n username, task.inventory.hosts[host].name))\n task.inventory.hosts[host]['error'] = False\n result['completed'].append(host)\n# result.append(parse_info(h,r.result))\n return result", "def sample_user(email: str = \"test@gmail.com\", password: str = \"testpass\"):\n return get_user_model().objects.create_user(email, password)", "def create_user(self, phone, password=None, **extra_fields):\n print(extra_fields)\n if not phone:\n raise ValueError('Users must have an phone number')\n if not password:\n raise ValueError('Users must have a password')\n try:\n extra_fields['role']\n except Exception:\n raise ValueError('Users must have a role')\n try:\n extra_fields['name']\n except Exception:\n raise ValueError('Users must have a name') \n user = self.model(phone=phone, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create_user(name, email):\n user = register(name, email)\n add_message(user=user, text=config.MSG_WELCOME)\n add_message(user=user, text=config.MSG_UNVERIFIED, can_dismiss=False)\n return user", "def create_user_email(user):\n if not user.is_authenticated:\n return False\n \n user.email = \"%s@%s\" % (user.username, settings.DEFAULT_EMAIL_HOST)\n user.save()\n \n return user.email", "def cli_create(dbfile, username, email, password, group):\n with atomic(dbfile) as cursor:\n create_user(cursor, username=username, password=password, \n email=email, groups=group)\n click.echo(f\"Created user {username!r} with password {password!r}\")", "def create_user(self, email, plan_type, password=None, **kwargs):\n if not email:\n raise ValueError('Users must have an email address!')\n if not plan_type:\n raise ValueError('Each user must have a plan!')\n user = self.model(email=self.normalize_email(email), **kwargs)\n user.set_password(password)\n user.plan = Plan.objects.get(id=plan_type)\n user.save(using=self._db)\n\n return user", "def create_user(self, username=\"foo\", email=\"foo@foo.com\", pwd=\"password\"):\n with 
app.app_context():\n user = User(username=username,\n email=email,\n pwd=bcrypt.generate_password_hash(pwd))\n db.session.add(user)\n db.session.commit()", "def user_create(client_id, email, password=None, first_name=None, last_name=None, user_info=None):\n # validate if email contains actually a valid email address:\n try:\n validate_email(email)\n except ValidationError:\n raise ex.UserError(\"please enter a valid email address\")\n # create account\n user = create_user(email)\n user.first_name = first_name\n user.last_name = last_name\n if password:\n user.set_password(password)\n if user_info:\n for (key, value) in user_info.iteritems():\n if key == \"social\" and value is not None: user.meta['social'] = value\n elif key == \"address\" and value is not None: user.meta['address'] = value\n elif key == \"crm\" and value is not None: user.meta['crm'] = value\n elif key == \"local\" and value is not None: user.meta['local'] = value\n \n user_info = user_to_dict(user, include_name=True)\n\n # build success result\n return user_info", "def create_user(email, password):\n email_used = AuthUser.query.filter_by(email=email).first()\n if email_used:\n return False, \"Email address has already been used\"\n account = Account(email)\n account.plan_key = 'BASIC'\n account.is_active = True\n account.created = datetime.datetime.now()\n db.session.add(account)\n user = AuthUser(email, password, account)\n user.created = datetime.datetime.now()\n db.session.add(user)\n db.session.commit()\n return user.id, None", "def sample_user(email='test@tslabs.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n #username = self.model.normalize_username(username)\n user = self.model( email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email_or_phone, password=None, **extra_fields):\n return self._create_user(email_or_phone, password, False, False, **extra_fields)", "def newuser(self, username, password,\n force_password_change_at_next_login_req=False,\n useusernameascn=False, userou=None, surname=None, givenname=None,\n initials=None, profilepath=None, scriptpath=None, homedrive=None,\n homedirectory=None, jobtitle=None, department=None, company=None,\n description=None, mailaddress=None, internetaddress=None,\n telephonenumber=None, physicaldeliveryoffice=None, sd=None,\n setpassword=True, uidnumber=None, gidnumber=None, gecos=None,\n loginshell=None, uid=None, nisdomain=None, unixhome=None,\n smartcard_required=False):\n\n displayname = \"\"\n if givenname is not None:\n displayname += givenname\n\n if initials is not None:\n displayname += ' %s.' % initials\n\n if surname is not None:\n displayname += ' %s' % surname\n\n cn = username\n if useusernameascn is None and displayname != \"\":\n cn = displayname\n\n user_dn = \"CN=%s,%s,%s\" % (cn, (userou or \"CN=Users\"), self.domain_dn())\n\n dnsdomain = ldb.Dn(self, self.domain_dn()).canonical_str().replace(\"/\", \"\")\n user_principal_name = \"%s@%s\" % (username, dnsdomain)\n # The new user record. 
Note the reliance on the SAMLDB module which\n # fills in the default informations\n ldbmessage = {\"dn\": user_dn,\n \"sAMAccountName\": username,\n \"userPrincipalName\": user_principal_name,\n \"objectClass\": \"user\"}\n\n if smartcard_required:\n ldbmessage[\"userAccountControl\"] = str(dsdb.UF_NORMAL_ACCOUNT |\n dsdb.UF_SMARTCARD_REQUIRED)\n setpassword = False\n\n if surname is not None:\n ldbmessage[\"sn\"] = surname\n\n if givenname is not None:\n ldbmessage[\"givenName\"] = givenname\n\n if displayname != \"\":\n ldbmessage[\"displayName\"] = displayname\n ldbmessage[\"name\"] = displayname\n\n if initials is not None:\n ldbmessage[\"initials\"] = '%s.' % initials\n\n if profilepath is not None:\n ldbmessage[\"profilePath\"] = profilepath\n\n if scriptpath is not None:\n ldbmessage[\"scriptPath\"] = scriptpath\n\n if homedrive is not None:\n ldbmessage[\"homeDrive\"] = homedrive\n\n if homedirectory is not None:\n ldbmessage[\"homeDirectory\"] = homedirectory\n\n if jobtitle is not None:\n ldbmessage[\"title\"] = jobtitle\n\n if department is not None:\n ldbmessage[\"department\"] = department\n\n if company is not None:\n ldbmessage[\"company\"] = company\n\n if description is not None:\n ldbmessage[\"description\"] = description\n\n if mailaddress is not None:\n ldbmessage[\"mail\"] = mailaddress\n\n if internetaddress is not None:\n ldbmessage[\"wWWHomePage\"] = internetaddress\n\n if telephonenumber is not None:\n ldbmessage[\"telephoneNumber\"] = telephonenumber\n\n if physicaldeliveryoffice is not None:\n ldbmessage[\"physicalDeliveryOfficeName\"] = physicaldeliveryoffice\n\n if sd is not None:\n ldbmessage[\"nTSecurityDescriptor\"] = ndr_pack(sd)\n\n ldbmessage2 = None\n if any(map(lambda b: b is not None, (uid, uidnumber, gidnumber, gecos,\n loginshell, nisdomain, unixhome))):\n ldbmessage2 = ldb.Message()\n ldbmessage2.dn = ldb.Dn(self, user_dn)\n if uid is not None:\n ldbmessage2[\"uid\"] = ldb.MessageElement(str(uid), ldb.FLAG_MOD_REPLACE, 'uid')\n if uidnumber is not None:\n ldbmessage2[\"uidNumber\"] = ldb.MessageElement(str(uidnumber), ldb.FLAG_MOD_REPLACE, 'uidNumber')\n if gidnumber is not None:\n ldbmessage2[\"gidNumber\"] = ldb.MessageElement(str(gidnumber), ldb.FLAG_MOD_REPLACE, 'gidNumber')\n if gecos is not None:\n ldbmessage2[\"gecos\"] = ldb.MessageElement(str(gecos), ldb.FLAG_MOD_REPLACE, 'gecos')\n if loginshell is not None:\n ldbmessage2[\"loginShell\"] = ldb.MessageElement(str(loginshell), ldb.FLAG_MOD_REPLACE, 'loginShell')\n if unixhome is not None:\n ldbmessage2[\"unixHomeDirectory\"] = ldb.MessageElement(\n str(unixhome), ldb.FLAG_MOD_REPLACE, 'unixHomeDirectory')\n if nisdomain is not None:\n ldbmessage2[\"msSFU30NisDomain\"] = ldb.MessageElement(\n str(nisdomain), ldb.FLAG_MOD_REPLACE, 'msSFU30NisDomain')\n ldbmessage2[\"msSFU30Name\"] = ldb.MessageElement(\n str(username), ldb.FLAG_MOD_REPLACE, 'msSFU30Name')\n ldbmessage2[\"unixUserPassword\"] = ldb.MessageElement(\n 'ABCD!efgh12345$67890', ldb.FLAG_MOD_REPLACE,\n 'unixUserPassword')\n\n self.transaction_start()\n try:\n self.add(ldbmessage)\n if ldbmessage2:\n self.modify(ldbmessage2)\n\n # Sets the password for it\n if setpassword:\n self.setpassword((\"(distinguishedName=%s)\" %\n ldb.binary_encode(user_dn)),\n password,\n force_password_change_at_next_login_req)\n except:\n self.transaction_cancel()\n raise\n else:\n self.transaction_commit()", "def _create_user(self, password, **extra_fields):\n try:\n user = self.model(**extra_fields)\n user.set_password(password)\n 
user.save(using=self._db)\n return user\n except:\n raise ValueError('ValueError: Cannot create new user')", "def create_user(first_name,last_name,email,password):\n\n\tnew_user = User(first_name,last_name,email,password)\n\treturn new_user", "def _create_user(self, email, password, first_name, last_name, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n if not self.model:\n self.model = MHacksUser\n try:\n request = extra_fields.pop('request')\n except KeyError:\n request = None\n user = self.model(email=email, first_name=first_name, last_name=last_name, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n from django.contrib.auth.models import Group\n user.groups.add(Group.objects.get(name=GroupEnum.HACKER))\n user.save(using=self._db)\n from utils import send_verification_email\n if request:\n send_verification_email(user, request)\n return user", "def _create_user(self, username, email, password, phone, **extra_fields):\n\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, phone=phone, **extra_fields) # using email_id instead of email\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, password):\n return self._user(username=username, password=password, create=True)", "def sample_user(email='test@gmail.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='test@gmail.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='test@gmail.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def create_user(entry):\n # only works for first + last name currently\n full_name = entry[5].split()\n email = '{first_name}-{client_id}@{domain}'.format(\n first_name=full_name[0].lower(),\n client_id=str(entry[4]).strip(), # unique email for clients with same name\n domain='example.com')\n password = 'test1234'\n dob = timezone.now() - timedelta(days=(365 * random.randint(18, 99)))\n try:\n user = get_user_model().objects.get(email=email)\n except get_user_model().DoesNotExist:\n user = get_user_model().objects.create_user(email=email, first_name=full_name[0],\n last_name=full_name[1], password=password, dob=dob)\n return user", "def do_user_create(cs, args):\n cs.users.create(args.username, args.password, args.email, args.realname,\n args.comment)\n print(\"Create user '%s' successfully.\" % args.username)", "def sample_user(email='john7ric@mail.com', password='open@123'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email, password, is_doctor, is_hospital_admin):\n return MyUser.objects.create_user(email, is_hospital_admin, is_doctor, password)", "def create_new_user():\n return get_user_model().objects.create_user(\n email='test@gmail.com',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def _create_user(self, ident, password, **extra_fields):\n if not ident:\n raise ValueError('The Email must be set')\n ident = self.normalize_email(ident)\n user = self.model(ident=ident, **extra_fields)\n user.set_password(password)\n\n user.save()\n return user", "def create_user(\n self,\n name: str,\n email: str,\n password: str,\n daa_pdf: Optional[bytes],\n institution: str,\n website: str,\n budget: float,\n role: Dict[Any, Any],\n verify_key: VerifyKey,\n ) -> None:\n _private_key = 
SigningKey.generate()\n\n encoded_pk = _private_key.encode(encoder=HexEncoder).decode(\"utf-8\")\n encoded_vk = _private_key.verify_key.encode(encoder=HexEncoder).decode(\"utf-8\")\n added_by = self.get_user(verify_key).name # type: ignore\n\n # Register the user in the database\n self.signup(\n name=name,\n email=email,\n password=password,\n role=role,\n budget=budget,\n private_key=encoded_pk,\n verify_key=encoded_vk,\n daa_pdf=daa_pdf,\n added_by=added_by,\n institution=institution,\n website=website,\n )", "def create_user_service(username: str, email: str, password: str) -> None:\n hashed_password = bcrypt.generate_password_hash(password).decode('UTF-8')\n user = User(username=username, email=email, password=hashed_password)\n db.session.add(user)\n db.session.commit()", "def create_user(user_id, password_16char, public_key_32char):\n headers = {'Content-type': 'application/json'}\n payload = {'user_id': user_id\n , 'user_password': password_16char\n , 'public_key': public_key_32char}\n response = requests.post(\"http://localhost:5000/user/createUser\", data=json.dumps(payload), headers=headers)\n return response.text", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, first_name, last_name, password, **extra_fields):\n if not email:\n raise ValueError(_('Email Address is required'))\n email = self.normalize_email(email)\n user = self.model(\n email=email,\n first_name=first_name,\n last_name=last_name,\n **extra_fields\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, user_account, password=None):\n if not user_account:\n raise ValueError('Users must have an user_account')\n\n user = self.model(\n user_account=user_account\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='test@test.com')[0]\n user.set_password('testabc123')\n user.save()\n\n return user", "def new_user(request):\r\n rdict = request.params\r\n\r\n u = User()\r\n\r\n u.username = unicode(rdict.get('username'))\r\n if u.username:\r\n u.username = u.username.lower()\r\n u.email = unicode(rdict.get('email')).lower()\r\n passwd = get_random_word(8)\r\n u.password = passwd\r\n u.activated = True\r\n u.is_admin = False\r\n u.api_key = User.gen_api_key()\r\n\r\n try:\r\n DBSession.add(u)\r\n DBSession.flush()\r\n # We need to return the password since the admin added the user\r\n # manually. 
This is only time we should have/give the original\r\n # password.\r\n ret = dict(u)\r\n ret['random_pass'] = passwd\r\n return _api_response(request, ret)\r\n\r\n except IntegrityError, exc:\r\n # We might try to add a user that already exists.\r\n LOG.error(exc)\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: User exists.',\r\n })", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='test@test.com')[0]\n user.set_password('testabc123')\n user.save()\n return user", "def create_user(\n self, email, \n user_name, \n first_name,\n password,\n **other_information):\n\n if not email:\n raise ValueError(\n _('You must be provide an email address!')\n )\n \n email = self.normalize_email(email)\n user = self.model(\n email=email,\n user_name=user_name,\n first_name=first_name,\n **other_information\n )\n\n user.set_password(password)\n user.save()\n\n return user", "def create_user(name, password, tenant=None, domain=None):\n manager = get_manager()\n if user_exists(name, domain=domain):\n log(\"A user named '%s' already exists in domain '%s'\" % (name, domain),\n level=DEBUG)\n return\n\n tenant_id = None\n if tenant:\n tenant_id = manager.resolve_tenant_id(tenant, domain=domain)\n if not tenant_id:\n error_out(\"Could not resolve tenant_id for tenant '%s' in domain \"\n \"'%s'\" % (tenant, domain))\n\n domain_id = None\n if domain:\n domain_id = manager.resolve_domain_id(domain)\n if not domain_id:\n error_out('Could not resolve domain_id for domain %s when creating'\n ' user %s' % (domain, name))\n\n manager.create_user(name=name,\n password=password,\n email='juju@localhost',\n tenant_id=tenant_id,\n domain_id=domain_id)\n log(\"Created new user '%s' tenant: '%s' domain: '%s'\" % (name, tenant_id,\n domain_id), level=DEBUG)", "def _create_user(self, email, password, **extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(email=email, **extra_fields)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def _create_user(self, email, password, **extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(email=email, **extra_fields)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def create_user(self, user_id):\n data = {\n 'email': self._email_for_user_id(user_id),\n 'username': user_id,\n 'password': str(uuid.uuid4()),\n 'name': user_id,\n }\n\n # create user and return it to caller\n return self._post('/users', data=data)", "def create_user(self, email=None, name=None, password=None, phone=None):\n # if not email:\n # raise ValueError('Users must have an email address')\n\n user = self.model(\n email=email,\n name=name,\n phone=phone\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(user_name: str):\n user = User()\n user.username = user_name\n user.save()\n return user", "def __init__(self, user, password, _recipients, templatedir='templates'):\n\n self.user = user\n self.password = password\n self.recipient = _recipients if type (_recipients) is list else [_recipients]\n self.server = 'smtp.gmail.com'\n self.port = 587\n\n if os.path.isdir(templatedir):\n self.templatedir = templatedir\n else:\n self.templatedir = 
os.path.join(os.path.abspath(os.path.dirname(__file__)), templatedir)\n\n self.env = Environment(loader=FileSystemLoader(self.templatedir))", "def sample_user(email='scott@test.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def user(**kwargs):\n defaults = {}\n if 'username' not in kwargs:\n defaults['username'] = ''.join(random.choice(letters)\n for x in xrange(15))\n if 'email' not in kwargs:\n defaults['email'] = ''.join(\n random.choice(letters) for x in xrange(10)) + '@example.com'\n defaults.update(kwargs)\n user = User(**defaults)\n user.set_password(kwargs.get('password', 'testpass'))\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n print(\"create user\")\n return user", "def create_user(username):\n\n password = getpass.getpass('Password for {0}: '.format(username))\n confirm = getpass.getpass('Again: ')\n\n if password != confirm:\n print >> sys.stderr, \"Passwords don't match\"\n\n sys.exit(1)\n\n with transaction.manager:\n Users(username, password).save()", "def create_a_user(self, username='fry', email='fry@futur.ama', password='Qwerty!234'):\n user = User.objects.create_user(username, email, password)\n user.save()\n return user", "def create_user(fname, lname, email, password, phone_number):\n user = User(fname = fname, lname = lname , email = email ,password = password, phone_number = phone_number)\n #setting password hash\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n return user" ]
[ "0.6836732", "0.6782684", "0.66625166", "0.6148272", "0.60939145", "0.59752667", "0.59635276", "0.59110373", "0.5898103", "0.58977437", "0.5887567", "0.58812404", "0.5815839", "0.5802219", "0.57664925", "0.57609296", "0.57314557", "0.57081306", "0.56840855", "0.56605315", "0.5635352", "0.55870754", "0.5580626", "0.5579031", "0.55513906", "0.55480254", "0.55424273", "0.55354136", "0.5533354", "0.5503334", "0.55005276", "0.54932624", "0.54926896", "0.54723006", "0.5450565", "0.5441564", "0.5440344", "0.5439515", "0.5438194", "0.54380023", "0.5435078", "0.5431898", "0.54048675", "0.5392365", "0.53726995", "0.53661644", "0.5346531", "0.53464997", "0.53321177", "0.5328356", "0.53235954", "0.5319738", "0.53175336", "0.53153354", "0.53116155", "0.53108907", "0.5300398", "0.52967477", "0.52964514", "0.529536", "0.52874035", "0.5283937", "0.5278095", "0.5269913", "0.5267814", "0.52665734", "0.52611", "0.5254836", "0.525264", "0.525264", "0.525264", "0.5246726", "0.52422947", "0.5232217", "0.52256924", "0.52249885", "0.522386", "0.52221096", "0.5220444", "0.5219833", "0.52183974", "0.52182394", "0.5210677", "0.52060604", "0.52055365", "0.52044684", "0.5202703", "0.5200955", "0.51921666", "0.51921666", "0.51884234", "0.51873046", "0.5180693", "0.51801467", "0.5179385", "0.5179329", "0.51765", "0.5175611", "0.5173978", "0.5173285" ]
0.72970504
0
Creates a new tag in the specified tag namespace. The tag requires either the OCID or the name of the tag namespace that will contain this tag definition. You must specify a name for the tag, which must be unique across all tags in the tag namespace and cannot be changed. The name can contain any ASCII character except the space (_) or period (.) characters. Names are case insensitive. That means, for example, \"myTag\" and \"mytag\" are not allowed in the same namespace. If you specify a name that's already in use in the tag namespace, a 409 error is returned. The tag must have a description. It does not have to be unique, and you can change it with
def create_tag(self, tag_namespace_id, create_tag_details, **kwargs): resource_path = "/tagNamespaces/{tagNamespaceId}/tags" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_tag got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "tagNamespaceId": tag_namespace_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_tag_details, response_type="Tag") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_tag_details, response_type="Tag")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, name, tag):\n\n\t\turl_json = urllib.urlencode({\"name\": name, \"tag\": tag})\n\t\treturn self._create(\"/tag?json_hash=%s\" % url_json, \"tag\")", "def create_tag(self, session, tags):\n self._tag(session.put, tags=tags, session=session)", "def create_tag(self, entry_name, tag):\n return self.__datacatalog.create_tag(parent=entry_name, tag=tag)", "def create_tag_namespace(self, create_tag_namespace_details, **kwargs):\n resource_path = \"/tagNamespaces\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_tag_namespace got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_namespace_details,\n response_type=\"TagNamespace\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_namespace_details,\n response_type=\"TagNamespace\")", "def create_tag(name):\n name = name.strip().lower()\n tag = Tags(name)\n try:\n db_session.add(tag)\n db_session.commit()\n except exc.IntegrityError as err:\n db_session.rollback()\n return 'Tag \"%s\" has not been added - already exists: %s.' % (name, err), 'warning', None\n return 'Tag \"%s\" has been added.' 
% name, 'success', tag", "def create_or_get_tag(self, tag_name: str, *args, **kwargs):\n\n tag_data = api.create_or_get_tag(\n tag_name,\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return en.Tag(tag_data)", "def create_a_tag(self, tag_id, contact_id):\n data = {\"contactTag\":{\"contact\":str(contact_id),\"tag\":str(tag_id)}}\n\n return self.client._post(\"/contactTags\", json=data)", "def createTag(self, authenticationToken, tag):\r\n pass", "def createTag(self, authenticationToken, tag):\r\n self.send_createTag(authenticationToken, tag)\r\n return self.recv_createTag()", "def create_tag():\n \n name = request.form['tag_name']\n\n if \"name\" in session:\n return redirect(\"/tags\")\n\n else:\n new_tag = Tag(name = name)\n db.session.add(new_tag)\n db.session.commit()\n return redirect(\"/tags\")", "def _create_tag_request():\n\n key = helpers.get('Tag.1.Key')\n value = helpers.get('Tag.1.Value')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'createTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key,\n 'tags[0].value': value\n }\n\n response = requester.make_request_async(args)\n\n return response", "def create(self, params={}, **options):\n return self.client.post(\"/tags\", params, **options)", "def make_tag(tag_name, text='', tag_attr=None):\n if tag_attr is None:\n tag_attr = {}\n\n doc = xml.dom.minidom.Document()\n element = doc.createElement(tag_name)\n if tag_attr:\n for k, v in izip(list(tag_attr.keys()), list(tag_attr.values())):\n element.setAttribute(unicode(k), unicode(v))\n if text:\n text_node = doc.createTextNode(text.strip())\n element.appendChild(text_node)\n return element", "def tag ():\n\n tagname = get_tag(comp_versions, 'ACE')\n\n if opts.tag:\n if opts.take_action:\n vprint (\"Placing tag %s on ACE_TAO\" % (tagname))\n ex (\"cd $DOC_ROOT/ACE_TAO && git tag -a \" + tagname + \" -m\\\"\" + tagname + \"\\\"\")\n\n vprint (\"Placing tag %s on MPC\" % (tagname))\n ex (\"cd $DOC_ROOT/MPC && git tag -a \" + tagname + \" -m\\\"\" + tagname + \"\\\"\")\n\n # Update release branches\n latest_branch_helper (update_latest_branch, opts.release_type)\n else:\n vprint (\"Placing tag %s on ACE_TAO\" % (tagname))\n vprint (\"Placing tag %s on MPC\" % (tagname))\n print (\"Creating tags:\\n\")\n print (\"Placing tag \" + tagname + \"\\n\")", "def create_tag(tag, directory=None):\n execute_command('git tag {0}'.format(tag), shell=True, cwd=directory)", "def create_in_workspace(self, workspace, params={}, **options):\n path = \"/workspaces/%s/tags\" % (workspace)\n return self.client.post(path, params, **options)", "def add_tag(self, obj, tag_name):\r\n tag_names = parse_tag_input(tag_name)\r\n if not len(tag_names):\r\n raise AttributeError(_('No tags were given: \"%s\".') % tag_name)\r\n if len(tag_names) > 1:\r\n raise AttributeError(_('Multiple tags were given: \"%s\".') % tag_name)\r\n tag_name = tag_names[0]\r\n if settings.FORCE_LOWERCASE_TAGS:\r\n tag_name = tag_name.lower()\r\n tag, created = self.get_or_create(name=tag_name)\r\n ctype = ContentType.objects.get_for_model(obj)\r\n TaggedItem._default_manager.get_or_create(\r\n tag=tag, content_type=ctype, object_id=obj.pk)", "def add_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = 
filename.encode(\"utf-8\")\n print filename\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = set(metadata.get(\"tags\", []))\n tags.add(tag_name)\n metadata[\"tags\"] = list(tags)\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"added\", 200", "def add_tag(session, tag_name, user_id=None, username='system_user'):\n session = validate_session(session)\n date_created=datetime.now()\n try:\n add_tag = TagInfo(tag_name, date_created, user_id)\n session.add(add_tag)\n session.commit()\n return(True, \"Tag %s added\" % (tag_name), add_tag)\n except Exception as e:\n session.rollback()\n return(False, \"Tag %s failed to add\" % (tag_name))", "def addNode(self, nTag, pkg, exe, args, name, namespace):\r\n try:\r\n validateName(nTag)\r\n except IllegalName:\r\n raise InvalidRequest('Node tag is not a valid.')\r\n\r\n if nTag in self._nodes:\r\n raise InvalidRequest(\"Can not use the same node tag '{0}' in the \"\r\n 'same container twice.'.format(nTag))\r\n\r\n node = self._obj.createNode(pkg, exe, args, name, namespace)\r\n self._nodes[nTag] = node\r\n node.notifyOnDeath(self._nodeDied)", "def create(self, label_id):\n data = {\n 'type': 'tagit',\n 'rate_count': 0,\n 'rate_range': 'day',\n 'limit_count': 0,\n 'limit_range': 'day',\n 'schedule': [],\n 'enabled': True,\n 'args': {\n 'sn': label_id,\n 'tag_sn': label_id\n }\n }\n # Yes, it's confusing. the `/actions/` endpoint is used for tags, while\n # the /tags/ endpoint is used for labels.\n return self._post(\n request=ApiActions.CREATE.value,\n uri=ApiUri.ACTIONS.value,\n params=data\n )", "def ex_create_tags(self, node, tags):\n if not tags:\n return\n\n params = { 'Action': 'CreateTags',\n 'ResourceId.0': node.id }\n for i, key in enumerate(tags):\n params['Tag.%d.Key' % i] = key\n params['Tag.%d.Value' % i] = tags[key]\n\n self.connection.request(self.path,\n params=params.copy()).object", "def add_tag(self, session, tag):\n self._tag(session.put, key=tag, session=session)", "def test_create_tag_invalid(self):\n tag_data = {'name': ''}\n res = self.client.post(TAGS_URL, data=tag_data)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def create(self, name, description=None, color=None):\n data = {\n 'name': name,\n 'title': name,\n 'description': description or name,\n 'appearance': {\n 'color': color or random_color()\n }\n }\n # Yes, it's confusing. 
the `/tags/` endpoint is used for labels\n return self._post(\n request=ApiActions.CREATE.value,\n uri=ApiUri.TAGS.value,\n params=data\n )", "def test_create_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def make_new_tag(tag_name, user_path, user_signed_in, current_user):\n if not user_signed_in:\n print('ALERT: -- User not logged in --')\n else:\n user = current_user[0]\n print(is_tag(tag_name, user_path, current_user))\n if is_tag(tag_name, user_path, current_user):\n print('Tag already exist')\n else:\n os.mkdir((user_path + '\\\\' + user + '\\\\' + tag_name).encode('unicode_escape'))\n print('Tag --' + tag_name + '-- Created')", "def create_tag(case_dict, new_tag, username, password):\n # ---------------------------------------------------------------------\n logger.debug(\"create_tag\")\n\n # create a new trunk tag\n os.chdir(case_dict[\"archive_temp_dir\"])\n svn_repo = \"{0}/trunk\".format(case_dict[\"svn_repo_url\"])\n svn_repo_tag = \"{0}/trunk_tags/{1}\".format(case_dict[\"svn_repo_url\"], new_tag)\n msg = '\"create new trunk tag\"'\n cmd = [\n \"svn\",\n \"copy\",\n \"--username\",\n username,\n \"--password\",\n password,\n svn_repo,\n svn_repo_tag,\n \"--message\",\n msg,\n ]\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as error:\n cmd_nopasswd = [\n \"svn\",\n \"copy\",\n \"--username\",\n username,\n \"--password\",\n \"******\",\n svn_repo,\n svn_repo_tag,\n \"--message\",\n msg,\n ]\n msg = _call_template.substitute(\n function=\"checkin_trunk\",\n cmd=cmd_nopasswd,\n error=error.returncode,\n strerror=error.output,\n )\n logger.warning(msg)\n raise SVNException(msg)", "def _create_element(tag, text=\"\", attr={}, namespace=Xmlns_path):\n element = Et.Element('.//' + namespace + tag, attr)\n element.text = text\n return element", "def create(cls, ns, name, **kwargs):\n key_name = '%s:%s' % (ns, name)\n return cls(key_name=key_name, ns=ns, name=name, **kwargs)", "def tag(self, tag):\n \n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n #Handle this better?\n return\n \n if isinstance(tag, six.string_types):\n tname = tag\n try:\n tag = Tag(owner=self.owner, name=tag)\n tag.save()\n except IntegrityError:\n tag = Tag.objects.get(slug=makeslug(tname), owner=self.owner)\n \n tag.save() # If this isn't here there are crashes for some reason\n self.tags.add(tag)", "def ensure_tag(name, vocabulary=None):\n obj = {'name': name}\n where_clause = tag_table.c.name == name\n\n if vocabulary is not None:\n ## Add vocabulary to filter / new object\n vocabulary_id = ensure_vocabulary(vocabulary)\n obj['vocabulary_id'] = vocabulary_id\n where_clause = and_(\n where_clause, tag_table.c.vocabulary_id == vocabulary_id)\n\n query = select([tag_table]).where(where_clause)\n tag_obj = query.execute().first()\n\n if tag_obj is not None:\n ## We already have the tag!\n return tag_obj['id']\n\n ## Create tag, return id\n return create_tag(obj)", "def post_namespace_create(self, resource_dict):\n pass", "def create_tag(self,\r\n access_token,\r\n tag_create_params):\r\n\r\n # Prepare query URL\r\n _url_path = '/tags'\r\n _query_builder = Configuration.base_uri\r\n _query_builder += _url_path\r\n _query_parameters = 
{\r\n 'access_token': access_token\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json',\r\n 'content-type': 'application/json; charset=utf-8'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(tag_create_params))\r\n CustomQueryAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n\r\n # Endpoint and global error handling using HTTP status codes.\r\n if _context.response.status_code == 0:\r\n raise APIException('Unexpected error.', _context)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, Tag.from_dictionary)", "def tag(self, tag_name):\r\n return Tag(self, tag_name)", "def CreateTags(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'ec2':\n response = Client.create_tags(\n Resources = [\n\t\t ResourceId\n\t\t],\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'efs':\n response = Client.create_tags(\n FileSystemId = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'redshift':\n response = Client.create_tags(\n ResourceName = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'workspaces':\n response = Client.create_tags(\n ResourceId = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True", "def create(self, session, prepend_key=True):\n if not self.allow_create:\n raise exceptions.MethodNotSupported(self, \"create\")\n\n endpoint_override = self.service.get_endpoint_override()\n if self.put_create:\n # create tag do not need requires_id\n request = self._prepare_request(requires_id=False,\n prepend_key=prepend_key)\n response = session.put(request.uri, endpoint_filter=self.service,\n endpoint_override=endpoint_override,\n json=request.body, headers=request.headers)\n self._translate_response(response)\n return self", "def create_tag_with_entry(title):\n tag = Tag.objects.create(title=title)\n tag.save()\n tag.entry.add(1)\n return tag", "def _add_tag(self, tag_name):\n tag = TagInfo()\n tag._name = tag_name\n self._tags.append(tag)\n return tag", "def test_create_tag_with_invalid_details_invalid(self):\n\n payload = {\n 'name': ''\n }\n\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def testAddTag(self):\n project = self.session.create_project()\n\n project.add_tag(\"test\")\n self.assertEqual(project.tags, [\"test\"], \"Can add a tag to a project.\")\n\n json_str = project.to_json()\n doc = json.loads(json_str)\n\n self.assertEqual(doc['meta']['tags'], [\"test\"],\n \"JSON representation had correct tags after add_tag().\")\n\n # Try adding the same tag yet again, shouldn't get a duplicate\n with self.assertRaises(ValueError):\n project.add_tag(\"test\")\n\n json_str = project.to_json()\n doc2 = json.loads(json_str)\n\n self.assertEqual(doc2['meta']['tags'], [\"test\"],\n \"JSON document did not end up with duplicate tags.\")", "def test_add_tag_invalid(self):\n 
payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def add_tag(convo_ID, tag_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/tags\"\n payload = json.dumps({\"tag_ids\": [tag_ID]})\n headers = {\"Authorization\": BEARER_TOKEN, \"Content-Type\": \"application/json\"}\n requests.request(\"POST\", url, headers=headers, data=payload)", "def test_create_tag_invalid_payload(self):\n\n tag_payload = {'name': ''}\n response = self.client.post(URL_TAGS, tag_payload)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def create_namespace(node, namespace, delete_before_create=True):\n if delete_before_create:\n Namespaces.delete_namespace(node, namespace)\n\n cmd = f\"ip netns add {namespace}\"\n exec_cmd_no_error(node, cmd, sudo=True)\n Namespaces.__namespaces.append(namespace)", "def update_tag(self, tag_namespace_id, tag_name, update_tag_details, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}/tags/{tagName}\"\n method = \"PUT\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_tag got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id,\n \"tagName\": tag_name\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_details,\n response_type=\"Tag\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_details,\n response_type=\"Tag\")", "def CreateForTag(cls, tag):\n parent_key = cls._GetParentKeyFromTag(tag)\n return cls(parent=parent_key)", "def add_tag():\n\n tag_name = request.form[\"name\"]\n\n if not tag_name:\n flash(\"Please enter tag name\")\n return redirect(\"/tags/new\")\n\n tag = Tag(name=tag_name)\n db.session.add(tag)\n db.session.commit()\n\n return redirect(\"/tags\")", "async def slashtag_add(\n self,\n ctx: commands.Context,\n tag_name: TagName(check_global=False),\n *,\n tagscript: TagScriptConverter,\n ):\n await self.create_slash_tag(ctx, tag_name, tagscript, is_global=False)", "def test_create_tag_invalid(self):\n payload = {'name':''}\n res = self.client.post(TAG_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)", "def add(self, tag):\n self.tags[tag.name] = tag", "def create_tags(ResourceArn=None, Tags=None):\n pass", "def _handle_new_tag(self, tag_name):\n log.debug(\"Handling new tag: %s\", 
tag_name)\n tag, created = Tag.get_or_create(name=tag_name)\n if tag not in self.doc.tags:\n self.listbox.body.insert(-1, MenuButton(tag_name, None))\n tag.documents.add(self.doc)\n self.new_tag.set_edit_text(\"\")", "def test_create_tag(self):\n\n tag_payload = {'name': 'Test Tag'}\n self.client.post(URL_TAGS, tag_payload)\n\n is_tag_created = Tag.objects.filter(\n user=self.user,\n name=tag_payload['name']\n ).exists()\n\n self.assertTrue(is_tag_created)", "def test_add_defined_tag_to_bucket(self, test, object_storage, with_or_without_compartment):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"add-defined-tag-to-bucket\",\n \"resource\": \"oci.bucket\",\n \"query\": [\n {\"namespace_name\": namespace_name},\n ],\n \"filters\": [\n {\"type\": \"value\", \"key\": \"name\", \"value\": bucket_name},\n ],\n \"actions\": [{\"type\": \"update\", \"defined_tags\": self.get_defined_tag(\"add_tag\")}],\n },\n session_factory=session_factory,\n )\n policy.run()\n resource = self._fetch_bucket_validation_data(\n policy.resource_manager, namespace_name, bucket_name\n )\n test.assertEqual(resource[\"name\"], bucket_name)\n test.assertEqual(self.get_defined_tag_value(resource[\"defined_tags\"]), \"true\")", "def create_tags(resource_id, key, value):\n response = EC2.create_tags(\n Resources=[\n resource_id,\n ],\n Tags=[\n {\n 'Key': key,\n 'Value': value\n },\n ]\n )\n return response", "def GachaCraftNodeExcelAddTag_(builder, Tag_):\n return AddTag_(builder, Tag_)", "def add_tag(self, *,\n id: str,\n tag: str,\n tag_type: str = 'default',\n resource_type: ResourceType = ResourceType.Table) -> None:\n LOGGER.info(f'New tag {tag} for id {id} with type {tag_type} and resource type {resource_type.name}')\n\n resource_table = f'{resource_type.name.lower()}_tag'\n resource_model = self._get_model_from_table_name(resource_table)\n if not resource_model:\n raise NotImplementedError(f'The resource type {resource_type.name} is not defined!')\n\n resource_key = f'{resource_type.name.lower()}_rk'\n\n tag_record = RDSTag(rk=tag, tag_type=tag_type)\n resource_tag_record = resource_model(tag_rk=tag)\n resource_tag_record.__setattr__(resource_key, id)\n try:\n with self.client.create_session() as session:\n session.merge(tag_record)\n session.merge(resource_tag_record)\n session.commit()\n except Exception as e:\n LOGGER.exception(f'Failed to add tag {tag} for {id}')\n raise e", "def add_new_tag():\n\n name = request.form.get('name')\n\n new_tag = Tag(name=name)\n db.session.add(new_tag)\n db.session.commit()\n\n return redirect(f'/tags')", "def add_tag(self, tag, attributes, extent):\n self.tags.append((tag, attributes, extent))", "def _mk_tag(ns, tag):\n return '{%s}%s' % (ns, tag) if ns else tag", "def make_new_tag():\n return render_template('tags/new_tag.html')", "def test_create_tag_succesful(self):\n payload = {'name': 'Test tag'}\n res = self.client.post(TAGS_URL, payload)\n\n exists = Tag.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n self.assertTrue(exists)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def create_container(ContainerName=None, Tags=None):\n pass", "def create_tag(self, name: str, revision: Optional[str] = None) -> None:\n if not revision:\n self._status.check_authority_for_commit()\n revision = self._status.commit_id\n\n post_data: Dict[str, Any] = {\"commit\": revision, \"name\": name}\n\n self._client.open_api_do(\"POST\", 
\"tags\", self.dataset_id, json=post_data)", "def add_new_tag():\n\n return render_template('create-tag.html')", "def add_tag(self, key, value=''):\r\n status = self.connection.create_tags([self.id], {key : value})\r\n if self.tags is None:\r\n self.tags = TagSet()\r\n self.tags[key] = value", "def create(context, namespace_name, values, session):\n\n namespace = namespace_api.get(\n context, namespace_name, session)\n\n # if the resource_type does not exist, create it\n resource_type_name = values['name']\n metadef_utils.drop_protected_attrs(\n models.MetadefNamespaceResourceType, values)\n try:\n resource_type = resource_type_api.get(\n context, resource_type_name, session)\n except exc.NotFound:\n resource_type = None\n LOG.debug(\"Creating resource-type %s\", resource_type_name)\n\n if resource_type is None:\n resource_type_dict = {'name': resource_type_name, 'protected': False}\n resource_type = resource_type_api.create(\n context, resource_type_dict, session)\n\n # Create the association record, set the field values\n ns_resource_type_dict = _to_db_dict(\n namespace['id'], resource_type['id'], values)\n new_rec = _create_association(context, namespace_name, resource_type_name,\n ns_resource_type_dict, session)\n\n return _to_model_dict(resource_type_name, new_rec)", "def add_tag(self, tag: str) -> None:\n tags = self.get_tag_index()\n tags.append(tag)\n self.write_tag_index(list(set(tags)))", "def add_tag(self, cr, uid, ids, code=None, name=None, create=False, context=None):\n tag_obj = self.pool.get('res.tag')\n tag_ids = tag_obj.get_tag_ids(cr, uid, self._name, code=code, name=name, context=context)\n if not tag_ids and create:\n model_id = self.pool.get('res.tag.model').search(cr, uid, [('model', '=', self._name)])[0]\n tag_ids = [tag_obj.create(cr, uid, {'name': name, 'code': code, 'model_id': model_id}, context=context)]\n\n if tag_ids:\n self.write(cr, uid, ids, {'tag_ids': [(4, tid) for tid in tag_ids]}, context=context)\n\n return bool(tag_ids)", "def tag(request, tag_name):\n raise NotImplementedError", "def _createElement(self, identifier, request):\n try:\n decoder, contentType = self._getDecoder(request)\n state = decoder(request.body)\n\n element = self._collection.createElementFromState(state)\n\n actualIdentifier = getattr(element, element.identifyingAttribute)\n if actualIdentifier != identifier:\n raise errors.IdentifierError(identifier, actualIdentifier)\n\n self._collection.add(element)\n return Created()\n except errors.SerializableError, e:\n contentType = self.defaultContentType\n encoder = self.encoders[contentType]\n errorResource = RESTErrorPage(e, encoder, contentType)\n return errorResource", "def tag(\n name: str, value: str, *, attributes: dict = {}, namespace: str = None\n ) -> str:\n attributes_str = \" \".join(f'{k}=\"{v}\"' for k, v in attributes.items())\n ns = f\"{namespace}:\" if namespace else \"\"\n if attributes_str:\n attributes_str = \" \" + attributes_str\n if value:\n return f\"<{ns}{name}{attributes_str}>{value}</{ns}{name}>\"\n return f\"<{ns}{name}{attributes_str} />\"", "def create_tags(self, resource_ids, tags):\r\n params = {}\r\n self.build_list_params(params, resource_ids, 'ResourceId')\r\n self.build_tag_param_list(params, tags)\r\n return self.get_status('CreateTags', params, verb='POST')", "def add_tag(self, transaction, citation_handle, tag_handle):\n citation = self.dbstate.db.get_citation_from_handle(citation_handle)\n citation.add_tag(tag_handle)\n self.dbstate.db.commit_citation(citation, transaction)", "def AddTag(self, 
tag):\n\n if not self.persistant:\n return\n\n self.db.ExecuteSql('insert into tags(tag, track_id) values(\"%s\", %d);'\n %(tag, self.persistant['id']))\n self.db.ExecuteSql('commit;')", "def test_create_tag_successful(self):\n tag_data = {'name': 'Snack'}\n res = self.client.post(TAGS_URL, data=tag_data)\n\n exists = Tag.objects.filter(\n user=self.user,\n name=tag_data['name']\n ).exists()\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertTrue(exists)", "def create_test_node_tag(**kw):\n tag = get_test_node_tag(**kw)\n dbapi = db_api.get_instance()\n return dbapi.add_node_tag(tag['node_id'], tag['tag'])", "def tag_add(self, remote_path, corpus_id, tag, storage_id=None):\n client, remote_path = self._get_storage(remote_path, storage_id=storage_id)\n return client.tag_add(corpus_id, tag)", "def sample_tag(user, name='Service Tag'):\n return Tag.objects.create(user=user, name=name)", "def create_resource(self, namespace: \"str\" = None):\n names = [\"create_namespaced_csistorage_capacity\", \"create_csistorage_capacity\"]\n\n _kube_api.execute(\n action=\"create\",\n resource=self,\n names=names,\n namespace=namespace,\n api_client=None,\n api_args={\"body\": self.to_dict()},\n )", "def create_or_update_tags(self, Tags):\n tag = Tags[0]\n asg_name = tag['ResourceId']\n ec2_tag = {\n 'Key': tag['Key'],\n 'Value': tag['Value']\n }\n try:\n response = self.asg.create_or_update_tags(\n Tags=Tags\n )\n except Exception as e:\n logger.error('Unknown Error: %s', str(e))\n else:\n logger.info(response)\n\n asg_instances = self.get_asg_instance_ids(asg_name)\n return EC2Wrapper(self.session).create_tags(Resources=asg_instances, Tags=[ec2_tag])", "def add_tag(self, tag_name, name = 'tag'):\r\n if self._only_whitespace.match(tag_name):\r\n # Don't allow an empty tag\r\n return\r\n\r\n try:\r\n tag = Tag._by_name(tag_name)\r\n except NotFound:\r\n tag = Tag._new(tag_name)\r\n tag._commit()\r\n\r\n # See if link already has this tag\r\n tags = LinkTag._fast_query(tup(self), tup(tag), name=name)\r\n link_tag = tags[(self, tag, name)]\r\n if not link_tag:\r\n link_tag = LinkTag(self, tag, name=name)\r\n link_tag._commit()\r\n\r\n return link_tag", "def createElement(tagName):\n print(\"Warning: createElement is deprecated in favor of createComponent\")\n return createComponent(tagName)", "def handle_add_new_tag():\n tag = Tag(name=request.form['name'])\n\n db.session.add(tag)\n db.session.commit()\n\n return redirect('/tags')", "def update_tag_namespace(self, tag_namespace_id, update_tag_namespace_details, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}\"\n method = \"PUT\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_tag_namespace got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n 
self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_namespace_details,\n response_type=\"TagNamespace\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_namespace_details,\n response_type=\"TagNamespace\")", "def create_uuid3(namespace, name):\n return uuid.uuid3(namespace, six.ensure_str(name))", "def set_tag(request):\n try:\n # Update existing Tag if tag_id exists, else create new Tag\n if \"tag_id\" not in request.POST or not request.POST[\"tag_id\"]:\n # Create new Tag\n tag = Tag.objects.create(title=request.form.cleaned_data[\"title\"])\n\n ActionLogger().log(request.user, \"created\", \"Knowledgebase Tag %s\" % tag)\n return format_ajax_response(True, \"Knowledgebase tag created successfully.\")\n else:\n # Update existing category\n tag = Tag.objects.get(pk=request.POST[\"tag_id\"])\n tag.title = request.form.cleaned_data[\"title\"]\n tag.save()\n\n ActionLogger().log(request.user, \"modified\", \"Knowledgebase Tag %s\" % tag)\n return format_ajax_response(True, \"Knowledgebase tag updated successfully.\")\n except Exception as ex:\n logger.error(\"Failed to set_tag: %s\" % ex)\n return format_ajax_response(False, \"There was an error setting the specified knowledgebase tag.\")", "def createContainer(tag, data={}): #@NoSelf", "def change_tag_namespace_compartment(self, tag_namespace_id, change_tag_namespace_compartment_detail, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}/actions/changeCompartment\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"change_tag_namespace_compartment got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=change_tag_namespace_compartment_detail)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=change_tag_namespace_compartment_detail)", "def initiate_new_tag (self,tag,key):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n self.tag_dict[tag] = {key}\r\n #with database\r\n\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, tag, key)\r\n 
db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO tags_to_keys\"\r\n +\" (notebook, tag, keyword)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def tag(nodes, arg='', split=True, default=0, add_parent_space=True, add_default_spaces=True, zero_node=None, const_node=None):\n\n nodes = mc.ls(nodes)\n orig_const_node = const_node\n orig_zero_node = zero_node\n\n for node in nodes:\n\n # get zero if not specified\n if orig_zero_node:\n zero_node = orig_zero_node\n else:\n zero_node = node+'_ZERO'\n\n # get const node - this is what the parent constraint will be thrown onto\n if orig_const_node:\n const_node = orig_const_node\n else:\n const_node = node+'_CONST'\n\n if add_parent_space and not mc.objExists(zero_node):\n mc.warning('Cannot find a zero for '+node)\n continue\n\n if not mc.objExists(const_node):\n mc.warning('Cannot find a const for '+const_node)\n continue\n\n # build arg\n arg = arg.strip()\n if ' ' in arg:\n arg = re.sub(' +',' ', arg)\n\n arg_kwargs = {\n 'arg': arg,\n 'split': split,\n 'default': default,\n 'const_node': const_node,\n 'zero_node': zero_node,\n 'add_default_spaces': add_default_spaces,\n 'add_parent_space': add_parent_space\n }\n\n # create + set attr\n if not mc.objExists(node+'.tagSpaces'):\n mc.addAttr(node, ln='tagSpaces', dt='string', hidden=False)\n\n mc.setAttr(node+'.tagSpaces', str(arg_kwargs), type='string')\n\n # This convert all this legacy sloppy crap to the new hotness\n space_obj = Space(node)\n space_obj.set_data()\n\n return arg", "def add_tag(self, tag):\n self.tags.append(tag)", "def _AddCreatedNamespace(self, state_tracker, identifier, line_number,\n namespace=None):\n if not namespace:\n namespace = identifier\n\n if self._HasSuppression(state_tracker, 'missingProvide'):\n return\n\n self._created_namespaces.append([namespace, identifier, line_number])", "def insert_tag(self, tagname, attrs=[], text=\"\",\n autoclose=False, newline=False):\n if autoclose:\n self.str += '\\n<%s%s%s>' % (tagname, string_for_attrs(attrs),\n ' /' if self.is_xml else '')\n else:\n self.push_tag(tagname, attrs)\n if text:\n if newline:\n self.insert_text('\\n' + indent(text, self.indent_level()))\n else:\n self.insert_text(text)\n self.pop_tag(newline=newline)", "def test_add_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['tag1', 'tag2', 'Mediterranean_Basin'])\n assert (fc.features[0]['properties']['tags'] ==\n 'Adriatic_Sea;Mediterranean_Basin;tag1;tag2')\n\n self.check_feature(fc.features[0])", "def create_tags(tags_path: Path, email: str, password: str, host_url: str):\n with open(tags_path) as f:\n tags_json = json.load(f)\n\n client = client_util.make_client(host_url, email, password)\n\n # Build dictionary of tags as they exist on the server, mapped by slug.\n online_tags_resp = api_get_tags.sync_detailed(client=client)\n if online_tags_resp.status_code != HTTPStatus.OK:\n click.echo(f\"Request to get tags failed with status {online_tags_resp}\")\n exit(1)\n online_tags = {\n online_tag.slug: online_tag for online_tag in online_tags_resp.parsed\n }\n\n # Record slugs of tags that failed.\n failures = set()\n\n for tag in tags_json[\"tags\"]:\n slug = tag[\"slug\"]\n name = tag[\"name\"]\n description = tag[\"description\"]\n color = tag.get(\"color\")\n\n if slug in online_tags:\n # Update\n online_tag = online_tags[slug]\n if (\n name == online_tag.name\n and description == online_tag.description\n and (color is None or color == online_tag.color)\n ):\n click.echo(f\"Tag {slug} is already up to date.\")\n else:\n 
click.echo(f\"Updating tag {slug}\")\n res = api_update_tag.sync_detailed(\n slug,\n client=client,\n json_body=PutTagsTagJsonBody(\n name,\n description,\n color if color else online_tags[slug].color,\n ),\n )\n if res.status_code != HTTPStatus.OK:\n click.echo(f\"Request failed with content={res.content}\")\n failures.add(slug)\n else:\n # Create\n click.echo(f\"Creating tag {slug}\")\n res = api_create_tag.sync_detailed(\n client=client,\n json_body=PostTagsJsonBody(\n name,\n slug,\n description,\n color=color if color else UNSET,\n ),\n )\n if res.status_code != HTTPStatus.CREATED:\n click.echo(f\"Request failed with content={res.content}\", err=True)\n failures.add(slug)\n\n if failures:\n click.echo(f\"Completed with failures: {failures}\", err=True)\n sys.exit(1)", "def create_tag(path, name, version, notes, test=False):\n\n tag_name = \"{}-{}\".format(name, version)\n tag_contents = \"Release %s for %s\\n\\n%s\" % (version, name, notes)\n\n if test:\n tag_name = \"test@\" + tag_name\n tag_contents = \"Test \" + tag_contents\n\n print(\"Creating annotated release tag: %s\" % tag_name)\n run_in_component(path, ['git', 'tag', '-a', '-F', '-', tag_name], stdin=tag_contents)", "def create(self, name=None, description=None):\n uri = URITemplate(self.baseuri + '/{owner}').expand(\n owner=self.username)\n return self.session.post(uri, json=self._attribs(name, description))" ]
[ "0.668044", "0.64789695", "0.6425235", "0.63370335", "0.62885815", "0.6265026", "0.6156292", "0.61304843", "0.5985835", "0.5902972", "0.5860836", "0.5857317", "0.58103025", "0.5736905", "0.57278633", "0.571501", "0.5682197", "0.56273866", "0.5566556", "0.5561892", "0.5553828", "0.5545572", "0.55432534", "0.55401194", "0.5506235", "0.5496501", "0.54942423", "0.5488063", "0.54709196", "0.5461829", "0.54439485", "0.54430693", "0.534843", "0.5336418", "0.5329959", "0.53045946", "0.5299483", "0.5289044", "0.52645415", "0.5256345", "0.5242787", "0.5235177", "0.52324855", "0.5228213", "0.52257115", "0.52217585", "0.5217019", "0.52148294", "0.52096367", "0.51985246", "0.51750684", "0.51694906", "0.5152105", "0.51327765", "0.51304066", "0.5129684", "0.5129471", "0.51263666", "0.51209426", "0.5112871", "0.51111007", "0.50970364", "0.50949776", "0.50764114", "0.5073295", "0.50582224", "0.50435805", "0.50413644", "0.50357985", "0.50257164", "0.5009755", "0.5008614", "0.5001197", "0.4998684", "0.49962315", "0.4994019", "0.49904567", "0.4981564", "0.4978175", "0.49770766", "0.49429157", "0.492379", "0.49208084", "0.48964944", "0.48963678", "0.48908284", "0.489074", "0.48853713", "0.4880499", "0.4879605", "0.48764196", "0.48630872", "0.4861964", "0.4846571", "0.48441756", "0.48438865", "0.48361236", "0.48346114", "0.48341796", "0.48242828" ]
0.7148086
0
Creates a new tag default in the specified compartment for the specified tag definition. If you specify that a value is required, a value is set during resource creation (either by the user creating the resource or another tag default). If no value is set, resource creation is blocked. If the `isRequired` flag is set to \"true\", the value is set during resource creation. If the `isRequired` flag is set to \"false\", the value you enter is set during resource creation.
def create_tag_default(self, create_tag_default_details, **kwargs): resource_path = "/tagDefaults" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token", "opc_request_id" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_tag_default got unknown kwargs: {!r}".format(extra_kwargs)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing), "opc-request-id": kwargs.get("opc_request_id", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, header_params=header_params, body=create_tag_default_details, response_type="TagDefault") else: return self.base_client.call_api( resource_path=resource_path, method=method, header_params=header_params, body=create_tag_default_details, response_type="TagDefault")
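A brief usage sketch for the create_tag_default operation above, written against the OCI Python SDK: the OCIDs are placeholders, the tag value and is_required settings are illustrative, and availability of the is_required field is assumed to match the SDK version in use.

    import oci

    # Load the default profile from ~/.oci/config and build the identity client.
    config = oci.config.from_file()
    identity = oci.identity.IdentityClient(config)

    # Describe the tag default: resources created in this compartment receive the
    # value "finance" for the given tag definition; is_required=False means the
    # value is applied automatically rather than demanded from the resource creator.
    details = oci.identity.models.CreateTagDefaultDetails(
        compartment_id="ocid1.compartment.oc1..exampleuniqueID",        # placeholder OCID
        tag_definition_id="ocid1.tagdefinition.oc1..exampleuniqueID",   # placeholder OCID
        value="finance",
        is_required=False,
    )

    # Invokes POST /tagDefaults (the operation defined above) and returns a TagDefault model.
    tag_default = identity.create_tag_default(details).data
    print(tag_default.id, tag_default.lifecycle_state)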
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Option(name: str, value: Union[str, int], default: Optional[bool] = None) -> Dict:\n doc = {'name': name, 'value': value}\n if default is not None:\n doc['isDefault'] = default\n return doc", "def register_option_pair(key, default_value):\n\n _OPTION_TEMPLATE[key] = default_value", "def createDevIDAttr(shapefileName, defaultVal):\n\n inputds = ogr.Open(shapefileName,update=True)\n if not inputds:\n sys.exit(\"Unable to open input file '{0}'\".format(shapefileName))\n\n inputlyr = inputds.GetLayer()\n\n # Create field definition(s)\n # Add input Layer Fields to the output Layer if defined in field_names arg.\n inLayerDefn = inputlyr.GetLayerDefn()\n if inLayerDefn.GetFieldIndex(cc.DEV_LAYER_ATTRIBUTE_NAME) == -1:\n print(\"\\tCreating an Attribute '{0}' in vector file '{1}'\".format(cc.DEV_LAYER_ATTRIBUTE_NAME,shapefileName))\n\n inputlyr.CreateField(ogr.FieldDefn(cc.DEV_LAYER_ATTRIBUTE_NAME, ogr.OFTInteger))\n\n for inFeature in inputlyr:\n inFeature.SetField(cc.DEV_LAYER_ATTRIBUTE_NAME,defaultVal)\n inputlyr.SetFeature(inFeature)\n\n inputds.Destroy()\n print(\"\\tCreated an Attribute '{0}' in vector file '{1}'\".format(cc.DEV_LAYER_ATTRIBUTE_NAME,shapefileName))", "def update_tag_default(self, tag_default_id, update_tag_default_details, **kwargs):\n resource_path = \"/tagDefaults/{tagDefaultId}\"\n method = \"PUT\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\",\n \"opc_request_id\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_tag_default got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagDefaultId\": tag_default_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing),\n \"opc-request-id\": kwargs.get(\"opc_request_id\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_default_details,\n response_type=\"TagDefault\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_default_details,\n response_type=\"TagDefault\")", "def _add_default_tags(self):\n self.tags.add_tag('ban', required=True)", "def create_object_parameter_from_default(obj, default):\n values = []\n if default.enum:\n for v in DefaultParameterVl.objects.filter(parameter=default).all():\n values.append({'value' : v.value,\n 'caption' : v.caption})\n return create_object_parameter(obj, 'user', False,\n tp = default.tp,\n name=default.name,\n descr=default.descr,\n values=values)", "def __init__(self, default_value, description, register=None, name=None,\n is_key=False, **kwargs):\n self._default_value = default_value\n self._description = 
description\n self._register = register\n self._name = name\n self._is_key = is_key\n self._kwargs = kwargs\n\n self._value = default_value\n self._frozen = False", "def set_default(self, name, default, group=None):\n opt_info = self._get_opt_info(name, group)\n opt_info['default'] = self._get_enforced_type_value(\n opt_info['opt'], default)\n opt_info['location'] = LocationInfo(\n Locations.set_default,\n _get_caller_detail(3), # this function has a decorator to skip\n )", "def _create_tag_request():\n\n key = helpers.get('Tag.1.Key')\n value = helpers.get('Tag.1.Value')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'createTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key,\n 'tags[0].value': value\n }\n\n response = requester.make_request_async(args)\n\n return response", "def addDefault(self, name, object):\n if name is None:\n raise ValueError(\"Name cannot be None\")\n self.defaultChoice = name\n self.addObject(name, object)", "def __init__(self, name=None, values=None, default_value=None):\n self.swagger_types = {\n 'name': 'str',\n 'values': 'list[TagPropertyAllowedValue]',\n 'default_value': 'str'\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'values': 'values',\n 'default_value': 'defaultValue'\n }\n\n self._name = name\n self._values = values\n self._default_value = default_value", "def var(\n default: Any = RAISE,\n converter: Callable | None = None,\n name: str | None = None,\n validator: Callable | None = None,\n help: str | None = None,\n) -> Any:\n return attr.ib(\n default=default,\n metadata={CNF_KEY: _ConfigEntry(name, default, None, None, help)},\n converter=converter,\n validator=validator,\n )", "def create(self, name, description=None, color=None):\n data = {\n 'name': name,\n 'title': name,\n 'description': description or name,\n 'appearance': {\n 'color': color or random_color()\n }\n }\n # Yes, it's confusing. 
the `/tags/` endpoint is used for labels\n return self._post(\n request=ApiActions.CREATE.value,\n uri=ApiUri.TAGS.value,\n params=data\n )", "def validate_default_element(self, value):\n return self.validate_element(value)", "def validate_default(self, value):\n return self.__validate(value, self.validate_default_element)", "def test_string_default(self):\n tag = Tag()\n self.assertEqual(tag.value, 'default')", "def delete_tag_default(self, tag_default_id, **kwargs):\n resource_path = \"/tagDefaults/{tagDefaultId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_request_id\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_tag_default got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagDefaultId\": tag_default_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-request-id\": kwargs.get(\"opc_request_id\", missing),\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def default_arg(default):\n class DefaultArg(argparse.Action):\n def __call__(self, parser, namespace, value, option_string):\n if value is None:\n setattr(namespace, self.dest, default)\n else:\n setattr(namespace, self.dest, value)\n\n return DefaultArg", "def _set_default(name, value, context):\n if name not in context:\n context[name] = value", "def __init__(self, name=None, value=None):\n default_attr = dict(name=str(),\n value=str())\n self.name = name\n self.value = value\n self._set_default_attr(default_attr)", "def createTag(self, authenticationToken, tag):\r\n pass", "def __init__(self,\n name=None,\n help_text=None,\n fallthroughs=None,\n completer=None,\n completion_request_params=None,\n completion_id_field=None,\n value_type=None,\n parameter_name=None):\n self.attribute_name = name\n self.help_text = help_text\n self.fallthroughs = fallthroughs or []\n # The completer is always None because neither the surface nor the yaml\n # schema allow for specifying completers currently.\n self.completer = completer\n self.completion_request_params = completion_request_params\n self.completion_id_field = completion_id_field\n self.value_type = value_type or six.text_type\n self.parameter_name = parameter_name", "def get_default_value(self, tag, primitive_type, hint=None):\n # initialize\n default_value = self.get_default_value_of_type(primitive_type)\n\n # use example value as default (if exist)\n if self.use_examples_for_default and self.get_examples_values:\n examples_values = 
self.get_examples_values(tag)\n if examples_values:\n default_value = list(examples_values)[0]\n\n # use response value as default (if exist)\n if self.use_response_for_default and self.get_response_values:\n response_values = self.get_response_values(tag, hint)\n if response_values:\n default_value = response_values[0]\n\n return default_value", "def make_tag(tag_name, text='', tag_attr=None):\n if tag_attr is None:\n tag_attr = {}\n\n doc = xml.dom.minidom.Document()\n element = doc.createElement(tag_name)\n if tag_attr:\n for k, v in izip(list(tag_attr.keys()), list(tag_attr.values())):\n element.setAttribute(unicode(k), unicode(v))\n if text:\n text_node = doc.createTextNode(text.strip())\n element.appendChild(text_node)\n return element", "def create_or_get_tag(self, tag_name: str, *args, **kwargs):\n\n tag_data = api.create_or_get_tag(\n tag_name,\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return en.Tag(tag_data)", "def _default(self, section, option, default):\r\n if not self.has_section(section):\r\n self.add_section(section)\r\n if not self.has_option(section, option):\r\n self.set(section, option, default)\r\n self.save()", "def default(default_value, force=False):\n def default_setter(value):\n \"\"\"\n Sets the value to the given default value, assuming the original value\n is not set or the default value is set to forced.\n\n :param Any value: Injected by CKAN core\n :rtype: Any\n \"\"\"\n return value if value and not force else default_value\n\n return default_setter", "def default_value(self, value: Any) -> None:\n self.sdc_resource.set_input_default_value(self, value)\n self._default_value = value", "def createElement(tag,attrib={},text={}):\n element = ET.Element(tag,attrib)\n element.text = text\n return element", "def setdefault(self, value: Any) -> None:\n self.default_factory = value \n return", "def __init__(self, name, default=None, help=None):\n self._name = name.replace('-', '_')\n self._value = default\n self._default = default\n self._help = help", "def validate_default_element(self, value):\n return self.validate_element(value)", "def create_option(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"create_option\")", "def __init__(self,\n number,\n required=False,\n repeated=False,\n variant=None,\n default=None):\n if not isinstance(number, int) or not 1 <= number <= constants.MAX_FIELD_NUMBER:\n raise InvalidNumberError('Invalid number for field: %s'\n '\\nNumber must be 1 or greater and %d or less' %\n (number, constants.MAX_FIELD_NUMBER))\n\n if constants.FIRST_RESERVED_FIELD_NUMBER <= number <= constants.LAST_RESERVED_FIELD_NUMBER:\n raise InvalidNumberError('Tag number %d is a reserved number.\\n'\n 'Numbers %d to %d are reserved' %\n (number, constants.FIRST_RESERVED_FIELD_NUMBER,\n constants.LAST_RESERVED_FIELD_NUMBER))\n\n if repeated and required:\n raise FieldDefinitionError('Cannot set both repeated and required')\n\n if variant is None:\n variant = self.DEFAULT_VARIANT\n\n if repeated and default is not None:\n raise FieldDefinitionError('Repeated fields may not have defaults')\n\n if variant not in self.VARIANTS:\n raise InvalidVariantError('Invalid variant: %s\\n'\n 'Valid variants for %s are %r' %\n (variant, type(self).__name__, sorted(self.VARIANTS)))\n\n self.number = number\n self.required = required\n self.repeated = repeated\n self.variant = variant\n\n if default is not None:\n try:\n self.validate_default(default)\n except ValidationError as err:\n try:\n name = self.name\n except AttributeError:\n # For 
when raising error before name initialization.\n raise InvalidDefaultError('Invalid default value for %s: %r: %s' %\n (self.__class__.__name__, default, err))\n else:\n raise InvalidDefaultError('Invalid default value for field %s:'\n '%r: %s' % (name, default, err))\n\n self.__default = default\n\n if not issubclass(self.__class__, ExpandedField):\n self.__initialized = True", "def BoostDesc_create(desc=None, use_scale_orientation=None, scale_factor=None): # real signature unknown; restored from __doc__\n pass", "def AddDefaultTags(resource_id, region):\n tags = {'owner': FLAGS.owner, 'perfkitbenchmarker-run': FLAGS.run_uri}\n AddTags(resource_id, region, **tags)", "def setdefault(self, value: Any) -> None: # type: ignore\n self.default_factory = value \n return", "def _create_option(\n key: str,\n description: Optional[str] = None,\n default_val: Optional[Any] = None,\n scriptable: bool = False,\n visibility: str = \"visible\",\n deprecated: bool = False,\n deprecation_text: Optional[str] = None,\n expiration_date: Optional[str] = None,\n replaced_by: Optional[str] = None,\n type_: type = str,\n sensitive: bool = False,\n) -> ConfigOption:\n option = ConfigOption(\n key,\n description=description,\n default_val=default_val,\n scriptable=scriptable,\n visibility=visibility,\n deprecated=deprecated,\n deprecation_text=deprecation_text,\n expiration_date=expiration_date,\n replaced_by=replaced_by,\n type_=type_,\n sensitive=sensitive,\n )\n assert (\n option.section in _section_descriptions\n ), 'Section \"%s\" must be one of %s.' % (\n option.section,\n \", \".join(_section_descriptions.keys()),\n )\n assert key not in _config_options_template, 'Cannot define option \"%s\" twice.' % key\n _config_options_template[key] = option\n return option", "def test_with_default() -> None:\n soup = generate_case(\"with_default\")\n\n tests.html_schema_doc_asserts.assert_default_values(soup, ['\"Linux\"', '[\"white\", \"blue\"]', \"2\"])", "def __init__(self,\n number,\n required=False,\n repeated=False,\n variant=None,\n default=None):\n if not isinstance(number, int) or not 1 <= number <= MAX_FIELD_NUMBER:\n raise InvalidNumberError(\n 'Invalid number for field: %s\\n'\n 'Number must be 1 or greater and %d or less' %\n (number, MAX_FIELD_NUMBER))\n\n if FIRST_RESERVED_FIELD_NUMBER <= number <= LAST_RESERVED_FIELD_NUMBER:\n raise InvalidNumberError('Tag number %d is a reserved number.\\n'\n 'Numbers %d to %d are reserved' %\n (number, FIRST_RESERVED_FIELD_NUMBER,\n LAST_RESERVED_FIELD_NUMBER))\n\n if repeated and required:\n raise FieldDefinitionError('Cannot set both repeated and required')\n\n if variant is None:\n variant = self.DEFAULT_VARIANT\n\n if repeated and default is not None:\n raise FieldDefinitionError('Repeated fields may not have defaults')\n\n if variant not in self.VARIANTS:\n raise InvalidVariantError(\n 'Invalid variant: %s\\nValid variants for %s are %r' %\n (variant, type(self).__name__, sorted(self.VARIANTS)))\n\n self.number = number\n self.required = required\n self.repeated = repeated\n self.variant = variant\n\n if default is not None:\n try:\n self.validate_default(default)\n except ValidationError as err:\n try:\n name = self.name\n except AttributeError:\n # For when raising error before name initialization.\n raise InvalidDefaultError(\n 'Invalid default value for %s: %r: %s' %\n (self.__class__.__name__, default, err))\n else:\n raise InvalidDefaultError(\n 'Invalid default value for field %s: '\n '%r: %s' % (name, default, err))\n\n self.__default = default\n 
self.__initialized = True", "def __init__(self, name, values=None, exclusive=False, default=None):\n super(BooleanAttributeSchema, self).__init__(\n name, exclusive=exclusive, default=default\n )\n self.values = set(values or [])\n self.validate_default_value()", "def _update_annotation_with_default(anno, name, default):\n # Create instance if is type class\n complete_annotation = anno\n if _is_dsl_type_cls(anno):\n complete_annotation = anno()\n complete_annotation.name = name\n if default is Input._EMPTY:\n return complete_annotation\n if isinstance(complete_annotation, Input):\n # Non-parameter Input has no default attribute\n if complete_annotation._is_parameter_type and complete_annotation.default is not None:\n # logger.warning(\n # f\"Warning: Default value of f{complete_annotation.name!r} is set twice: \"\n # f\"{complete_annotation.default!r} and {default!r}, will use {default!r}\"\n # )\n pass\n complete_annotation._update_default(default)\n return complete_annotation", "def create(self, name, tag):\n\n\t\turl_json = urllib.urlencode({\"name\": name, \"tag\": tag})\n\t\treturn self._create(\"/tag?json_hash=%s\" % url_json, \"tag\")", "def create_default(cls):\n raise NotImplementedError(common.OVERRIDE_MESSAGE)", "def register_tag(name: str, config: Dict[str, Any], override: bool = False) -> Optional[Target]:\n if hasattr(_ffi_api, \"TargetTagAddTag\"):\n return _ffi_api.TargetTagAddTag(name, config, override)\n return None", "def add_parameter(self, paramId, dataType, default, valRange=None, label=\"Parameter\"):\n self.inputs[paramId] = {\n 'label': label,\n 'entry': None,\n 'value': None,\n 'valRange': valRange,\n 'dataType': dataType,\n 'default': default\n }", "def _create_element(tag, text=\"\", attr={}, namespace=Xmlns_path):\n element = Et.Element('.//' + namespace + tag, attr)\n element.text = text\n return element", "def default(self, default):\n\n self._set_field(\"value\", default)", "def default_spec(\n tag_or_pred: Union[Tag, SpecPredicate],\n *preds: SpecPredicate,\n default: Any = None,\n conformer: Optional[Conformer] = None,\n) -> Spec:\n tag, preds = tag_maybe(tag_or_pred, *preds)\n\n if len(preds) != 1:\n raise ValueError(\"Must provide exactly one Spec predicate for 'default' Specs\")\n\n return any_spec(\n tag or \"default\",\n preds[0],\n every_spec(conformer=lambda _: default),\n conformer=conformer,\n )", "def validate_default(self, value):\n return self.__validate(value, self.validate_default_element)", "def create_input_element(self, **kwargs):\r\n return None", "def help_default_values():\n click.echo_via_pager(docgen.generate_default_value_help())", "def createTagValue(creatorID, tagID, objectID, value):\n store = getMainStore()\n return store.add(TagValue(creatorID, tagID, objectID, value))", "def BoolVariable(key, help, default):\n return (key, '%s (yes|no)' % help, default,\n _validator, _text2bool)", "def set_default(self, default):\n\n\t\tif default is not None and not isinstance(default, bool):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: default EXPECTED TYPE: bool', None, None)\n\t\t\n\t\tself.__default = default\n\t\tself.__key_modified['default'] = 1", "def validate_default_value(self):\n if self.has_default_value:\n if not self.is_valid_value(self.default):\n raise AttributeSchemaError(\n \"Default value '%s' is not compliant with the schema\"\n )", "def parameter(self, name, doc, default = None):\n self._parameters.append((name, doc.strip(), default))\n return self", "def __init__(\n self,\n *,\n type: str = \"boolean\",\n 
default: bool = None,\n optional: bool = None,\n description: str = None,\n **kwargs,\n ):\n pass", "def test_alright_when_required_field_is_missing_but_default_is_given():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True,\n 'default': 'portuguese'},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True}}\n product1 = {'source': ['Whatever']}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. No exceptions were raised.", "def _update_annotation_with_default(\n anno: Union[Annotation, Input, Output], name: str, default: Any\n ) -> Union[Annotation, Input, Output]:\n # Create instance if is type class\n complete_annotation = anno\n if _is_dsl_type_cls(anno):\n complete_annotation = anno()\n complete_annotation._port_name = name\n if default is Input._EMPTY:\n return complete_annotation\n if isinstance(complete_annotation, Input):\n # Non-parameter Input has no default attribute\n if complete_annotation._is_primitive_type and complete_annotation.default is not None:\n # logger.warning(\n # f\"Warning: Default value of f{complete_annotation.name!r} is set twice: \"\n # f\"{complete_annotation.default!r} and {default!r}, will use {default!r}\"\n # )\n pass\n complete_annotation._update_default(default)\n if isinstance(complete_annotation, Output) and default is not None:\n msg = (\n f\"Default value of Output {complete_annotation._port_name!r} cannot be set:\"\n f\"Output has no default value.\"\n )\n raise UserErrorException(msg)\n return complete_annotation", "def addElement(name, defaultUri=None, content=None):", "def for_tag(self, tag):\n if not isinstance(tag, str):\n raise TypeError('Tag must be a string')\n\n self.token['requiredTag'] = tag\n\n return self", "def _defaulted(cls, value, default):\n return default if value is None else value", "def setdefault(self, k, d=None): # real signature unknown; restored from __doc__\n pass", "def __init__(\n self,\n *,\n name: str,\n label: str,\n optional: bool = False,\n value: str = None,\n placeholder: str = None,\n ):\n super().__init__(\n name=name,\n label=label,\n optional=optional,\n value=value,\n placeholder=placeholder,\n )", "def __init__(\n self,\n *,\n name: str,\n label: str,\n optional: bool = False,\n value: str = None,\n placeholder: str = None,\n ):\n super().__init__(\n name=name,\n label=label,\n optional=optional,\n value=value,\n placeholder=placeholder,\n )", "def __init__(\n self,\n *,\n name: str,\n label: str,\n optional: bool = False,\n value: str = None,\n placeholder: str = None,\n ):\n super().__init__(\n name=name,\n label=label,\n optional=optional,\n value=value,\n placeholder=placeholder,\n )", "def is_default(self):\n return self._tag == 'default'", "def __init__(self, concept_ref='', value=''):\n self.concept_ref = concept_ref\n self.value = value", "def __init__(__self__, *,\n default_resource_request: Optional[str] = None,\n vault_critical_operation: Optional[str] = None):\n if default_resource_request is not None:\n pulumi.set(__self__, \"default_resource_request\", default_resource_request)\n if vault_critical_operation is not None:\n pulumi.set(__self__, \"vault_critical_operation\", vault_critical_operation)", "def default(self, value):\n # save {value} as the default\n self._default = value\n # all done\n return", "def __init__(self, attr=None, default=None, help_text=None):\r\n\r\n self._attribute = attr\r\n self._field_name = None\r\n self._default = default\r\n\r\n if help_text:\r\n self.help_text 
= help_text", "def create_option(self, label, value=None):\n option = Option(label, value)\n self.append(option)\n return option", "def test_add_defined_tag_to_bucket(self, test, object_storage, with_or_without_compartment):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"add-defined-tag-to-bucket\",\n \"resource\": \"oci.bucket\",\n \"query\": [\n {\"namespace_name\": namespace_name},\n ],\n \"filters\": [\n {\"type\": \"value\", \"key\": \"name\", \"value\": bucket_name},\n ],\n \"actions\": [{\"type\": \"update\", \"defined_tags\": self.get_defined_tag(\"add_tag\")}],\n },\n session_factory=session_factory,\n )\n policy.run()\n resource = self._fetch_bucket_validation_data(\n policy.resource_manager, namespace_name, bucket_name\n )\n test.assertEqual(resource[\"name\"], bucket_name)\n test.assertEqual(self.get_defined_tag_value(resource[\"defined_tags\"]), \"true\")", "def create_option(self) -> Optional[pulumi.Input[Union[str, 'OsDiskCreateOption']]]:\n return pulumi.get(self, \"create_option\")", "def __init__(self, definition=None, do_validate=False):\n\n if not definition:\n definition = {}\n self._definition = definition\n\n if 'version' not in self._definition:\n self._definition['version'] = DEFAULT_VERSION\n\n if self._definition['version'] != DEFAULT_VERSION:\n msg = '%s is an unsupported version number'\n raise InvalidDefinition('INVALID_DEFINITION', msg % self._definition['version'])\n\n self._populate_default_values()\n\n try:\n if do_validate:\n validate(definition, RECIPE_DEFINITION_SCHEMA)\n except ValidationError as ex:\n raise InvalidDefinition('INVALID_DEFINITION', 'Invalid recipe definition: %s' % unicode(ex))", "def create_type(name, description, metadata, force):\n type_ = orm.DataFlagType()\n\n type_.name = name\n type_.description = description\n type_.metadata = metadata\n\n if force:\n type_.save()\n else:\n click.echo(\"Type to create:\\n\")\n click.echo(format_type(type_))\n if click.confirm(\"Create type?\"):\n type_.save()\n click.echo(\"Success.\")\n else:\n click.echo(\"Aborted.\")", "def get_tag_default(self, tag_default_id, **kwargs):\n resource_path = \"/tagDefaults/{tagDefaultId}\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"get_tag_default got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagDefaultId\": tag_default_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"TagDefault\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"TagDefault\")", "def 
getDefault():", "def new(cls, default=None):\n return cls(default=default)", "def __init__(__self__, *,\n name: Optional[str] = None,\n value: Optional[str] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if value is not None:\n pulumi.set(__self__, \"value\", value)", "def __init__(__self__, *,\n name: Optional[str] = None,\n value: Optional[str] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if value is not None:\n pulumi.set(__self__, \"value\", value)", "def __init__(self, tagged_sents, default_tag='nc0s000'):\n self._default_tag = default_tag", "def create_tag(create_timestamps):\n ctx = dash.callback_context\n triggered_id, triggered_prop, triggered_value = utils.ctx_triggered_info(ctx)\n\n # When the button is initially added, it fires a callback.\n # We want to prevent this callback from making changes to the update signal.\n if triggered_value is None:\n raise PreventUpdate\n\n state.create_tag(\"\")\n return constants.OK_SIGNAL", "def test_default_creation_2():\n actual = os.path.join('.', 'test_files', 'rc_test_default.input')\n times = list(range(0, 30, 5))\n params = {\"names\": ['V'],\n \"values\": [\n [1],\n [0],\n [-1],\n [0],\n [1]\n ]\n }\n input_creator = InputCreator(None, times, params)\n f_out = input_creator.default_creation_2()\n with open(actual) as f_actual:\n actual_content = f_actual.read()\n\n content = f_out.getvalue()\n\n assert_equal(content, actual_content)", "def __init__(__self__, *,\n default_resource_request: Optional[pulumi.Input[str]] = None,\n vault_critical_operation: Optional[pulumi.Input[str]] = None):\n if default_resource_request is not None:\n pulumi.set(__self__, \"default_resource_request\", default_resource_request)\n if vault_critical_operation is not None:\n pulumi.set(__self__, \"vault_critical_operation\", vault_critical_operation)", "def register_def(self, name, default, def_text):\n\n _type = self.type_by_name[name]\n _type.name = name\n _def = _type.register_def_raw(default, def_text)\n self.def_by_hash[_def.hash] = _def\n return _def", "def tag(\n name: str, value: str, *, attributes: dict = {}, namespace: str = None\n ) -> str:\n attributes_str = \" \".join(f'{k}=\"{v}\"' for k, v in attributes.items())\n ns = f\"{namespace}:\" if namespace else \"\"\n if attributes_str:\n attributes_str = \" \" + attributes_str\n if value:\n return f\"<{ns}{name}{attributes_str}>{value}</{ns}{name}>\"\n return f\"<{ns}{name}{attributes_str} />\"", "def create_default_network(context):\n return [{\n 'type': 'templates/network.py',\n 'name': 'fc-network',\n 'properties': {\n 'resourceName': 'network',\n 'name': 'network',\n 'projectId': '$(ref.fc-project.projectId)',\n 'autoCreateSubnetworks': True,\n # We pass the dependsOn list into the network template as a\n # parameter. 
Deployment Manager doesn't support dependsOn for\n # template-call nodes, so we can't have this resource itself depend on\n # the project-wide resources.\n 'dependsOn': '$(ref.fc-project.resourceNames)',\n },\n }]", "def create_tag(self, entry_name, tag):\n return self.__datacatalog.create_tag(parent=entry_name, tag=tag)", "def f_default(self, default = 1) :\n pass", "def default():", "def make_new_tag(tag_name, user_path, user_signed_in, current_user):\n if not user_signed_in:\n print('ALERT: -- User not logged in --')\n else:\n user = current_user[0]\n print(is_tag(tag_name, user_path, current_user))\n if is_tag(tag_name, user_path, current_user):\n print('Tag already exist')\n else:\n os.mkdir((user_path + '\\\\' + user + '\\\\' + tag_name).encode('unicode_escape'))\n print('Tag --' + tag_name + '-- Created')", "def add_tag(self, obj, tag_name):\r\n tag_names = parse_tag_input(tag_name)\r\n if not len(tag_names):\r\n raise AttributeError(_('No tags were given: \"%s\".') % tag_name)\r\n if len(tag_names) > 1:\r\n raise AttributeError(_('Multiple tags were given: \"%s\".') % tag_name)\r\n tag_name = tag_names[0]\r\n if settings.FORCE_LOWERCASE_TAGS:\r\n tag_name = tag_name.lower()\r\n tag, created = self.get_or_create(name=tag_name)\r\n ctype = ContentType.objects.get_for_model(obj)\r\n TaggedItem._default_manager.get_or_create(\r\n tag=tag, content_type=ctype, object_id=obj.pk)", "def group(cls: type[T], optional: bool = False) -> T:\n default = None if optional else RAISE\n return attr.ib(\n default=default,\n metadata={CNF_KEY: _ConfigEntry(None, default, cls, True)},\n )", "def test_no_default_value(self):\n dim = Real(\"yolo\", \"uniform\", -3, 4)\n assert dim.default_value is None", "def test_no_default_value(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4)\n assert dim.default_value is None", "def pre_config_node_create(self, resource_dict):\n pass", "def test_config_option_required_default():\n class Config(config.Config):\n a = config.option(int, required=True, default=12, help=\"\")\n\n c = config.structure({}, Config)\n assert c.a == 12\n\n c = config.structure({\"a\": 23}, Config)\n assert c.a == 23\n\n with pytest.raises(config.ConfigError):\n config.structure({\"a\": None}, Config)", "def _ValueOrPlaceHolder(value_string, description):\n value_element = xml.etree.ElementTree.Element('value')\n value_element.set('xml:lang', _VALUE_LANGUAGE)\n\n if value_string:\n value_element.text = value_string\n else:\n value_element.text = '** INSERT %s **' % description\n\n return value_element" ]
[ "0.5894807", "0.55696887", "0.548009", "0.52427024", "0.5172015", "0.5163364", "0.5069004", "0.50466603", "0.50242513", "0.5003325", "0.4976948", "0.49649096", "0.49576932", "0.49290386", "0.4914476", "0.49040148", "0.49038035", "0.48990166", "0.48935652", "0.48932627", "0.48672876", "0.4841067", "0.48255783", "0.4821933", "0.48154172", "0.47908288", "0.4787615", "0.47782978", "0.47718528", "0.47643954", "0.47606185", "0.4756601", "0.47514093", "0.47409117", "0.4735763", "0.47286236", "0.47235686", "0.4711209", "0.470434", "0.46912426", "0.46839556", "0.4676166", "0.46750492", "0.46737662", "0.4664757", "0.4662496", "0.46623552", "0.4660885", "0.4638475", "0.4638237", "0.46371958", "0.4630583", "0.46270856", "0.46233627", "0.46211612", "0.46194953", "0.46092317", "0.46084744", "0.46082532", "0.4588523", "0.45783314", "0.45750815", "0.4572613", "0.4570522", "0.45654744", "0.45654744", "0.45654744", "0.4557074", "0.45506805", "0.45341545", "0.45288453", "0.4528628", "0.45263708", "0.45211488", "0.45160273", "0.45140576", "0.45130396", "0.45086455", "0.45001143", "0.4492379", "0.44881603", "0.44881603", "0.44860205", "0.44826436", "0.44822168", "0.44717", "0.44682777", "0.44647133", "0.44633415", "0.44604358", "0.44571733", "0.44563684", "0.44535297", "0.44506803", "0.44500577", "0.44455424", "0.44450638", "0.4443447", "0.44430986", "0.4436986" ]
0.64231205
0
Creates a new tag namespace in the specified compartment. You must specify the compartment ID in the request object (remember that the tenancy is simply the root compartment). You must also specify a name for the namespace, which must be unique across all namespaces in your tenancy and cannot be changed. The name can contain any ASCII character except the space or period (.). Names are case insensitive. That means, for example, \"myNamespace\" and \"mynamespace\" are not allowed in the same tenancy. Once you've created a namespace, you cannot change the name. If you specify a name that's already in use in the tenancy, a 409 error is returned. You must also specify a description for the namespace. It does not have to be unique, and you can change it with update_tag_namespace.
def create_tag_namespace(self, create_tag_namespace_details, **kwargs): resource_path = "/tagNamespaces" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_tag_namespace got unknown kwargs: {!r}".format(extra_kwargs)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, header_params=header_params, body=create_tag_namespace_details, response_type="TagNamespace") else: return self.base_client.call_api( resource_path=resource_path, method=method, header_params=header_params, body=create_tag_namespace_details, response_type="TagNamespace")
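A minimal usage sketch for the method above, assuming the OCI Python SDK (oci package) with credentials in the default ~/.oci/config profile; the compartment OCID, namespace name, and description are placeholder values, not taken from this dataset:

import oci

# Assumption: a valid DEFAULT profile exists in ~/.oci/config.
config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Placeholder OCID and names -- substitute real values for your tenancy.
details = oci.identity.models.CreateTagNamespaceDetails(
    compartment_id="ocid1.tenancy.oc1..exampleuniqueID",
    name="Operations",
    description="Tags used by the operations team",
)

# create_tag_namespace returns a Response whose .data is a TagNamespace model.
tag_namespace = identity.create_tag_namespace(details).data
print(tag_namespace.id, tag_namespace.lifecycle_state)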
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_namespace_create(self, resource_dict):\n pass", "def create_namespace(node, namespace, delete_before_create=True):\n if delete_before_create:\n Namespaces.delete_namespace(node, namespace)\n\n cmd = f\"ip netns add {namespace}\"\n exec_cmd_no_error(node, cmd, sudo=True)\n Namespaces.__namespaces.append(namespace)", "def createNamespace(self):\r\n raise NotImplementedError('Endpoint can not be used directly.')", "def create_namespaced_net_namespace(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_net_namespace`\")\n\n resource_path = '/oapi/v1/netnamespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NetNamespace',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def change_tag_namespace_compartment(self, tag_namespace_id, change_tag_namespace_compartment_detail, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}/actions/changeCompartment\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"change_tag_namespace_compartment got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n 
resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=change_tag_namespace_compartment_detail)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=change_tag_namespace_compartment_detail)", "def create_namespaced_namespace(self, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_namespaced_namespace_with_http_info(body, **kwargs)\n else:\n (data) = self.create_namespaced_namespace_with_http_info(body, **kwargs)\n return data", "def create_namespaced_namespace_with_http_info(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_namespace`\")\n\n resource_path = '/api/v1/namespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Namespace',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "def create(cls, ns, name, **kwargs):\n key_name = '%s:%s' % (ns, name)\n return cls(key_name=key_name, ns=ns, name=name, **kwargs)", "def create (self, name, dsspolicyguid, jobguid = \"\", executionparams = {}):\n params =dict()\n params['name'] = name\n params['dsspolicyguid'] = dsspolicyguid\n executionparams['rootobjecttype'] = 'dssnamespace'\n\n \n return q.workflowengine.actionmanager.startRootobjectAction('dssnamespace', 'create', params, jobguid=jobguid, executionparams=executionparams)", "def create_tag(self, tag_namespace_id, create_tag_details, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}/tags\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_tag got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and 
len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_tag_details,\n response_type=\"Tag\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_tag_details,\n response_type=\"Tag\")", "def _create_namespace(self):\n self.ocp.new_project(self.namespace)", "def create_namespace(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"create_namespace\")", "def create_namespace(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"create_namespace\")", "def create_namespace(self, name, status_wait=True):\n name = name or self.generate_random_name()\n\n manifest = {\n \"apiVersion\": \"v1\",\n \"kind\": \"Namespace\",\n \"metadata\": {\n \"name\": name,\n \"labels\": {\n \"role\": name\n }\n }\n }\n self.v1_client.create_namespace(body=manifest)\n\n if status_wait:\n with atomic.ActionTimer(self,\n \"kubernetes.wait_for_nc_become_active\"):\n wait_for_status(name,\n status=\"Active\",\n read_method=self.get_namespace)\n return name", "def test_namespace_bucket_creation_rpc(\n self, ns_resource_factory, bucket_factory, platform\n ):\n # Create the namespace resource and verify health\n ns_resource_name = ns_resource_factory(platform=platform)[1]\n\n # Create the namespace bucket on top of the namespace resource\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resource_name,\n read_ns_resources=[ns_resource_name],\n )", "def replace_namespaced_net_namespace(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_net_namespace`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_net_namespace`\")\n\n resource_path = '/oapi/v1/netnamespaces/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP 
header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NetNamespace',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_create_namespaced_deployment_request_instantiate(self):\n pass", "def test_namespace_bucket_creation_with_rgw_rpc(\n self, ns_resource_factory, bucket_factory, rgw_deployments\n ):\n # Create the namespace resource and verify health\n ns_resource_name = ns_resource_factory(platform=constants.RGW_PLATFORM)[1]\n\n # Create the namespace bucket on top of the namespace resource\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resource_name,\n read_ns_resources=[ns_resource_name],\n )", "def test_create_net_namespace(self):\n pass", "def sync_namespace(alias, reg_code, authToken, space=None, action=None):\n if space == None:\n action = 'get'\n print(\" ACTION: GET\")\n elif action == None:\n if 'aeskey' not in space:\n print(\"Space not encrypted\")\n quit()\n action = 'update'\n print(\" ACTION: UPDATE\")\n elif action == 'delete':\n print(\" ACTION: DELETE\")\n url = endpoint('namespace')\n headers={'authorizationToken': authToken}\n data = json.dumps({'action': action, 'alias': alias, 'reg_code': reg_code, 'namespace': space})\n payload_size = sys.getsizeof(data)\n print(\" Size of payload is: %s\" % (convert_size(payload_size)))\n print(\" Max payload is: %s\" % (convert_size(max_payload_size)))\n if payload_size >= max_payload_size:\n print(\" OVER MAX PAYLOAD: %s\" % (convert_size(max_payload_size)))\n quit()\n r = requests.post(url, headers=headers, data=data) \n print(\" Request made\")\n if r.status_code == 403:\n print(\" Invalid registration code, exiting\")\n quit()\n elif r.status_code == 406:\n print(\" Namespace mismatch\")\n quit()\n else:\n print(\" └──statusCode:\" + str(r.status_code) )\n return r", "def pre_namespace_create(self, resource_dict):\n pass", "def create_resource(self, namespace: \"str\" = None):\n names = [\"create_namespaced_csistorage_capacity\", \"create_csistorage_capacity\"]\n\n _kube_api.execute(\n action=\"create\",\n resource=self,\n names=names,\n namespace=namespace,\n api_client=None,\n api_args={\"body\": self.to_dict()},\n )", "def _AddCreatedNamespace(self, state_tracker, identifier, line_number,\n namespace=None):\n if not namespace:\n namespace = identifier\n\n if self._HasSuppression(state_tracker, 'missingProvide'):\n return\n\n self._created_namespaces.append([namespace, identifier, line_number])", "def create_client_by_namespace(\n body: ClientmodelClientCreateRequest,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = CreateClientByNamespace.create(\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def register_namespace(alias, reg_code, pubKey=None, password=None):\n print( \" Registering namespace: 
%s\" % (alias) )\n if pubKey == None:\n generate_keys()\n pubKey = os.environ[\"pubKey\"]\n\n if check_lspace() == True:\n print(\" Device already registred to a namespace\")\n return False\n \n url = endpoint('register')\n payload = json.dumps({\n \"action\": \"register\",\n \"alias\": alias,\n \"reg_code\": reg_code,\n \"pubKey\": pubKey\n })\n r = requests.post(url, data=payload) \n statusCode = r.status_code\n content = json.loads(r.content)\n\n if statusCode == 201:\n print(\" Namespace registered succesfully\")\n namespace = content[\"namespace\"]\n namespace[\"privKey\"] = os.environ[\"privKey\"]\n save_lspace(namespace, password)\n else:\n print(\" Something went wrong - %s\" % (statusCode))\n quit()\n\n return statusCode", "def replace_namespaced_namespace_with_http_info(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_namespace`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_namespace`\")\n\n resource_path = '/api/v1/namespaces/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Namespace',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "def new_namespace(key):\n\tif key in REGISTRY:\n\t\traise KeyError(\"key:{0} already exists\".format(key))\n\n\tREGISTRY[key] = Namespace()", "def create_or_fetch_namespace(self):\n\n def _create_new_namespace():\n logger.info(\n f\"Creating a new namespace: {self.namespace_name} in {self.namespace_region}\"\n )\n\n data = {\n \"name\": self.namespace_name,\n \"resource_group_id\": self.resource_group_id,\n \"resource_plan_id\": \"functions-base-plan\",\n }\n\n res = requests.post(\n self.cf_namespaces_url, headers=self.get_headers(), json=data\n ).json()\n if res.status_code != 200:\n logger.error(res.text)\n namespace_id = res[\"id\"]\n logger.info(f\"Created new namespace with id: {namespace_id}\")\n return namespace_id\n\n def _get_cloud_function_namespaces_metadata(offset=0):\n \"\"\"returns meta data 
on namespaces of ibm cloud functions within a specified region\n :param offset - offset from the beginning of the list of results attained from the GET request,\n which may contain up to 200 namespaces per http response\"\"\"\n\n res = requests.get(\n f\"{self.cf_namespaces_url}?limit=200&offset={offset}\",\n headers=self.get_headers(),\n )\n return json.loads(res.text)\n\n def _get_cloud_function_namespaces():\n \"\"\"returns relevant metadata on existing namespaces within a given region.\"\"\"\n logger.info(\n f\"Obtaining Cloud Function namespaces in {self.namespace_region}\"\n )\n\n namespaces = []\n\n collecting_namespaces = True\n max_limit = 200\n offset = 0\n\n # request for namespaces is limited to 200 at a time, thus the request is fulfilled in increments of 200s.\n while collecting_namespaces:\n namespace_metadata = _get_cloud_function_namespaces_metadata(offset)\n if namespace_metadata[\"total_count\"] == max_limit:\n offset += max_limit\n else:\n collecting_namespaces = False\n\n for name_space in namespace_metadata[\"namespaces\"]:\n if \"name\" in name_space: # API based namespace\n namespaces.append(\n {\n \"name\": name_space[\"name\"],\n \"type\": \"API_based\",\n \"id\": name_space[\"id\"],\n \"region\": name_space[\"location\"],\n }\n )\n\n else: # cloud foundry based namespace\n namespaces.append(\n {\n \"name\": name_space[\"id\"],\n \"type\": \"CF_based\",\n \"region\": name_space[\"location\"],\n }\n )\n\n return namespaces\n\n namespaces_in_region = _get_cloud_function_namespaces()\n target_namespace_id = None\n if namespaces_in_region:\n target_namespace_id = next(\n (\n namespace[\"id\"]\n for namespace in namespaces_in_region\n if namespace[\"name\"] == self.namespace_name\n ),\n None,\n )\n if not target_namespace_id:\n target_namespace_id = _create_new_namespace()\n else:\n logger.info(f\"Reusing namespace: {target_namespace_id}\")\n return target_namespace_id", "def createNamespace(self):\r\n if self._namespaces:\r\n raise InternalError('Can not have more than one namespace '\r\n 'in an Environment endpoint at a time.')\r\n\r\n return Environment(self)", "async def create_client_by_namespace_async(\n body: ClientmodelClientCreateRequest,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = CreateClientByNamespace.create(\n body=body,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def __setattr__(self, name, value):\n if not isinstance(name, str):\n raise ValueError('Namespace label must be a string')\n if name.startswith('_'):\n raise ValueError('Namespace cannot start with an underscore')\n\n if name in self._namespaces:\n raise ValueError('Namespaces cannot be redefined')\n\n self._namespaces[name] = Namespace(name, label=value)", "def test_create_namespaced_deployment_config(self):\n pass", "def newNs(self, href, prefix):\n ret = libxml2mod.xmlNewNs(self._o, href, prefix)\n if ret is None:raise treeError('xmlNewNs() failed')\n __tmp = xmlNs(_obj=ret)\n return __tmp", "def test_create_namespaced_template(self):\n pass", "def patch_namespaced_net_namespace(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument 
'%s'\"\n \" to method patch_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_net_namespace`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_net_namespace`\")\n\n resource_path = '/oapi/v1/netnamespaces/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NetNamespace',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def ReplaceNamespace(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceNamespace')\n return self._RunMethod(\n config, request, global_params=global_params)", "def ReplaceNamespace(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceNamespace')\n return self._RunMethod(\n config, request, global_params=global_params)", "def update():\n for namespace in metadata.get_namespaces():\n logging.info('Switching namespace: \\'%s\\'', namespace)\n namespace_manager.set_namespace(namespace)\n update_per_namespace()\n\n namespace_manager.set_namespace('')\n return ('', 204)", "async def add_namespace(self, namespace: str, **kwargs) -> Union[str, None]:\n if namespace == self.get_namespace(): # if it belongs to this app's namespace\n raise ValueError(\"Cannot add namespace with the same name as operating namespace\")\n\n writeback = kwargs.get(\"writeback\", \"safe\")\n persist = kwargs.get(\"persist\", True)\n\n return await self.AD.state.add_namespace(namespace, writeback, persist, self.name)", "def update_tag_namespace(self, tag_namespace_id, update_tag_namespace_details, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}\"\n method = \"PUT\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_tag_namespace got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n 
\"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_namespace_details,\n response_type=\"TagNamespace\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_namespace_details,\n response_type=\"TagNamespace\")", "def _configure_namespaces(api):\n\t#{{cookiecutter.app_name}}_namespace\n\tapi.add_namespace({{cookiecutter.app_name}}_namespace)", "def addNamespace(self, *args):\n return _libsbml.XMLToken_addNamespace(self, *args)", "async def save_namespace(self, **kwargs) -> None:\n namespace = self._get_namespace(**kwargs)\n await self.AD.state.save_namespace(namespace)", "def add_namespaces(specification):\n\n for ns in specification[\"namespaces\"]:\n specification[\"namespaces\"][ns][\"list\"] = []\n specification[\"namespaces\"][ns][\"list_long\"] = []\n specification[\"namespaces\"][ns][\"list_short\"] = []\n\n specification[\"namespaces\"][ns][\"to_short\"] = {}\n specification[\"namespaces\"][ns][\"to_long\"] = {}\n\n for obj in specification[\"namespaces\"][ns][\"info\"]:\n specification[\"namespaces\"][ns][\"list\"].extend([obj[\"name\"], obj[\"abbreviation\"]])\n specification[\"namespaces\"][ns][\"list_short\"].append(obj[\"abbreviation\"])\n specification[\"namespaces\"][ns][\"list_long\"].append(obj[\"name\"])\n\n specification[\"namespaces\"][ns][\"to_short\"][obj[\"abbreviation\"]] = obj[\"abbreviation\"]\n specification[\"namespaces\"][ns][\"to_short\"][obj[\"name\"]] = obj[\"abbreviation\"]\n\n specification[\"namespaces\"][ns][\"to_long\"][obj[\"abbreviation\"]] = obj[\"name\"]\n specification[\"namespaces\"][ns][\"to_long\"][obj[\"name\"]] = obj[\"name\"]\n\n # For AminoAcid namespace\n if \"abbrev1\" in obj:\n specification[\"namespaces\"][ns][\"to_short\"][obj[\"abbrev1\"]] = obj[\"abbreviation\"]\n specification[\"namespaces\"][ns][\"to_long\"][obj[\"abbrev1\"]] = obj[\"name\"]", "def create_uuid3(namespace, name):\n return uuid.uuid3(namespace, six.ensure_str(name))", "def test_create_namespaced_build_request_instantiate(self):\n pass", "def create_namespaced_service(self, body, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_namespaced_service_with_http_info(body, namespace, **kwargs)\n else:\n (data) = self.create_namespaced_service_with_http_info(body, namespace, **kwargs)\n return data", "def set_namespace(key, dic):\n\tnew_namespace(key)\n\tREGISTRY[key] = Namespace(dic)", "def setup_namespace(zone, prefix, org, org_owner, verbose=False):\n\n # process args\n if org and not org_owner:\n if verbose:\n print('Must provide both organization name and organization owner arguments')\n return 2\n \n if not zone:\n zone = get_local_zone(verbose=verbose)\n if not zone:\n return 2\n\n if prefix and prefix.find('/' + zone) != 0:\n if verbose:\n print('Prefix %s not within zone %s' % (prefix, zone))\n return 2\n\n if prefix and org:\n path_prefix = os.path.join(prefix, org)\n elif org:\n path_prefix = os.path.join('/', zone, org)\n else:\n path_prefix = os.path.join('/', zone)\n\n if org:\n org_group = 'ids-%s#%s' % (org, zone)\n if 
org_owner and org_owner.find('#') != -1:\n org_user = org_owner\n elif org_owner:\n org_user = '%s#%s' % (org_owner, zone)\n else:\n org_group = None\n org_user = None\n\n\n # check if the anonymous user is defined within the zone\n # if so, it will be given a 'read' ACL on the same\n # collections as the 'public' group\n include_anonymous = False\n if irods_user_exists('anonymous#%s' % (zone,)) > 0:\n include_anonymous = True\n\n\n # top-level '/zone/organization' or '/prefix/organization' collection.\n # 'public' and 'anonymous' get read ACLs so browsing can work\n if org:\n if verbose:\n print(' creating collection %s' % (path_prefix,))\n rc = irods_mkdir(path_prefix, verbose)\n if rc:\n return rc\n acl_list = [['public#%s' % (zone,), 'read'], [org_user, 'own']]\n if include_anonymous:\n acl_list.append(['anonymous#%s' % (zone,), 'read'])\n if verbose:\n print(' setting default ACLs on %s' % (path_prefix,))\n rc = irods_setacls(path_prefix, acl_list, verbose)\n if rc:\n return rc\n else:\n # if we're setting up '/zone', then we need need to make sure\n # that 'public' and 'anonymous' can read and '/' and '/zone' so \n # browsing will work\n acl_list = [['public#%s' % (zone,), 'read'],]\n if include_anonymous:\n acl_list.append(['anonymous#%s' % (zone,), 'read'])\n if verbose:\n print(' setting default ACLs on %s' % (path_prefix,))\n rc = irods_setacls('/', acl_list, verbose)\n if rc:\n return rc\n rc = irods_setacls(path_prefix, acl_list, verbose)\n if rc:\n return rc\n\n\n # The following sub-collections are created, with the\n # listed ACLs:\n #\n # top-level/private - organizational group read ACL\n # top-level/shared - 'ids-user' gets read ACL\n # top-level/public - 'public' gets read ACL\n #\n\n # private\n path = os.path.join(path_prefix, 'private')\n if verbose:\n print(' creating collection %s' % (path,))\n rc = irods_mkdir(path, verbose)\n if rc:\n return rc\n if org:\n # won't add this ACL if we're doing /zone/private\n acl_list = [[org_group, 'read'], [org_user, 'own']]\n if verbose:\n print(' setting default ACLs on %s' % (path,))\n rc = irods_setacls(path, acl_list, verbose)\n if rc:\n return rc\n\n # shared\n path = os.path.join(path_prefix, 'shared')\n if verbose:\n print(' creating collection %s' % (path,))\n rc = irods_mkdir(path, verbose)\n if rc:\n return rc\n acl_list = [['ids-user#%s' % (zone,), 'read'],]\n if org:\n acl_list.append([org_user, 'own'])\n if verbose:\n print(' setting default ACLs on %s' % (path,))\n rc = irods_setacls(path, acl_list, verbose)\n if rc:\n return rc\n\n # public\n path = os.path.join(path_prefix, 'public')\n if verbose:\n print(' creating collection %s' % (path,))\n rc = irods_mkdir(path, verbose)\n if rc:\n return rc\n acl_list = [['public#%s' % (zone,), 'read'],]\n if include_anonymous:\n acl_list.append(['anonymous#%s' % (zone,), 'read'])\n if org:\n acl_list.append([org_user, 'own'])\n if verbose:\n print(' setting default ACLs on %s' % (path,))\n rc = irods_setacls(path, acl_list, verbose)\n if rc:\n return rc\n\n \n return 0", "def __init__(self, default_ns, namespaces=[]):\n self.document = prov.ProvDocument ()\n self.default_ns = default_ns\n self.document.set_default_namespace (self.default_ns)\n self.namespaces = namespaces\n self.subspaces = {}\n for namespace in self.namespaces:\n self.subspaces[namespace] = self.add_namespace (self.default_ns, namespace)", "def test_add_defined_tag_to_bucket(self, test, object_storage, with_or_without_compartment):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n 
session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"add-defined-tag-to-bucket\",\n \"resource\": \"oci.bucket\",\n \"query\": [\n {\"namespace_name\": namespace_name},\n ],\n \"filters\": [\n {\"type\": \"value\", \"key\": \"name\", \"value\": bucket_name},\n ],\n \"actions\": [{\"type\": \"update\", \"defined_tags\": self.get_defined_tag(\"add_tag\")}],\n },\n session_factory=session_factory,\n )\n policy.run()\n resource = self._fetch_bucket_validation_data(\n policy.resource_manager, namespace_name, bucket_name\n )\n test.assertEqual(resource[\"name\"], bucket_name)\n test.assertEqual(self.get_defined_tag_value(resource[\"defined_tags\"]), \"true\")", "def create_namespaced_identity(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_identity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_identity`\")\n\n resource_path = '/oapi/v1/identities'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Identity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_namespaced_group(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication 
setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_namespaced_template(self, body, namespace, **kwargs):\n\n all_params = ['body', 'namespace', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_template\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_template`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_template`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/processedtemplates'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Template',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_createElementNS():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS();\n x.createElementNS(\"foo\");\n x.createElementNS(\"foo\", \"bar\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS(\"foo\", \"script\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS(\"foo\", bar);\n \"\"\").failed()\n\n # Test for https://github.com/mozilla/amo-validator/issues/368\n assert not _do_test_raw(\"\"\"\n var x = \"foo\",\n nsXUL = \"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul\";\n\n x.createElementNS(nsXUL, 'panelview')\n \"\"\").failed()\n\n # Creating a <script> element raises a warning of course.\n assert _do_test_raw(\"\"\"\n var x = \"foo\",\n nsXUL = \"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul\";\n\n x.createElementNS(nsXUL, 'script')\n \"\"\").failed()", "def addNode(self, nTag, pkg, exe, args, name, namespace):\r\n try:\r\n validateName(nTag)\r\n except IllegalName:\r\n raise InvalidRequest('Node tag is not a valid.')\r\n\r\n if nTag in self._nodes:\r\n raise InvalidRequest(\"Can not use the same node tag '{0}' in the \"\r\n 'same container twice.'.format(nTag))\r\n\r\n node = self._obj.createNode(pkg, exe, 
args, name, namespace)\r\n self._nodes[nTag] = node\r\n node.notifyOnDeath(self._nodeDied)", "def patch_namespaced_namespace_with_http_info(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_namespace`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_namespace`\")\n\n resource_path = '/api/v1/namespaces/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Namespace',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "def create_namespaced_build(self, body, namespace, **kwargs):\n\n all_params = ['body', 'namespace', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_build\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_build`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_build`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/builds'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 
'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Build',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def createEnvironment(self, _):\r\n if self._namespaces:\r\n raise InternalError('The environment can have only one namespace '\r\n 'at a time.')\r\n\r\n environment = Environment(self)\r\n return self._avatar.callRemote('setupNamespace', environment)", "def container(name, ostemplate, **kwargs):\n if not openvz.exists(name):\n ctid = openvz.get_available_ctid()\n openvz.create(ctid, ostemplate=ostemplate, **kwargs)\n openvz.set(ctid, name=name)\n return Container(name)", "def test_namespace_bucket_creation_with_many_resources_rpc(\n self, ns_resource_factory, bucket_factory\n ):\n logger.info(\"Create namespace resources and verify health\")\n ns_resources = [ns_resource_factory()[1] for _ in range(0, 100)]\n\n logger.info(\"Create the namespace bucket with many namespace resources\")\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resources[0],\n read_ns_resources=ns_resources,\n )", "def addNamespace(self, *args):\n return _libsbml.SBMLNamespaces_addNamespace(self, *args)", "def test_create_namespaced_policy_binding(self):\n pass", "def test_create_namespaced_policy(self):\n pass", "def create_namespaces(self, iface):\n if iface not in self.namespaces:\n name = \"netns_{}\".format(iface)\n self._lhost.ui.create_namespace(name)\n\n self._lhost.ui.modify_ports([iface], netns=name)\n self.namespaces[iface] = name\n\n self.iface_config(iface, adminMode='Up')", "def test_create_namespaced_processed_template(self):\n pass", "def create_bridge(self, num_ifaces: int) -> Bridge:\n testutils.log.info(\n \"---------------------- Creating a namespace ----------------------\",\n )\n random.seed(datetime.now().timestamp())\n bridge = Bridge(uuid.uuid4())\n result = bridge.create_virtual_env(num_ifaces)\n if result != testutils.SUCCESS:\n bridge.ns_del()\n testutils.log.error(\n \"---------------------- Namespace creation failed ----------------------\",\n )\n raise SystemExit(\"Unable to create the namespace environment.\")\n testutils.log.info(\n \"---------------------- Namespace successfully created ----------------------\"\n )\n return bridge", "def create_namespaced_policy(self, body, namespace, **kwargs):\n\n all_params = ['body', 'namespace', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_policy`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_policy`\")\n\n resource_path = 
'/oapi/v1/namespaces/{namespace}/policies'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Policy',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def replace_namespaced_namespace_finalize(self, body, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.replace_namespaced_namespace_finalize_with_http_info(body, name, **kwargs)\n else:\n (data) = self.replace_namespaced_namespace_finalize_with_http_info(body, name, **kwargs)\n return data", "def create(self, name, tag):\n\n\t\turl_json = urllib.urlencode({\"name\": name, \"tag\": tag})\n\t\treturn self._create(\"/tag?json_hash=%s\" % url_json, \"tag\")", "def declare_namespace(namespace):\n\treturn \"\\\\declare{%s}\" % namespace", "def create_container(ContainerName=None, Tags=None):\n pass", "def create_namespaced_template_2(self, body, namespace, **kwargs):\n\n all_params = ['body', 'namespace', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_template_2\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_template_2`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_template_2`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/templates'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n 
files=local_var_files,\n response_type='V1Template',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def post_namespace_delete(self, resource_id, resource_dict):\n pass", "def declareNamespace (self, namespace, prefix=None, add_to_map=False):\n if not isinstance(namespace, pyxb.namespace.Namespace):\n raise pyxb.UsageError('declareNamespace: must be given a namespace instance')\n if namespace.isAbsentNamespace():\n raise pyxb.UsageError('declareNamespace: namespace must not be an absent namespace')\n if prefix is None:\n prefix = namespace.prefix()\n if prefix is None:\n pfxs = self.__inScopePrefixes.get(namespace)\n if pfxs:\n prefix = next(iter(pfxs))\n while prefix is None:\n self.__namespacePrefixCounter += 1\n candidate_prefix = 'ns%d' % (self.__namespacePrefixCounter,)\n if not (candidate_prefix in self.__inScopeNamespaces):\n prefix = candidate_prefix\n ns = self.__inScopePrefixes.get(prefix)\n if ns:\n if ns != namespace:\n raise pyxb.LogicError('Prefix %s is already in use for %s' % (prefix, ns))\n return prefix\n if not self.__mutableInScopeNamespaces:\n self.__clonePrefixMap()\n self.__mutableInScopeNamespaces = True\n self.__addPrefixMap(prefix, namespace)\n return prefix", "def list_tag_namespaces(self, compartment_id, **kwargs):\n resource_path = \"/tagNamespaces\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"include_subcompartments\",\n \"lifecycle_state\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_tag_namespaces got unknown kwargs: {!r}\".format(extra_kwargs))\n\n if 'lifecycle_state' in kwargs:\n lifecycle_state_allowed_values = [\"ACTIVE\", \"INACTIVE\", \"DELETING\", \"DELETED\"]\n if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:\n raise ValueError(\n \"Invalid value for `lifecycle_state`, must be one of {0}\".format(lifecycle_state_allowed_values)\n )\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"includeSubcompartments\": kwargs.get(\"include_subcompartments\", missing),\n \"lifecycleState\": kwargs.get(\"lifecycle_state\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[TagNamespaceSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[TagNamespaceSummary]\")", "def test_namespace_resource_creation_rpc(self, ns_resource_factory):\n # Create the namespace resource and verify health\n ns_resource_factory()", "def gen_namespace(self, node):\n node.functions = self.define_function_suffix(node.functions)\n for ns in node.namespaces:\n self.gen_namespace(ns)", "def _create_tag_request():\n\n key = helpers.get('Tag.1.Key')\n value = helpers.get('Tag.1.Value')\n resource_id = 
helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'createTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key,\n 'tags[0].value': value\n }\n\n response = requester.make_request_async(args)\n\n return response", "def create_nat(self, **attrs):\n return self._create(_gw.Service, tenant_id=self.get_project_id(), **attrs)", "def ex_create_storage_service(\n self,\n name,\n location,\n description=None,\n affinity_group=None,\n extended_properties=None,\n ):\n\n response = self._perform_storage_service_create(\n self._get_storage_service_path(),\n AzureXmlSerializer.create_storage_service_to_xml(\n service_name=name,\n label=self._encode_base64(name),\n description=description,\n location=location,\n affinity_group=affinity_group,\n extended_properties=extended_properties,\n ),\n )\n\n self.raise_for_response(response, 202)\n\n return True", "def create_namespaced_pod_with_http_info(self, body, namespace, **kwargs):\n\n all_params = ['body', 'namespace', 'pretty']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_pod\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_pod`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_pod`\")\n\n resource_path = '/api/v1/namespaces/{namespace}/pods'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Pod',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "def create_namespaced_image(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_image\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is 
set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_image`\")\n\n resource_path = '/oapi/v1/images'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Image',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def delete_namespaced_net_namespace(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `delete_namespaced_net_namespace`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_net_namespace`\")\n\n resource_path = '/oapi/v1/netnamespaces/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_in_workspace(self, workspace, params={}, **options):\n path = \"/workspaces/%s/tags\" % (workspace)\n return self.client.post(path, params, **options)", "def deletecollection_namespaced_net_namespace(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise 
TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method deletecollection_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/netnamespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def _ensure_namespace_exists(self, subsystem, volume_path, uuid):\n for ns in subsystem.namespaces:\n if ns.get_attr('device', 'path') == volume_path:\n return ns.nsid\n\n ns_id = self._get_available_namespace_id(subsystem)\n ns_data = self._namespace_dict(uuid, volume_path, ns_id)\n nvmet.Namespace.setup(subsystem, ns_data)\n return ns_id", "def test_resource_namespace(self, integrationtest, k8sconfig):\n # Fixtures.\n config = self.k8sconfig(integrationtest, k8sconfig)\n MM = MetaManifest\n\n for src in [\"\", \"v1\"]:\n # A particular Namespace.\n res, err = k8s.resource(config, MM(src, \"Namespace\", None, \"name\"))\n assert not err\n assert res == K8sResource(\n apiVersion=\"v1\",\n kind=\"Namespace\",\n name=\"namespaces\",\n namespaced=False,\n url=f\"{config.url}/api/v1/namespaces/name\",\n )\n\n # A particular Namespace in a particular namespace -> Invalid.\n assert k8s.resource(config, MM(src, \"Namespace\", \"ns\", \"name\")) == (res, err)\n\n # All Namespaces.\n res, err = k8s.resource(config, MM(src, \"Namespace\", None, None))\n assert not err\n assert res == K8sResource(\n apiVersion=\"v1\",\n kind=\"Namespace\",\n name=\"namespaces\",\n namespaced=False,\n url=f\"{config.url}/api/v1/namespaces\",\n )\n\n # Same as above because the \"namespace\" argument is ignored for Namespaces.\n assert k8s.resource(config, MM(src, \"Namespace\", \"name\", \"\")) == (res, err)", "def _mk_tag(ns, tag):\n return '{%s}%s' % (ns, tag) if ns else tag", "def test_create_ns_bucket_from_utilized_resources_rpc(\n self,\n rgw_deployments,\n mcg_obj,\n cld_mgr,\n awscli_pod_session,\n ns_resource_factory,\n bucket_factory,\n test_directory_setup,\n ):\n logger.info(\"Create the namespace resources and verify health\")\n target_bucket1, resource1 = ns_resource_factory(platform=constants.RGW_PLATFORM)\n target_bucket2, resource2 = ns_resource_factory(platform=constants.AWS_PLATFORM)\n\n original_folder = test_directory_setup.origin_dir\n result_folder = 
test_directory_setup.result_dir\n\n logger.info(\"Upload files directly to cloud target buckets\")\n rgw_creds = {\n \"access_key_id\": cld_mgr.rgw_client.access_key,\n \"access_key\": cld_mgr.rgw_client.secret_key,\n \"endpoint\": cld_mgr.rgw_client.endpoint,\n }\n aws_creds = {\n \"access_key_id\": cld_mgr.aws_client.access_key,\n \"access_key\": cld_mgr.aws_client.secret_key,\n \"endpoint\": constants.MCG_NS_AWS_ENDPOINT,\n \"region\": self.DEFAULT_REGION,\n }\n self.write_files_to_pod_and_upload(\n mcg_obj,\n awscli_pod_session,\n bucket_to_write=target_bucket1,\n amount=3,\n s3_creds=rgw_creds,\n )\n self.write_files_to_pod_and_upload(\n mcg_obj,\n awscli_pod_session,\n bucket_to_write=target_bucket2,\n amount=3,\n original_folder=original_folder,\n s3_creds=aws_creds,\n )\n\n logger.info(\"Create the namespace bucket on top of the namespace resource\")\n rand_ns_bucket = bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=resource1,\n read_ns_resources=[resource1, resource2],\n )[0].name\n\n logger.info(\"Read files from ns bucket\")\n self.download_files(\n mcg_obj,\n awscli_pod_session,\n result_folder=result_folder,\n bucket_to_read=rand_ns_bucket,\n )\n\n logger.info(\"Compare between uploaded files and downloaded files\")\n assert self.compare_dirs(\n awscli_pod_session,\n original_folder=original_folder,\n result_folder=result_folder,\n amount=3,\n )", "def create_secret(self, name, namespace):\n secret_manifest = {\n \"apiVersion\": \"v1\",\n \"kind\": \"Secret\",\n \"metadata\": {\n \"name\": name,\n \"annotations\": {\n \"kubernetes.io/service-account.name\": name\n }\n }\n }\n self.v1_client.create_namespaced_secret(namespace=namespace,\n body=secret_manifest)", "def create_namespaced_service_with_http_info(self, body, namespace, **kwargs):\n\n all_params = ['body', 'namespace', 'pretty']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_service\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_service`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_service`\")\n\n resource_path = '/api/v1/namespaces/{namespace}/services'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n 
body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Service',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "def create_namespaced_node(self, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_namespaced_node_with_http_info(body, **kwargs)\n else:\n (data) = self.create_namespaced_node_with_http_info(body, **kwargs)\n return data", "def create_namespaced_pod(self, body, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_namespaced_pod_with_http_info(body, namespace, **kwargs)\n else:\n (data) = self.create_namespaced_pod_with_http_info(body, namespace, **kwargs)\n return data", "def test_create_namespaced_build(self):\n pass", "def create(self, session, prepend_key=True):\n if not self.allow_create:\n raise exceptions.MethodNotSupported(self, \"create\")\n\n endpoint_override = self.service.get_endpoint_override()\n if self.put_create:\n # create tag do not need requires_id\n request = self._prepare_request(requires_id=False,\n prepend_key=prepend_key)\n response = session.put(request.uri, endpoint_filter=self.service,\n endpoint_override=endpoint_override,\n json=request.body, headers=request.headers)\n self._translate_response(response)\n return self", "def post_namespace_update(self, resource_id, resource_dict):\n pass", "def replace_namespaced_template(self, body, namespace, name, **kwargs):\n\n all_params = ['body', 'namespace', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_template\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_template`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `replace_namespaced_template`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_template`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/templates/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n 
body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Template',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def convert_to_namespace(file, output, keyword):\n resource = parse_bel_resource(file)\n write_namespace(\n namespace_keyword=(keyword or resource['AnnotationDefinition']['Keyword']),\n namespace_name=resource['AnnotationDefinition']['Keyword'],\n namespace_description=resource['AnnotationDefinition']['DescriptionString'],\n author_name='Charles Tapley Hoyt',\n namespace_domain=NAMESPACE_DOMAIN_OTHER,\n values=resource['Values'],\n citation_name=resource['Citation']['NameString'],\n file=output\n )", "def POST(self, uri='catalog'):\n # content negotiation\n content_type = negotiated_content_type(self.supported_types, self.default_content_type)\n\n # registry acl enforcement\n allowed = web.ctx.ermrest_registry.can_create(web.ctx.webauthn2_context.attributes)\n if not allowed:\n raise rest.Forbidden(uri)\n\n # optional input\n docstr = web.ctx.env['wsgi.input'].read().decode().strip()\n if docstr:\n try:\n doc = json.loads(docstr)\n except:\n raise exception.rest.BadRequest('Could not deserialize JSON input.')\n else:\n doc = {}\n\n # create the alias entry\n catalog_id = web.ctx.ermrest_registry.claim_id(id=doc.get('id'), id_owner=doc.get('owner'))\n\n # register the catalog descriptor\n entry = web.ctx.ermrest_registry.register(catalog_id, alias_target=doc.get('alias_target'))\n\n web.header('Content-Type', content_type)\n web.ctx.ermrest_request_content_type = content_type\n\n # set location header and status\n location = '/ermrest/catalog/%s' % catalog_id\n web.header('Location', location)\n web.ctx.status = '201 Created'\n\n if content_type == _text_plain:\n return str(catalog_id)\n else:\n assert content_type == _application_json\n return json.dumps(dict(id=catalog_id))" ]
[ "0.6208053", "0.61728823", "0.61027914", "0.61002004", "0.59118104", "0.5871321", "0.58043087", "0.5771377", "0.5746048", "0.5731766", "0.5726167", "0.57059276", "0.5678216", "0.55117524", "0.54968196", "0.54622465", "0.54276574", "0.5421414", "0.54114", "0.5391365", "0.53783107", "0.53694326", "0.53572416", "0.53054005", "0.5293745", "0.5259266", "0.5218361", "0.5187072", "0.5183011", "0.5120911", "0.50942045", "0.50751245", "0.5074465", "0.50693035", "0.5048946", "0.50411594", "0.50411594", "0.5039475", "0.5033786", "0.5018365", "0.50129986", "0.49930224", "0.49760348", "0.49540383", "0.49452725", "0.49144033", "0.49109086", "0.49098924", "0.4908043", "0.4884019", "0.4868998", "0.48655325", "0.48602882", "0.4840987", "0.4826537", "0.4818298", "0.4815849", "0.48131052", "0.480893", "0.48051745", "0.4799129", "0.47874787", "0.4784033", "0.47673255", "0.47642618", "0.4760623", "0.475263", "0.47485548", "0.47459495", "0.47358918", "0.47358716", "0.47342014", "0.47323388", "0.4717451", "0.47174153", "0.47152367", "0.47047383", "0.46963757", "0.46919206", "0.46750888", "0.46750155", "0.46739286", "0.4673309", "0.46667674", "0.46629483", "0.46484804", "0.46473867", "0.46455973", "0.4643744", "0.46410367", "0.4634914", "0.46302974", "0.46156204", "0.46152267", "0.4609475", "0.46084082", "0.46082708", "0.4592585", "0.45906916", "0.45869485" ]
0.656771
0
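Each row in this dump pairs a natural-language query with one positive document and a list of scored negatives, and its metadata declares a triplet objective over (query, document, negatives). Below is a minimal sketch of turning one loaded row into contrastive training triplets; the dict keys and the idea of ranking negatives by their scores are assumptions about how the rows might be consumed, not part of the dump itself.

def to_triplets(row, max_negatives=5):
    # Assumed keys: "query", "document", "negatives", "negative_scores" --
    # they mirror the field order of the rows shown in this dump.
    query = row["query"]
    positive = row["document"]
    # Pair each negative with its score and keep the hardest (highest-scored) ones.
    scored = sorted(
        zip(row["negatives"], map(float, row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [(query, positive, negative) for negative, _score in scored[:max_negatives]]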
Creates a new user in your tenancy. For conceptual information about users, your tenancy, and other IAM Service components, see `Overview of the IAM Service`__. You must specify your tenancy's OCID as the compartment ID in the request object (remember that the tenancy is simply the root compartment). Notice that IAM resources (users, groups, compartments, and some policies) reside within the tenancy itself, unlike cloud resources such as compute instances, which typically reside within compartments inside the tenancy. For information about OCIDs, see `Resource Identifiers`__. You must also specify a name for the user, which must be unique across all users in your tenancy
def create_user(self, create_user_details, **kwargs):
    resource_path = "/users"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_user got unknown kwargs: {!r}".format(extra_kwargs))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_user_details,
            response_type="User")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_user_details,
            response_type="User")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create():\n api_request = apireq.APIRequest(request, 'client_schema')\n if api_request.is_invalid():\n return api_request.error_text, 400\n return user_management.create_user(api_json['username'])", "def create_user(self, **kwargs):\n\n user = self.user_model(**self._prepare_create_user_args(**kwargs))\n return self.put(user)", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n # Validate request.user_name is alphanumeric\n if not str(request.user_name).isalnum():\n raise endpoints.BadRequestException(\n 'User name must be alphanumeric')\n # If email address is given, validate it.\n email = ''\n if not getattr(request, 'email') == None:\n email = str(getattr(request, 'email'))\n if len(email) > 0:\n if not validateEmail(email):\n raise endpoints.BadRequestException(\n 'The given email is invalid!')\n user = User(name=request.user_name, email=email)\n user.put()\n return StringMessage(message='User {} created!'.format(\n request.user_name))", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n user = User(name=request.user_name, email=request.email)\n user.put()\n return StringMessage(message='User {} created!'.format(\n request.user_name))", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n user = User(name=request.user_name, email=request.email)\n user.put()\n return StringMessage(message='User {} created!'.format(\n request.user_name))", "def create_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"create_user\")", "def create_user(self, user_id):\n data = {\n 'email': self._email_for_user_id(user_id),\n 'username': user_id,\n 'password': str(uuid.uuid4()),\n 'name': user_id,\n }\n\n # create user and return it to caller\n return self._post('/users', data=data)", "def create_user():\n username = request.get_json().get(\"name\", None)\n role = request.get_json().get(\"role\", None)\n email = request.get_json().get(\"email\", None)\n return jsonify(\n admin.create_user(current_app.scoped_session(), username, role, email)\n )", "def create_user(self, req):\n\n if models.User.query(models.User.name == req.user_name).get():\n raise endpoints.ConflictException('A User with that name already exists!')\n\n models.User.create(req.user_name, req.email)\n return msgs.StringMessage(msg=\"User {} created!\".format(req.user_name))", "def create_user():\r\n data = request.get_json() or {}\r\n print(data)\r\n # some data checks\r\n if 'username' not in data or 'password' not in data:\r\n return bad_request('must include username and password fields')\r\n if User.query.filter_by(username=data['username']).first():\r\n return bad_request('please use a different username')\r\n user = User()\r\n # add user to database\r\n user.add_user(data)\r\n # check that the transaction was successful\r\n res = User.query.filter_by(username=data['username']).one_or_none()\r\n # return added user as query response\r\n if res:\r\n response = jsonify(res.to_dict())\r\n response.status_code = 201\r\n # else return error\r\n else:\r\n response.status_code = 403\r\n response.headers['Location'] = url_for('api.get_user', id=user.id)\r\n return response", "def new_user(request):\r\n rdict = request.params\r\n\r\n u = User()\r\n\r\n u.username = 
unicode(rdict.get('username'))\r\n if u.username:\r\n u.username = u.username.lower()\r\n u.email = unicode(rdict.get('email')).lower()\r\n passwd = get_random_word(8)\r\n u.password = passwd\r\n u.activated = True\r\n u.is_admin = False\r\n u.api_key = User.gen_api_key()\r\n\r\n try:\r\n DBSession.add(u)\r\n DBSession.flush()\r\n # We need to return the password since the admin added the user\r\n # manually. This is only time we should have/give the original\r\n # password.\r\n ret = dict(u)\r\n ret['random_pass'] = passwd\r\n return _api_response(request, ret)\r\n\r\n except IntegrityError, exc:\r\n # We might try to add a user that already exists.\r\n LOG.error(exc)\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: User exists.',\r\n })", "def create_user(username, **kwargs):\n user = create_user_object(username=username, **kwargs)\n response = utils.checked_api_call(users_api, 'create_new', body=user)\n if response:\n return response.content", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def create_user():\n try:\n payload = _validatePayload(request)\n timestamp = int(time.time() * 1000)\n user = {\n 'name': payload.get('name'),\n 'email': payload.get('email'),\n 'password': _encodePassword(payload.get('password')),\n 'createdAt': timestamp,\n 'updatedAt': timestamp,\n }\n\n resp = table.put_item(\n Item=user,\n Expected={'email': {'Exists': False}}\n )\n return jsonify(user), 200\n except Exception as e:\n logger.info('ERROR {}'.format(str(e)))\n return _customizeErrorMessage(e)", "def create_user():\n record = request.get_json()\n if record is None:\n return {\"Error\": \"No data Supplied.\"}, 400\n\n schema = user_schema.load(record)\n\n if UserModel.objects(email=schema['email']):\n return {\"Error\": \"User Data already exists.\"}, 400\n user = UserModel(**schema)\n user.hash_password()\n user.save()\n ser_data = user_schema.dump(user)\n token = Auth.generate_token(ser_data[\"_id\"])\n return {\"message\": \"User Created Successfully\", \"Token\": token, \"id\": str(user.id)}, 200", "def create_user():\n body = request.get_json(silent=True)\n if body is None:\n abort(400, jsonify(error=\"Not a JSON\"))\n if 'email' not in body:\n abort(400, jsonify(error=\"Missing email\"))\n if 'password' not in body:\n abort(400, jsonify(error=\"Missing password\"))\n user = models.user.User(**body)\n models.storage.new(user)\n models.storage.save()\n return make_response(jsonify(user.to_dict()), 201)", "def create_user():\n try:\n\n user = User(username=request.json.get(\"username\"), score=0,)\n\n user.insert()\n\n response = jsonify({\"success\": True, \"created_user_id\": user.id})\n\n except AttributeError:\n abort(400)\n\n return response", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def create_user():\r\n if not request.is_json or 'name' not in request.get_json() or 'phone_number' not in request.get_json() or 'password' not in request.get_json():\r\n return bad_request('Missing required data.')\r\n try:\r\n return add_user(request)\r\n except:\r\n return bad_request(error_messages['user_exist'])", "def CreateUser(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def create_user(self):\n User.objects.create_user('test', 'testing@test.com', 
'testing')", "def create_user():\n try:\n # Create new user\n try:\n body = request.get_json()\n except:\n # Bad request as request body is not available\n return abort(400)\n\n record_id = collection.insert(body)\n return jsonify({\"message\":\"Successfully Created the resource.\"}), 201\n\n except:\n # Error while trying to create the resource\n return \"Error while trying to create the resource\", 500", "async def create_user(user_request: UserRequestModel):\n\n user = User.create(\n username=user_request.username,\n email=user_request.email\n )\n\n return user", "def create_user(self, **kwargs):\n kwargs = self._prepare_create_user_args(**kwargs)\n user = self.user_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(user)", "def create_user(self, *args, **kwargs):\n user = User.objects.create_user(*args, **kwargs)\n return get_profile(user)", "def create_user(user: User):\n coll = data_access.get_user_collection()\n\n if user.name == \"\":\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"User name must not be empty.\")\n\n if coll.find_one(user.dict()) is None:\n coll.insert_one(user.dict())", "def create_user(UserName=None, MessageAction=None, FirstName=None, LastName=None, AuthenticationType=None):\n pass", "def new_user():\n username = request.json.get('username')\n password = request.json.get('password')\n picture = request.json.get('picture')\n email = request.json.get('email')\n if username is None or password is None:\n print(\"missing arguments\")\n abort(400)\n\n if getUserByUsername(username) is not None:\n print(\"existing user\")\n return jsonify({'message': 'user already exists'}), 200\n\n user = addUser(username, picture, email, password)\n return jsonify(user=user.serialize), 201", "def post(self):\n data = flask.request.json\n user_dao.create_user(data)\n return None, 201", "def create_user(\n *,\n user_in: schemas.UserCreate,\n) -> schemas.User:\n next_user_id = users[-1].id + 1 # type: ignore\n user = schemas.User(\n id=next_user_id,\n email=user_in.email,\n is_active=user_in.is_active,\n is_superuser=user_in.is_superuser,\n full_name=user_in.full_name,\n )\n users.append(user)\n return user", "def create_user(context, params):\n form_user = dict()\n # form_user['edited_by'] = context.user\n if params.get('username'):\n form_user['username'] = params.get('username')\n else:\n form_user['username'] = create_username(params) # 'email_user{}'.format(MISUser.objects.latest('id').id + 1\n form_user['first_name'] = params.get('first_name')\n form_user['last_name'] = params.get('last_name')\n form_person = create_person(params)\n form_user.update(form_person)\n user = User.objects.create(**form_user)\n user.set_password(params.get('password'))\n\n email = {'label': 'Work', 'val': params.get('email'), 'person': user, 'is_main': True}\n create_email(context, email)\n\n user.save()\n return user", "def create_user(user_name, password, tenant_name, auth_admin_url, admin_token):\n keystone = get_client(auth_admin_url, admin_token)\n tenants = keystone.tenants.list()\n my_tenant = [x for x in tenants if x.name==tenant_name][0]\n my_user = keystone.users.create(name=user_name, password=password, tenant_id=my_tenant.id)\n print my_user\n return my_user.to_dict()", "def create(self, validated_data):\n username = validated_data.get('username')\n email = validated_data.get('email')\n password = validated_data.get('password')\n first_name = validated_data.get('first_name', '')\n last_name = validated_data.get('last_name', '')\n return 
User.objects.create_user(username, email, password, first_name=first_name,\n last_name=last_name)", "def create_user(self, user, user_opt={}):\n assert user.id is None\n if self.dryrun:\n self.logger.debug(\"Would create user %s in zabbix\", user.alias)\n return\n\n random_passwd = ''.join(random.sample(string.ascii_letters + string.digits, 32))\n\n user_req = {\n 'autologin': 0,\n 'type': 1,\n 'usrgrps': [{'usrgrpid': str(id)} for id in user.groups],\n 'passwd': random_passwd,\n \"alias\": user.alias,\n \"name\": user.name,\n \"surname\": user.surname,\n }\n user_req.update(user_opt)\n\n result = self.conn.user.create(user_req)\n user.id = result[\"userids\"][0]\n self.logger.debug(\"Created user %s in zabbix, id: %s\", user.alias, user.id)", "def create_user(self, user_data):\n\n with suppress(self.client.exceptions.UsernameExistsException):\n attributes = self._get_attributes(user_data)\n attrib_set = {attr['Name']: attr['Value'] for attr in attributes}\n user = None\n try:\n user = self.client.admin_get_user(\n UserPoolId=self.user_pool_id,\n Username=user_data['email']\n )\n except self.client.exceptions.UserNotFoundException:\n pass\n if user:\n user_attr_set = {attr['Name']: attr['Value'] for attr in user['UserAttributes']}\n user_attr_set.pop('sub')\n if user_attr_set != attrib_set:\n self.client.admin_update_user_attributes(\n UserPoolId=self.user_pool_id,\n Username=user_data['email'],\n UserAttributes=attributes\n )\n return self.sign_in_user(user_data['email'], user_data['password'])\n\n user = self.client.admin_create_user(\n UserPoolId=self.user_pool_id,\n Username=user_data['email'],\n TemporaryPassword=user_data['password'] + 'temp',\n UserAttributes=attributes\n )\n return self._authorize_new_user(user['User']['Username'], user_data['password'])", "def create(self, validated_data: dict):\n return User.objects.create_user(**validated_data)", "def create(self, validated_data):\n username = validated_data.pop('username')\n email = validated_data.pop('email')\n password = validated_data.pop('password')\n user = User.objects.create_user(\n username, email, password, **validated_data)\n return user", "def create_user(self, _user_data):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/users\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n _body = json.dumps(_user_data)\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating user: %s\" %\n _user_data['user']['name'])\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Create user Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"User created successfully. 
Details:%s\" % output)\n\n return output['user']['id']", "def api_create_user():\n data = request.json\n\n errs, res = self.user_manager.create_user_as_admin(\n email=data['email'],\n username=data['username'],\n role=data['role'],\n passwd=data['password'],\n passwd2=data['password'],\n name=data.get('full_name', ''))\n\n # validate\n if errs:\n return {'errors': errs}\n\n user, first_coll = res\n return {'user': user.name, 'first_coll': first_coll.name if first_coll else ''}", "def _create_user(self, new_user):\n new_user = User(user_name=new_user['user_name'], pin=new_user['pin'], user_type='customer')\n self.session.output(new_user.get_user_info(), '\\n[ New user created ]')", "def create(self, request, *args, **kwargs):\n user = request.user\n if user.is_authenticated and not user.has_perm(\"users.add_user\"):\n self.permission_denied(request, message=_(\"You cannot create users.\"))\n return super().create(request, *args, **kwargs)", "def create(self, data):\n # ensure 'create()' calls the specific 'create_user()' method\n # note that the 'data' gets validated\n user = get_user_model().objects.create_user(**data)\n return user", "def do_user_create(cs, args):\n cs.users.create(args.username, args.password, args.email, args.realname,\n args.comment)\n print(\"Create user '%s' successfully.\" % args.username)", "def create_user(self, data):\n return self.client.post(\n path='/api/v2/auth/signup/', data=json.dumps(data), content_type='application/json')", "def create_user():\n usr = request.get_json()\n if not usr:\n abort(400, {'Not a JSON'})\n elif 'email' not in usr:\n abort(400, {'Missing email'})\n elif 'password' not in usr:\n abort(400, {'Missing password'})\n else:\n new_usr = User(**usr)\n storage.new(new_usr)\n storage.save()\n return jsonify(new_usr.to_dict()), 201", "def create_user(user_name: str):\n user = User()\n user.username = user_name\n user.save()\n return user", "def create(self, validated_data):\n\n user_data = {\n \"username\" : validated_data.get(\"username\"),\n \"email\" : validated_data.get(\"email\"),\n \"password\" : validated_data.get(\"password\")\n }\n user = User.objects.create_user(**user_data)\n user.save()\n\n account_data = {\n \"phone\" : validated_data.get(\"phone\"),\n \"type\" : validated_data.get(\"type\"),\n \"lat\" : validated_data.get(\"lat\"),\n \"lang\" : validated_data.get(\"lang\"),\n \"center_point\" : validated_data.get(\"center_point\")\n }\n account = Account(user = user, **account_data)\n account.save()\n\n return user", "def create_user():\n new_user = User(id=login_session['gplus_id'],\n name=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n session.add(new_user)\n session.flush()\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id", "def sample_user(email=user_v['email'], password=user_v['password']):\n return get_user_model().objects.create_user(email, password)", "def post(self):\n return self.get_request_handler(request.headers).create_new_user(request)", "def create_new_user():\n return get_user_model().objects.create_user(\n email='test@gmail.com',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def add_user():\n input = request.get_json()\n\n if input == None:\n return jsonify({'error': 'Invalid POST request, no data'}), 400\n if not 'username' in input:\n return jsonify({'error': 'Invalid POST request, missing username'}), 400\n if not 'password' in input:\n return jsonify({'error': 'Invalid POST 
request, missing password'}), 400\n if not 'display_name' in input:\n return jsonify({'error': 'Invalid POST request, missing display_name'}), 400\n if not 'role' in input:\n return jsonify({'error': 'Invalid POST request, missing role'}), 400\n\n netAdminToolDB = app.config['DATABASE']\n id = netAdminToolDB.add_user(input['username'], input['password'],\n input['display_name'], input['role'])\n\n newUser = netAdminToolDB.get_user(id)\n newUserDict = dict(newUser)\n uri = url_for('get_user', user_id=newUser.id, _external=True)\n newUserDict['uri'] = uri\n\n return jsonify({'user': newUserDict}), 201", "def post(self):\n self.parser.add_argument(\n 'name', required=True, type=self.validator.validate_string_fields, help='Enter a valid name')\n self.parser.add_argument(\n 'email', required=True, type=self.validator.validate_string_fields, help='Must be a valid email')\n self.parser.add_argument(\n 'password', required=True, type=self.validator.validate_string_fields, help='Must enter a valid password')\n\n user = self.parser.parse_args()\n response = self.user_models.create_user(user['name'],\n user['email'],\n user['password'])\n return {\"message\": response}, 201", "async def create_user(current_active_user: AuthUserPublic = Depends(get_current_active_user),\n form_data: AuthUserCreationForm = Depends()) -> AuthUserPublic:\n if not current_active_user.is_superuser:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,\n detail=\"Permission denied.\")\n try:\n user = store_user(**form_data.dict())\n except Exception:\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"Failed to store in DB.\")\n if not user:\n raise HTTPException(status_code=status.HTTP_409_CONFLICT,\n detail=\"Username already exists.\")\n return user", "def create(self, validated_data):\n user = User.objects.create_user(\n email=validated_data['email'],\n password=validated_data['password'],\n )\n return user", "def create_user() -> tuple:\n # created new user\n user_data: dict = request.get_json()\n names: str = user_data.get(\"names\")\n surname: str = user_data.get(\"surname\")\n cell: str = user_data.get(\"cell\")\n email: str = user_data.get(\"email\")\n password: str = user_data.get(\"password\")\n uid: str = user_data.get(\"uid\")\n organization_id: str = user_data.get(\"organization_id\")\n\n # Add User View will perform error checking\n return user_view.add_user(organization_id=organization_id, uid=uid, names=names, surname=surname,\n cell=cell, email=email, password=password)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create_user():\n body = request.json\n username = body.get('username')\n password = body.get('password')\n validation = validate_user(username, password)\n password = md5(password.encode('utf-8')).hexdigest()\n if validation != \"OK\":\n return HTTPResponse(status=500, body={\"message\":validation})\n 
try:\n with db.atomic():\n user = User.create(username=username, password=password)\n user.save()\n ret = json.dumps({'message':'user created'})\n return HTTPResponse(status=200, body=ret)\n except IntegrityError:\n ret = json.dumps({'message':'user already exists'})\n return HTTPResponse(status=500, body=ret)", "def create_user():\n email = request.json.get('email')\n username = request.json.get('username')\n password = request.json.get('password')\n\n details = [email, username, password]\n\n if not all(details):\n return bad_request(\"you must supply email, username and password\")\n if User.query.filter_by(email=email).first() is not None and User.query.filter_by(username=username) is not None:\n return forbidden(\"email or username already exist\")\n\n user = User(email=email, username=username)\n user.hash_password(password)\n user.save()\n\n return {'status': (user.username + ' has successfully registered')}", "def create_user(self):\n unique_id = str(uuid.uuid4())\n new_user_properties = {\n \"name\": self.name,\n \"mission_statement\": self.mission_statement,\n \"unique_id\": unique_id,\n \"email\": self.email.lower(),\n \"is_mentor\": True,\n \"is_tutor\": True,\n \"is_visible\": True,\n \"is_available_for_in_person\": True,\n \"is_admin\": True}\n new_user_node = Node.cast(AgoraLabel.USER, new_user_properties)\n try:\n self.graph_db.create(new_user_node)\n except:\n pass\n return new_user_node", "def create_user(username=None, password=None, is_admin=False):\n if username is None:\n username = \"username\"\n if password is None:\n password = b\"password\"\n\n kwargs = dict([\n (\"username\", username),\n (\"password\", password),\n (\"is_admin\", is_admin)\n ])\n return CreateUserService(**kwargs).call()", "def create (self, validated_data):\n user = models.UserProfile.objects.create_user(\n email = validated_data ['email'],\n name = validated_data ['name'],\n password = validated_data ['password']\n )\n\n return user", "def _create_user(self, username, password, domain_id, project_id):\n request = {\n \"user\": {\n \"name\": username,\n \"password\": password,\n \"domain_id\": domain_id,\n \"default_project_id\": project_id,\n \"description\": \"description\",\n \"email\": \"test@example.com\",\n \"enabled\": True,\n }\n }\n response = self.client.post(USER_PATH, data=json.dumps(request),\n headers=HEADERS)\n if response.status_code == 409:\n return\n elif response.status_code == 201:\n return response.json()\n else:\n raise SystemExit(\"Failed to create test user.\")", "def create(self, validated_data):\n ## overriding default create\n\n user = UserProfile.objects.create_user(\n email = validated_data['email'],\n name = validated_data['name'],\n password=validated_data['password']\n )\n \n return user", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n #By adding wins, it added it to the create_user input #api page.\n wins = defaults['wins']\n user = User(name=request.user_name, email=request.email, wins = wins)\n #user.put() sends the user info that is ndb\n user.put()\n\n for key,val in sorted(craft.items()):\n outmessage =(\"{} : Can be make with {}\".format(key, val))\n return StringMessage(message='User {} created!'.format(\n outmessage))\n #This just returns a message for response at bottom of API\n #screen.", "def sample_user(email=\"student@test.com\",\n password=\"password123\",\n name=\"some name\"):\n return 
get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def create(self, validated_data):\n user = UserProfile.objects.create_user(\n email=validated_data[\"email\"],\n name=validated_data[\"name\"],\n password=validated_data[\"password\"]\n )\n\n return user", "def create_form_user(self, **kwargs):\n user = User.objects.create_user(\n **kwargs\n )\n return user", "def create_new_user(cls, user_email, user_password, user_phone):\n\n new_user = User(email=user_email, password=user_password, mobile_phone=user_phone)\n\n db.session.add(new_user)\n db.session.commit()\n\n print \"Successfully added new user with the email: %s\" % user_email", "def create(self, validated_data):\n user = UserProfile.objects.create_user(\n email=validated_data['email'],\n first_name = validated_data['first_name'],\n last_name = validated_data['last_name'],\n password = validated_data['password']\n )\n return user", "def signup(self, request):\n # TODO: Add user authentication. Currently, we will create an acct \n new_user = Account.add_new_user(request)\n if new_user is None:\n return AccountResponse(errmsg=\"Username already exists!\")\n return AccountResponse(id=new_user.key.id())", "def create_user(session, phone_number, name, pass_hash, funds=0.0):\n # Perform the db job\n user = User(phone_number=phone_number, name=name, pass_hash=pass_hash, funds=funds)\n session.add(user)\n session.commit()\n return USER_GET_URI.format(user_id=phone_number)", "async def create_new_user(*, user: User):\n with Session(engine) as session:\n user.password = simple_hash(user.name, user.password) #Hashing password for security\n session.add(user)\n session.commit()\n return {\"message\": \"User {user_id} created\".format(user_id = user.id)}", "def sample_user(email, password, is_doctor, is_hospital_admin):\n return MyUser.objects.create_user(email, is_hospital_admin, is_doctor, password)", "def create_user(self, username, email, persona_id, nombre_completo, password=None, **kwargs):\n return self._create_user(username, email, persona_id, nombre_completo, password, False, False, **kwargs)", "def create_user(email='user@example.com', password='testpass123'):\n return get_user_model().objects.create_user(email=email, password=password)", "def new_user():\n success = True\n try:\n usr = User(request.json['username'], request.json['email'])\n db.session.add(usr)\n db.session.commit()\n except:\n success = False\n return jsonify(success=success)", "def create_new_user(first_name, last_name, email, password):\n \n new_user = User(first_name, last_name, email, password)\n db.session.add(new_user)\n db.session.commit()\n \n # link a root storage folder to the user\n root_folder = Folder()\n db.session.add(root_folder)\n db.session.commit()\n new_user.storage_root_id = root_folder.id\n new_user.storage_root = root_folder\n db.session.commit()\n\n # link usage tracking to the user\n usage = Usage()\n usage.user_id = new_user.id\n new_user.usage = usage\n db.session.add(usage)\n db.session.commit()\n\n # link a billing address to the user\n billing_address = BillingAddress()\n billing_address.user_id = new_user.id\n new_user.billing_address = billing_address\n db.session.add(billing_address)\n db.session.commit()\n\n # link settings to the User\n settings = Settings()\n settings.user_id = new_user.id\n new_user.settings = settings\n db.session.add(settings)\n db.session.commit()", "def create_user_object(self, request):\r\n user = {\r\n \"first_name\": request.form.get(\"first_name\"),\r\n \"last_name\": 
request.form.get(\"last_name\"),\r\n \"age\": request.form.get(\"age\"),\r\n \"cpr_number\": request.form.get(\"CPR\"),\r\n \"email\": request.form.get(\"email\"),\r\n \"phone_number\": request.form.get(\"phone_number\"),\r\n \"password\": PasswordHasher().hash(request.form.get(\"password\")),\r\n \"bank_account\": str(BankAccount(\"Savings\", 1000.00).store_account().inserted_id),\r\n \"crypto_wallet\": str(CryptoWallet(\"Bitcoin\", 0.0045).store_account().inserted_id)\r\n }\r\n return user", "def create_a_user(self, username='fry', email='fry@futur.ama', password='Qwerty!234'):\n user = User.objects.create_user(username, email, password)\n user.save()\n return user", "def create_user(self, username, email: str = None, password: str = None, **kwargs):\n return self._create_user(username, email=email, password=password, **kwargs)", "def sample_user(email: str = \"test@gmail.com\", password: str = \"testpass\"):\n return get_user_model().objects.create_user(email, password)", "def user_create(client_id, email, password=None, first_name=None, last_name=None, user_info=None):\n # validate if email contains actually a valid email address:\n try:\n validate_email(email)\n except ValidationError:\n raise ex.UserError(\"please enter a valid email address\")\n # create account\n user = create_user(email)\n user.first_name = first_name\n user.last_name = last_name\n if password:\n user.set_password(password)\n if user_info:\n for (key, value) in user_info.iteritems():\n if key == \"social\" and value is not None: user.meta['social'] = value\n elif key == \"address\" and value is not None: user.meta['address'] = value\n elif key == \"crm\" and value is not None: user.meta['crm'] = value\n elif key == \"local\" and value is not None: user.meta['local'] = value\n \n user_info = user_to_dict(user, include_name=True)\n\n # build success result\n return user_info", "def sample_user(email='test@londonappdev.com', password='testpass'):\n\n return get_user_model().objects.create_user(email, password)", "def create_user(data):\n return woo_request_helper().post_details(wc_endpoint='customers', params=data)", "def new_user(first_name, sur_name, user_name, email, password):\n new_user = User(first_name, sur_name, user_name, email, password)\n return new_user", "def create_user(self, username=None, email=None, password=None):\n\t\treturn self._create_user(username, email, password)", "def sample_user(email=\"test@email.com\", password=\"password123\"):\n\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='test@gmail.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='test@gmail.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='test@gmail.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)", "def sample_user(email='john7ric@mail.com', password='open@123'):\n return get_user_model().objects.create_user(email, password)", "def create_user(self):\n if not self.is_valid():\n return None\n # generate a username \n ids 
= User.objects.values_list('id', flat=True).order_by('-id')[:1]\n if len(ids) > 0:\n # ids[0] will be the maximum value (due to order_by: '-id')\n idnum = ids[0] + 1\n else:\n idnum = 1\n # create User object \n username = \"user%s\" % idnum\n # NOTE: store email in lower case\n email = self.clean_email().lower()\n password = self.clean_password2()\n user = User(username=username, email=email, password='tmp')\n user.save()\n # set the real password\n user.set_password(password)\n # make user inactive (until user has confirmed account)\n user.is_active = False\n # update\n user.save()\n return user", "def sample_user(email='test@tslabs.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)" ]
[ "0.77350646", "0.7719065", "0.7699377", "0.7653624", "0.76405936", "0.7613418", "0.7483702", "0.7418345", "0.7319665", "0.73193854", "0.7315282", "0.7287517", "0.7279548", "0.7276206", "0.72723275", "0.72554487", "0.724378", "0.72345245", "0.718245", "0.71740806", "0.71527773", "0.71055645", "0.70917404", "0.7077725", "0.70666087", "0.7053357", "0.7040028", "0.70366466", "0.7025584", "0.7023903", "0.70203054", "0.701762", "0.700963", "0.7005708", "0.7003798", "0.69901365", "0.6986601", "0.6986209", "0.6983656", "0.6968723", "0.6962668", "0.6932889", "0.6924351", "0.69225925", "0.6920654", "0.69183534", "0.69181067", "0.6915261", "0.691461", "0.68982387", "0.6891929", "0.68796164", "0.68540823", "0.68503773", "0.6837658", "0.68333864", "0.6832478", "0.6832478", "0.6832478", "0.6832478", "0.6832478", "0.6832478", "0.6832478", "0.68239313", "0.6821635", "0.6810366", "0.6808002", "0.68051606", "0.6798682", "0.67963445", "0.6785071", "0.6775075", "0.67631304", "0.676113", "0.67598444", "0.6756813", "0.67480624", "0.6745205", "0.6744887", "0.6738557", "0.6738396", "0.67369866", "0.67366177", "0.67352396", "0.67241246", "0.6723227", "0.67158496", "0.67097396", "0.6708942", "0.67077196", "0.6707008", "0.6696124", "0.6694789", "0.66932523", "0.6691699", "0.6691699", "0.6691699", "0.66857177", "0.6676291", "0.6676027", "0.6672733" ]
0.0
-1
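The document in the row above is the SDK-side implementation of the CreateUser operation described by its query. A hypothetical usage sketch follows; the client class, config loading, and model names are assumptions based on typical OCI Python SDK usage and do not come from the row itself.

# Hypothetical usage sketch -- class and helper names are assumptions, not taken from the row.
import oci

config = oci.config.from_file()                  # reads the default ~/.oci/config profile
identity = oci.identity.IdentityClient(config)

details = oci.identity.models.CreateUserDetails(
    compartment_id=config["tenancy"],            # users live in the root compartment (the tenancy)
    name="example.user",
    description="User created through the API",
)

user = identity.create_user(details).data        # returns a User model
print(user.id, user.lifecycle_state)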
Deletes the specified API signing key for the specified user. Every user has permission to use this operation to delete a key for their own user ID. An administrator in your organization does not need to write a policy to give users this ability. To compare, administrators who have permission to the tenancy can use this operation to delete a key for any user, including themselves.
def delete_api_key(self, user_id, fingerprint, **kwargs):
    resource_path = "/users/{userId}/apiKeys/{fingerprint}"
    method = "DELETE"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "if_match"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "delete_api_key got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "userId": user_id,
        "fingerprint": fingerprint
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_ssh_key(self, user_id, key_id):\n\n _gu = self.get_user(user_id)\n if _gu is None:\n return None\n\n # build URL and make request\n return self._delete('/users/{0}/keys/{1}'.format(_gu['id'], key_id))", "def Delete(self, user, key):\n return self.Remove(user, key)", "def delete_user_key(self, key):\n return AlgoliaUtils_request(self.headers, self.write_hosts, \"DELETE\", \"/1/keys/%s\" % key, self.timeout)", "def delete_signing_cert(self, cert_id, user_name=None):\r\n params = {'CertificateId' : cert_id}\r\n if user_name:\r\n params['UserName'] = user_name\r\n return self.get_response('DeleteSigningCertificate', params)", "def user_id_delete(user_id):\n user = storage.get(\"User\", user_id)\n\n if user is None:\n abort(404)\n user.delete()\n del user\n return make_response(jsonify({}), 200)", "def delete_access_key(self, access_key_id, user_name=None):\r\n params = {'AccessKeyId' : access_key_id}\r\n if user_name:\r\n params['UserName'] = user_name\r\n return self.get_response('DeleteAccessKey', params)", "def user_delete(user_id=None):\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200", "def delete_user(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['id'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v1\")", "def del_user_id(user_id):\r\n obj = storage.get(User, user_id)\r\n if obj is None:\r\n abort(404)\r\n obj.delete()\r\n storage.save()\r\n return jsonify({}), 200", "def delete_user(user_id=None):\n obj = storage.get('User', user_id)\n if obj is None:\n abort(404)\n else:\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200", "def user_delete(user_id):\n user = storage.get('User', user_id)\n if user is None:\n abort(404)\n user.delete()\n storage.save()\n return jsonify({}), 200", "def delete_user(user_id=None):\n\n user = storage.get(\"User\", user_id)\n if user is None:\n abort(404)\n else:\n storage.delete(user)\n storage.save()\n return jsonify({}), 200", "def delete_user(user_id):\n usr = storage.get(User, user_id)\n if usr:\n usr.delete(), storage.save()\n return {}\n else:\n abort(404)", "def delete_user(user_id):\n user = storage.get(User, user_id)\n if user is None:\n abort(404)\n storage.delete(user)\n storage.save()\n return jsonify({}), 200", "def delete_user_key(self, key):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"DELETE\", \"/1/indexes/%s/keys/%s\" % (self.url_index_name, key), self.client.timeout)", "def delete_user(user_id):\n user_obj = storage.get(\"User\", user_id)\n if user_obj:\n storage.delete(user_obj)\n storage.save()\n return jsonify({}), 200\n else:\n abort(404)", "def delete_user_entitlement(self, user_id):\n route_values = {}\n if user_id is not None:\n route_values['userId'] = self._serialize.url('user_id', user_id, 'str')\n self._send(http_method='DELETE',\n location_id='8480c6eb-ce60-47e9-88df-eca3c801638b',\n version='6.0-preview.3',\n route_values=route_values)", "def delete(self, user_id):\n return delete_user(user_id)", "def delete_user_by_xng_id(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/xngId/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['xngId'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v1\")", "def delete_user(self, user):\n self.delete(user)", "def delete(self, user_id):\r\n return delete_user(request, user_id)", 
"def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)", "def delete_user(user_id):\n temp = models.storage.get('User', user_id)\n if temp is None:\n abort(404)\n temp.delete()\n models.storage.save()\n return jsonify({})", "def delete(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def delete(self, key: object):\n try:\n del self._user_data[key]\n except KeyError:\n pass", "def delete(user_id):\n assert isinstance(user_id, ObjectId)\n\n User.objects(id=user_id).delete()", "def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])", "def delete_user(self, user_id):\n return self._delete('/users/{0}'.format(user_id))", "def delete(self, user_id):\n user = User.query.get(user_id)\n \n if user is None:\n return abort(422, message=\"User does not exist\")\n \n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return abort(422, message=\"User is the only admin, there must be at least one admin in the system\")\n \n user.delete()\n \n return { 'message': \"User '{}' has been deleted\".format(user.id) }", "def delete(self, user_id):\n res = self._user.delete_user(user_id)\n\n if res:\n return {\n \"status\": 200,\n \"data\": [{\n \"id\": res[\"id\"],\n \"message\": \"user record has been deleted\"\n }]\n }, 200\n else:\n return {\n \"status\": 404,\n \"error\": \"Not found for id {}\".format(user_id)\n }, 404", "def delete_key(uid):\n if request.method == 'POST':\n hl.deleteUser(uid)\n return redirect('/users')", "def delete_user_by_id(user_id):\n return woo_request_helper().delete_details(wc_endpoint='customers/{}'.format(user_id))", "def delete_user(self, user_name):\n user = self.get_user(user_name)\n return self.client.delete_resource(user.get('href'))", "def delete_keypair(self, username, access_key):\n msg = \"delete_keypair not implemented\"\n raise NotImplementedError(msg)", "def delete(self, user_id):\n\n user = User.objects.get_or_404(public_id=user_id)\n return user.delete()", "def delete(self, user_id):\n user = User.query.get(user_id)\n\n if user is None:\n return mk_response(\"User does not exist\", 422)\n\n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return mk_response(\"User is the only admin, there must \" +\n \"be at least one admin in the system\", 422)\n\n user.delete()\n\n return mk_response(\"User '{}' has been deleted\".format(user.id))", "def delete_user(payload, user_id):\n user = User.query.get(user_id)\n # exception for non existing id\n if user is None:\n abort(404)\n # set error status\n error = False\n # delete the user\n try:\n user.delete()\n except Exception:\n user.rollback()\n error = True\n print(sys.exc_info())\n finally:\n user.close_session()\n\n if error:\n abort(422)\n\n return jsonify({\n 'success': True,\n 'deleted': user_id\n })", "def delete_user():\n del globalopts.appdata[request.user]\n del 
globalopts.users[request.user]\n return \"\", 200", "def delete_user(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.delete_user(user_id)", "def apply_deletion_policy(cls, user_id: str) -> None:\n keys = cls.query(datastore_services.any_of(\n cls.sender_id == user_id,\n )).fetch(keys_only=True)\n datastore_services.delete_multi(keys)", "def delete_api_key(api_key):\n api.delete(api_key)", "def delete_key(self, api_key):\n\t\ttry:\n\t\t\tvalidation.required(api_key, 'api_key')\n\t\texcept errors.ValidationError, ex:\n\t\t\tself.log.warning(\"Validation failure: %s\" % str(ex))\n\t\t\traise errors.APIError, str(ex)\n\n\t\treturn self.app.db.query(\n\t\t\t\"\"\"\n\t\t\tdelete from\n\t\t\t\tapi_keys\n\t\t\twhere\n\t\t\t\tapi_key = %s\n\t\t\t\"\"\", (api_key, ))", "def delete(user_id):\n # Get the user requested\n user = User.query.filter(User.user_id == user_id).one_or_none()\n\n if user is not None:\n db.session.delete(user)\n db.session.commit()\n return (\n \"User {user_id} deleted\".format(user_id=user_id), 200\n )\n\n else:\n abort(\n 404,\n \"Person not found for Id: {user_id}\".format(user_id=user_id),\n )", "def delete_user(user_id):\n netAdminToolDB = app.config['DATABASE']\n user = netAdminToolDB.get_user(user_id)\n\n if user == None:\n return jsonify({'error': 'User_id not found'}), 404\n\n netAdminToolDB.delete_user(user_id)\n return jsonify({'result': True})", "def delete_user(user_id):\n current_user = get_jwt_identity()\n\n if not current_user:\n print('uri=/login error=\"Missing user\"', flush=True)\n return jsonify(message=\"Missing user\"), 400\n\n if not Administrator.is_administrator(current_user):\n print('non-admin user error', flush=True)\n return jsonify(message=\"You are not allowed to delete other users\"), 403\n\n if user_id == current_user:\n return jsonify(message=\"You are not allowed to delete yourself\"), 403\n\n try:\n User.delete(user_id)\n return jsonify(message=\"Delete succeeded\"), 200\n\n except Exception as e:\n print(e, flush=True)\n return jsonify(message='{}'.format(e)), 501", "def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass", "def delete_user(self, user):\n name = utils.get_name(user)\n self._user_manager.delete(name)", "def delete_request(self, user_id, request_id):\n return self.request(\n \"{0}_{1}\".format(request_id, user_id), method=\"DELETE\"\n )", "def apply_deletion_policy(cls, user_id: str) -> None:\n keys = cls.query(datastore_services.any_of(\n cls.recipient_id == user_id,\n cls.sender_id == user_id,\n )).fetch(keys_only=True)\n datastore_services.delete_multi(keys)", "def delete_user(self, user_id):\n\n # ask the model to delete the user\n um = User(self.settings)\n status = um.delete(user_id)\n\n # return\n return status", "async def red_delete_data_for_user(self, *, requester, user_id):\n\t\tawait self.config.user_from_id(user_id).clear()", "def delete(self, request, user_id=None):\n data = json.loads(request.body.decode())\n authenticated = Account.check_credentials(request, data['email'], data['password'])\n user = {}\n user['account_id'] = authenticated.id\n\n if authenticated.check_admin(request, user):\n NLTKOutput.remove(request=request, pk=user_id)\n Account.remove(request=request, pk=user_id)\n return Response(json='Account and content deleted', status=204)\n\n return Response(json='Not Authorized', 
status=401)", "def remove(self, user_id):\n pass", "async def red_delete_data_for_user(self, *, requester, user_id):\n return", "async def red_delete_data_for_user(self, *, requester, user_id):\n return", "def delete(khoros_object, user_id, return_json=False):\n # TODO: Allow other identifiers (e.g. login, email, etc.) to be provided instead of just the User ID\n query_url = f\"{khoros_object.core_settings['v2_base']}/users/{user_id}\"\n response = api.delete(query_url, return_json, auth_dict=khoros_object.auth)\n if response.status_code == 403 and 'Feature is not configured' in response.text:\n try:\n identifier = response.text.split('identifier: ')[1].split('\"')[0]\n raise errors.exceptions.FeatureNotConfiguredError(identifier=identifier)\n except IndexError:\n raise errors.exceptions.FeatureNotConfiguredError()\n if return_json:\n response = response.json()\n return response", "def delete_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"delete_user\")", "def delete(self, new_data, user_id):\n print(new_data)\n request_id = get_jwt_identity()\n user = user_crud.get(request_id)\n if not user.is_superuser:\n abort(401,\n message=\"You do not have permission to view this endpoint\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def users_byUserId_delete(self, userId, headers=None, query_params=None):\n uri = self.url + \"/users/\"+userId\n uri = uri + build_query_string(query_params)\n return requests.delete(uri, headers=headers)", "def delete_user(self) -> None:\n table_dictionary = {\n 'Apple': {\n 'table': 'AppleReceipts',\n 'user_id': 'User_id'\n },\n 'ESL': {\n 'table': 'ESLReceipts',\n 'user_id': 'User_id'\n },\n 'Transactions': {\n 'table': 'Transactions',\n 'user_id': 'User_id'\n },\n 'Users': {\n 'table': 'Users',\n 'user_id': 'id'\n },\n }\n\n # delete the current user's information from the db.\n for key in table_dictionary:\n query = f\"\"\"\n DELETE\n FROM {table_dictionary[key]['table']}\n WHERE {table_dictionary[key]['user_id']}=?;\n \"\"\"\n self.db.commit(query, values=(self.id,))\n\n # perform a sign out\n self.sign_out()\n\n log(f\"User:{self.id} has deleted their account.\")", "def delete_customer_secret_key(self, user_id, customer_secret_key_id, **kwargs):\n resource_path = \"/users/{userId}/customerSecretKeys/{customerSecretKeyId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_customer_secret_key got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"customerSecretKeyId\": customer_secret_key_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if 
kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def del_api_key_for_user(username):\n global db\n if db is None:\n init_db()\n user_model = Query()\n users = db.search(user_model.username == username)\n\n if not users:\n LOGGER.warning(\"User %s not found\", username)\n return False\n try:\n for user in users:\n user['api_key'] = None\n db.write_back(users)\n return True\n except Exception as e:\n LOGGER.exception(e)\n return False", "def delete_api_key(self, apikey_id, **kwargs):\n\n all_params = ['apikey_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_api_key\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'apikey_id' is set\n if ('apikey_id' not in params) or (params['apikey_id'] is None):\n raise ValueError(\"Missing the required parameter `apikey_id` when calling `delete_api_key`\")\n\n resource_path = '/apikeys/{apikeyId}'.replace('{format}', 'json')\n path_params = {}\n if 'apikey_id' in params:\n path_params['apikeyId'] = params['apikey_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='OkResponse',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def deleteKey(self, key):\n key.delete()", "def delete_user(self, user_name):\r\n params = {'UserName' : user_name}\r\n return self.get_response('DeleteUser', params)", "def delete_access_key(self, username, accesskeyid):\n try:\n self.iam_client.delete_access_key(\n UserName=username,\n AccessKeyId=accesskeyid\n )\n except ClientError as error:\n if error.response['Error']['Code'] == 'NoSuchEntityException':\n pass", "def delete_key(self, key_id):\r\n return self.sshkey.deleteObject(id=key_id)", "def delete(cls):\n user = user_schema.load(request.get_json(), partial=(\"email\",))\n\n current_identity = get_jwt_identity()\n db_user = UserModel.find_by_id(current_identity)\n logging.info(\n f\"Delete called by {db_user.id}: {db_user.username} with data: {user['username']}\"\n )\n if db_user.username == user['username']:\n if is_correct_password(db_user.pw_salt, db_user.pw_hash, user['password']):\n db_user.delete_from_db()\n return {\"message\": msgs.DELETED.format(db_user.username)}, 200\n else:\n return {\"error\": msgs.INVALID_PASSWORD}, 401\n return {\"error\": msgs.OWN_RECORD_ONLY}, 401", "def delete(self, user: 'UserCondensed'):\n self._delete(entity=user)", 
"def deleteKey(self):\n\n self.key_del_response = self.ec2.delete_key_pair(KeyName=self.key)", "def delete(user_id: int):\n usr = get_by_id(user_id)\n if not usr:\n raise UserNotFound\n\n db.session.delete(usr)\n db.session.commit()", "def delete_user(user_id):\n\n user = User.query.get(user_id)\n db.session.delete(user)\n db.session.commit()\n return", "def delete_user(cls, user_id=None, email=None):\n params = {\n 'email': email,\n 'user_id': user_id\n }\n user_dict = cls._do_call(\n 'DELETE', cls.api_endpoint + 'users', params)\n return user_dict", "def delete_user(request, user):\n\n if models.Group.created_by(user).count() > 0:\n raise UserDeletionError('Cannot delete user who is a group creator.')\n\n user.groups = []\n\n query = _all_user_annotations_query(request, user)\n annotations = es_helpers.scan(client=request.es.conn, query={'query': query})\n for annotation in annotations:\n storage.delete_annotation(request, annotation['_id'])\n\n request.db.delete(user)", "async def delete_user(user_id):\n \n user = User.select().where(User.id == user_id).first()\n\n if not user:\n return HTTPException(404, 'User not found')\n else:\n user.delete_instance()\n\n return f\"User {user.username} deleted successfully\"", "def deleteUser(self, uID):\n\n cursor = self.conn.cursor()\n query = \"DELETE FROM Users CASCADE \" \\\n \"WHERE uID= %s RETURNING cID; \"\n cursor.execute(query, (uID,))\n cID = cursor.fetchone()[0]\n\n query = \"DELETE FROM Credential \" \\\n \"WHERE cID= %s; \"\n cursor.execute(query, (cID,))\n\n self.conn.commit()\n return", "def delete_keystone_v3_user(self, user_id):\n LOG_OBJ.debug(\"Disable the user.\")\n kwargs = {\"user_id\": user_id, \"enabled\": False}\n self.set_keystone_v3_user(**kwargs)\n\n LOG_OBJ.debug(\"Deleting the user.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/users/\" + str(user_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n response = self.request(\"DELETE\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting the user\")\n print (\"No response from Server while deleting the user\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Deleting user Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Deleting user Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True", "def deleteUserById(SID, userId):\n return call(\"deleteUserById\", SID, userId)", "def delete_user(self, _id):\n return self.make_request(\"DELETE\", \"users/\"+_id, {})", "def delete_users(user_id):\n my_users = storage.get(\"User\", user_id)\n if my_users:\n storage.delete(my_users)\n storage.save()\n storage.close()\n return jsonify({}), 200\n else:\n abort(404)", "def delete_user():", "def delete_user(self):\n\n User.user_list.remove(self)", "def delete_user(self, user_id, **kwargs):\n resource_path = \"/users/{userId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_user got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in 
six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def delete_user():\n token = request.args.get('token')\n data = jwt.decode(token, app.config['SECRET_KEY'])\n\n permit = functions.delete_user(data)\n if permit:\n return make_response(jsonify({'Delete': 'User Deleted Successfully'}), 201)\n else:\n return make_response(jsonify({'Delete Failed': 'Credentials not match or the user not exist'}), 201)", "def delete(self):\r\n return self.bucket.delete_key(self.name, version_id=self.version_id)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_tokens_for_user(self, user_id, project_id=None):\n if not CONF.token.revoke_by_id:\n return\n self.delete_tokens(user_id, tenant_id=project_id)\n for trust in self.trust_api.list_trusts_for_trustee(user_id):\n # Ensure we revoke tokens associated to the trust / project\n # user_id combination.\n self.delete_tokens(user_id, trust_id=trust['id'],\n tenant_id=project_id)\n for trust in self.trust_api.list_trusts_for_trustor(user_id):\n # Ensure we revoke tokens associated to the trust / project /\n # user_id combination where the user_id is the trustor.\n\n # NOTE(morganfainberg): This revocation is a bit coarse, but it\n # covers a number of cases such as disabling of the trustor user,\n # deletion of the trustor user (for any number of reasons). It\n # might make sense to refine this and be more surgical on the\n # deletions (e.g. don't revoke tokens for the trusts when the\n # trustor changes password). 
For now, to maintain previous\n # functionality, this will continue to be a bit overzealous on\n # revocations.\n self.delete_tokens(trust['trustee_user_id'], trust_id=trust['id'],\n tenant_id=project_id)", "def delete_all_keypairs(self, user):\n msg = \"delete_all_keypairs not implemented\"\n raise NotImplementedError(msg)", "def delete_by(self, user):\n if user.is_superuser or user is self.added_by:\n self.delete()", "def delete_user(self, user_id):\n sql = 'update account_user set is_deleted = 1 where id = %s'\n with connection.cursor() as cursor:\n cursor.execute(sql, [user_id])\n row = cursor.fetchone()\n\n return row", "def remove_apikey_from_keyring(platform_id='public', # type: str\n base_url=None, # type: str\n keyring_entries_username=KR_DEFAULT_USERNAME, # type: str\n ):\n client = ODSClient(platform_id=platform_id, base_url=base_url, keyring_entries_username=keyring_entries_username)\n client.remove_apikey_from_keyring()", "def delete_user_access_token(self, user_id, user_password, user_access_token, give_json=False):\n\n url = Constants.BASE_URL + 'domains/users/accesstokens'\n response = requests.delete(url=url,\n params={'key': self.api_key, 'user_id': user_id, 'user_password': user_password})\n if give_json:\n return response.json()\n else:\n return response.text", "def userdel(pwfile, user):\n return __salt__[\"webutil.userdel\"](pwfile, user)", "def delete(self, userguid, jobguid=\"\", executionparams=dict()):", "def delete_user(request):\n user_id = request.POST.get('user_id')\n User.objects.filter(id=user_id).delete()\n response = {'status': 1, 'status_message': 'Success'}\n return HttpResponse(json.dumps(response))", "def delete_user(UserName=None, AuthenticationType=None):\n pass", "def delete_key(stub, key, version):\n try:\n response = stub.Delete(keyval_pb2.DeleteRequest(key=key, current_version=version))\n print(\"Delete result:\")\n print_response(response)\n except grpc.RpcError as exception:\n print_response(exception)" ]
[ "0.70384455", "0.6984856", "0.69313693", "0.66379356", "0.663607", "0.65839124", "0.6564669", "0.656256", "0.6548626", "0.65405446", "0.6510242", "0.64997286", "0.6473384", "0.646185", "0.64603364", "0.6452552", "0.64474607", "0.6435671", "0.6400676", "0.63887805", "0.6383552", "0.63346934", "0.6330828", "0.6319978", "0.62541276", "0.62272936", "0.6173618", "0.60936904", "0.60935277", "0.6082945", "0.60137767", "0.6005738", "0.59992486", "0.5989201", "0.5955624", "0.59459984", "0.59426415", "0.5938681", "0.5932155", "0.5931391", "0.59232825", "0.5900192", "0.5881067", "0.5880939", "0.5871987", "0.5870696", "0.5864894", "0.5805828", "0.58007014", "0.57944095", "0.5792746", "0.57839423", "0.5783045", "0.5781681", "0.57766587", "0.57766587", "0.5769786", "0.57630867", "0.5751628", "0.5738935", "0.5734437", "0.57209706", "0.571447", "0.57100576", "0.56945413", "0.56860954", "0.5685867", "0.5677977", "0.56737804", "0.5663961", "0.5658039", "0.5652058", "0.56434727", "0.5636081", "0.56351805", "0.562521", "0.5622881", "0.5606082", "0.55983305", "0.5587801", "0.55551344", "0.55470675", "0.5519469", "0.5514554", "0.5511283", "0.5489597", "0.5488018", "0.5488018", "0.5488018", "0.54851013", "0.5482619", "0.54751885", "0.5472474", "0.5471002", "0.54706866", "0.54605806", "0.5458372", "0.545519", "0.54513955", "0.5425047" ]
0.66472024
3
Deletes the specified auth token for the specified user.
def delete_auth_token(self, user_id, auth_token_id, **kwargs):\n        resource_path = "/users/{userId}/authTokens/{authTokenId}"\n        method = "DELETE"\n\n        # Don't accept unknown kwargs\n        expected_kwargs = [\n            "retry_strategy",\n            "if_match"\n        ]\n        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n        if extra_kwargs:\n            raise ValueError(\n                "delete_auth_token got unknown kwargs: {!r}".format(extra_kwargs))\n\n        path_params = {\n            "userId": user_id,\n            "authTokenId": auth_token_id\n        }\n\n        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n        for (k, v) in six.iteritems(path_params):\n            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n        header_params = {\n            "accept": "application/json",\n            "content-type": "application/json",\n            "if-match": kwargs.get("if_match", missing)\n        }\n        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n        retry_strategy = self.retry_strategy\n        if kwargs.get('retry_strategy'):\n            retry_strategy = kwargs.get('retry_strategy')\n\n        if retry_strategy:\n            return retry_strategy.make_retrying_call(\n                self.base_client.call_api,\n                resource_path=resource_path,\n                method=method,\n                path_params=path_params,\n                header_params=header_params)\n        else:\n            return self.base_client.call_api(\n                resource_path=resource_path,\n                method=method,\n                path_params=path_params,\n                header_params=header_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def delete_user(self, user):\n self.delete(user)", "def delete_user_access_token(self, user_id, user_password, user_access_token, give_json=False):\n\n url = Constants.BASE_URL + 'domains/users/accesstokens'\n response = requests.delete(url=url,\n params={'key': self.api_key, 'user_id': user_id, 'user_password': user_password})\n if give_json:\n return response.json()\n else:\n return response.text", "def delete_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n token = data.get(\"token\")\n\n valid, message = is_token_valid(token, address)\n if not valid:\n return jsonify(error=message), 400\n\n force_expire_token(token)\n\n return jsonify(success=\"Token has been deactivated.\")", "def delete_user():\n token = request.args.get('token')\n data = jwt.decode(token, app.config['SECRET_KEY'])\n\n permit = functions.delete_user(data)\n if permit:\n return make_response(jsonify({'Delete': 'User Deleted Successfully'}), 201)\n else:\n return make_response(jsonify({'Delete Failed': 'Credentials not match or the user not exist'}), 201)", "def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)", "def delete(self, user_id):\r\n return delete_user(request, user_id)", "def delete(self, request, user_id=None):\n data = json.loads(request.body.decode())\n authenticated = Account.check_credentials(request, data['email'], data['password'])\n user = {}\n user['account_id'] = authenticated.id\n\n if authenticated.check_admin(request, user):\n NLTKOutput.remove(request=request, pk=user_id)\n Account.remove(request=request, pk=user_id)\n return Response(json='Account and content deleted', status=204)\n\n return Response(json='Not Authorized', status=401)", "def delete(self, user_id):\n return delete_user(user_id)", "def team_user_delete(token_user, team_id, user_id):\n team = Team.query.get(team_id)\n if team is None:\n abort(404, 'team not found')\n\n if len(team.members) == 1:\n abort(400, 'only one member on team -- use team delete instead')\n\n # check for permissions to delete the team\n if not (token_user.has_permission('team.update.elevated') or\n (token_user.has_permission('team.update') and\n team.has_member(token_user))):\n abort(403, 'insufficient permissions to delete user from team')\n\n user = User.query.get(user_id)\n if user is None:\n abort(400, 'invalid user id')\n\n user.teams.remove(team)\n get_db().commit()\n\n return '', 204", "def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])", "def delete(self, new_data, user_id):\n print(new_data)\n request_id = get_jwt_identity()\n user = user_crud.get(request_id)\n if not user.is_superuser:\n abort(401,\n message=\"You do not have permission to view this endpoint\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def delete_user():", "def 
delete_user(self, user):\n name = utils.get_name(user)\n self._user_manager.delete(name)", "def delete(cls):\n user = user_schema.load(request.get_json(), partial=(\"email\",))\n\n current_identity = get_jwt_identity()\n db_user = UserModel.find_by_id(current_identity)\n logging.info(\n f\"Delete called by {db_user.id}: {db_user.username} with data: {user['username']}\"\n )\n if db_user.username == user['username']:\n if is_correct_password(db_user.pw_salt, db_user.pw_hash, user['password']):\n db_user.delete_from_db()\n return {\"message\": msgs.DELETED.format(db_user.username)}, 200\n else:\n return {\"error\": msgs.INVALID_PASSWORD}, 401\n return {\"error\": msgs.OWN_RECORD_ONLY}, 401", "def user_id_delete(user_id):\n user = storage.get(\"User\", user_id)\n\n if user is None:\n abort(404)\n user.delete()\n del user\n return make_response(jsonify({}), 200)", "def deleteUser(user):\n delete_user(user)\n return redirect(url_for('login'))", "def delete(self, user_id):\n user = User.query.get(user_id)\n \n if user is None:\n return abort(422, message=\"User does not exist\")\n \n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return abort(422, message=\"User is the only admin, there must be at least one admin in the system\")\n \n user.delete()\n \n return { 'message': \"User '{}' has been deleted\".format(user.id) }", "def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def delete(user_id):\n assert isinstance(user_id, ObjectId)\n\n User.objects(id=user_id).delete()", "def delete(self, url, user):\n token = self.login(user)\n response = requests.delete(url_root + url, headers={\"access-token\": token})\n return response.json(), response.status_code", "def delete_user(self, user_id):\n return self._delete('/users/{0}'.format(user_id))", "def delete_user(user_id=None):\n\n user = storage.get(\"User\", user_id)\n if user is None:\n abort(404)\n else:\n storage.delete(user)\n storage.save()\n return jsonify({}), 200", "def user_delete(user_id):\n user = storage.get('User', user_id)\n if user is None:\n abort(404)\n user.delete()\n storage.save()\n return jsonify({}), 200", "def delete_user(user_id=None):\n obj = storage.get('User', user_id)\n if obj is None:\n abort(404)\n else:\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200", "def delete(request):\n # user_name == user_id\n required_fields = ['user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # check for not allowed characters\n if check_special_characters(str(data['user_id'])) or check_special_characters(str(data['token'])):\n return Response({'error': str('Unaccepted character passed!')},\n status=status.HTTP_400_BAD_REQUEST)\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Here remove the user's account from the database\n if not db.remove_user(data['user_id']):\n return Response({'error': str('Error when removing the user account!')}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", 
"def delete_user(user_id):\n current_user = get_jwt_identity()\n\n if not current_user:\n print('uri=/login error=\"Missing user\"', flush=True)\n return jsonify(message=\"Missing user\"), 400\n\n if not Administrator.is_administrator(current_user):\n print('non-admin user error', flush=True)\n return jsonify(message=\"You are not allowed to delete other users\"), 403\n\n if user_id == current_user:\n return jsonify(message=\"You are not allowed to delete yourself\"), 403\n\n try:\n User.delete(user_id)\n return jsonify(message=\"Delete succeeded\"), 200\n\n except Exception as e:\n print(e, flush=True)\n return jsonify(message='{}'.format(e)), 501", "def del_user_id(user_id):\r\n obj = storage.get(User, user_id)\r\n if obj is None:\r\n abort(404)\r\n obj.delete()\r\n storage.save()\r\n return jsonify({}), 200", "def test_delete_user_with_valid_input_using_token(self):\n # setup\n user = self.generate_username_password()\n resp1 = self.create_user(user)\n try:\n assert resp1.status_code == 201\n assert resp1.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp1.request)\n self.pprint_response(resp1)\n resp_body1 = resp1.json()\n uuid_ = resp_body1[\"userID\"]\n resp2 = self.generate_token(user)\n try:\n assert resp2.status_code == 200\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp2.request)\n self.pprint_response(resp2)\n resp_body2 = resp2.json()\n token = resp_body2[\"token\"]\n\n # test\n resp3 = self.delete_user_token(uuid_, token)\n try:\n assert resp3.status_code == 204\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp3.request)\n self.pprint_response(resp3)\n\n # teardown: none", "def delete_user(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['id'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v1\")", "def user_delete(user_id=None):\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200", "def delete_user(user_id):\n user = storage.get(User, user_id)\n if user is None:\n abort(404)\n storage.delete(user)\n storage.save()\n return jsonify({}), 200", "def delete_user(self, user_id):\n\n # ask the model to delete the user\n um = User(self.settings)\n status = um.delete(user_id)\n\n # return\n return status", "def delete_user(payload, user_id):\n user = User.query.get(user_id)\n # exception for non existing id\n if user is None:\n abort(404)\n # set error status\n error = False\n # delete the user\n try:\n user.delete()\n except Exception:\n user.rollback()\n error = True\n print(sys.exc_info())\n finally:\n user.close_session()\n\n if error:\n abort(422)\n\n return jsonify({\n 'success': True,\n 'deleted': user_id\n })", "def delete_user(self, _id):\n return self.make_request(\"DELETE\", \"users/\"+_id, {})", "def delete_user(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.delete_user(user_id)", "def delete_user(UserName=None, AuthenticationType=None):\n pass", "def delete_user(self, user_name):\n user = self.get_user(user_name)\n return self.client.delete_resource(user.get('href'))", "def delete(self, user_id):\n user = User.query.get(user_id)\n\n if user is None:\n return mk_response(\"User does not exist\", 422)\n\n # check if 
the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return mk_response(\"User is the only admin, there must \" +\n \"be at least one admin in the system\", 422)\n\n user.delete()\n\n return mk_response(\"User '{}' has been deleted\".format(user.id))", "def delete_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover", "def delete_user(user_id):\n user_obj = storage.get(\"User\", user_id)\n if user_obj:\n storage.delete(user_obj)\n storage.save()\n return jsonify({}), 200\n else:\n abort(404)", "def delete_user():\n #TODO user delete\n pass", "def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass", "def delete_user(user_id):\n temp = models.storage.get('User', user_id)\n if temp is None:\n abort(404)\n temp.delete()\n models.storage.save()\n return jsonify({})", "def delete_user(user_id):\n usr = storage.get(User, user_id)\n if usr:\n usr.delete(), storage.save()\n return {}\n else:\n abort(404)", "def delete_tokens_for_user(self, user_id, project_id=None):\n if not CONF.token.revoke_by_id:\n return\n self.delete_tokens(user_id, tenant_id=project_id)\n for trust in self.trust_api.list_trusts_for_trustee(user_id):\n # Ensure we revoke tokens associated to the trust / project\n # user_id combination.\n self.delete_tokens(user_id, trust_id=trust['id'],\n tenant_id=project_id)\n for trust in self.trust_api.list_trusts_for_trustor(user_id):\n # Ensure we revoke tokens associated to the trust / project /\n # user_id combination where the user_id is the trustor.\n\n # NOTE(morganfainberg): This revocation is a bit coarse, but it\n # covers a number of cases such as disabling of the trustor user,\n # deletion of the trustor user (for any number of reasons). It\n # might make sense to refine this and be more surgical on the\n # deletions (e.g. don't revoke tokens for the trusts when the\n # trustor changes password). 
For now, to maintain previous\n # functionality, this will continue to be a bit overzealous on\n # revocations.\n self.delete_tokens(trust['trustee_user_id'], trust_id=trust['id'],\n tenant_id=project_id)", "def revoke_token(self, token, token_type_hint, request, *args, **kwargs):\n if token_type_hint:\n tok = self._tokengetter(**{token_type_hint: token})\n else:\n tok = self._tokengetter(access_token=token)\n if not tok:\n tok = self._tokengetter(refresh_token=token)\n\n if tok and tok.client_id == request.client.client_id:\n request.client_id = tok.client_id\n request.user = tok.user\n tok.delete()\n return True\n\n msg = 'Invalid token supplied.'\n log.debug(msg)\n request.error_message = msg\n return False", "def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))", "def delete_user(user_id):\n netAdminToolDB = app.config['DATABASE']\n user = netAdminToolDB.get_user(user_id)\n\n if user == None:\n return jsonify({'error': 'User_id not found'}), 404\n\n netAdminToolDB.delete_user(user_id)\n return jsonify({'result': True})", "def delete_user():\n\n request_data = request.get_json(silent=True)\n\n if not request_data:\n logger.warning('Delete_user failed - data missing')\n result = {'message': 'Deletion failed - data missing'}\n return jsonify(result), 400\n\n password = request_data.get('password')\n\n if password is None:\n logger.error('Delete_user failed - no password provided')\n result = {'message': 'Deletion failed - no password provided'}\n return jsonify(result), 400\n\n if not current_user.check_password(password):\n logger.error('Delete_user failed - wrong password provided')\n result = {'message': 'Deletion failed - password incorrect'}\n return jsonify(result), 401\n\n # TODO: make sure to not delete the dashboard if it is shared between users\n for dash in dashboard.dashboards_of_user(current_user.id):\n try:\n dashboard.remove_from_repository(dash)\n\n periodic_tasks.remove_task(('dashboard', dash.id, 'historic_fetching'))\n periodic_tasks.remove_task(('dashboard', dash.id, 'fetching'))\n periodic_tasks.remove_task(('dashboard', dash.id, 'pinging'))\n except KeyError:\n logger.warning(f'Dashboard {dash} from user {current_user} has already been removed.')\n\n try:\n user.remove_from_repository(current_user.id)\n except KeyError:\n result = {'message': 'User not found in database.'}\n logger.warning(f'Delete_user failed - {current_user} was not found in the database.')\n return jsonify(result), 500\n\n logger.info(f'{current_user} deleted themselves successfully.')\n\n result = {'message': f'User {current_user} successfully deleted themselves.'}\n return jsonify(result), 200", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete(self, user: 'UserCondensed'):\n self._delete(entity=user)", "def delete_user(id):\n pass", "def delete(self, user_id):\n\n try:\n self.get(user_id)\n url = \"{0}/users/{1}\".format(self.base_url, user_id)\n url = self._add_token_to_url(url)\n self.session.headers.update({\"Content-Type\": \"application/x-www-form-urlencoded\"})\n self.logger.debug(\"Deleting user with ID: <{0}>\".format(user_id))\n response = self.session.delete(url)\n self.logger.debug(\"Received response code {0} with reason {1}\"\n .format(response.status_code, response.reason))\n if response.status_code == 200:\n self.logger.debug(\"User successfully deleted\")\n else:\n raise 
InvalidResponseCodeException(\"Response code invalid, the expected response code is {0}, \"\n \"the actual response code is {1}\".format(200, response.status_code))\n return None\n except UserNotFoundException as err:\n self.logger.debug(\"User not found, error {0}\".format(err))", "def delete_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"delete_user\")", "def delete_user(self):\n\n User.user_list.remove(self)", "def logout(request):\n request.user.auth_token.delete()\n return Response({}, status=status.HTTP_200_OK)", "def delete_tokens(self, user_id, tenant_id=None, trust_id=None,\n consumer_id=None):\n if not CONF.token.revoke_by_id:\n return\n token_list = self._list_tokens(user_id,\n tenant_id=tenant_id,\n trust_id=trust_id,\n consumer_id=consumer_id)\n\n for token in token_list:\n try:\n self.delete_token(token)\n except exception.NotFound:\n pass", "async def delete_user(user_id):\n \n user = User.select().where(User.id == user_id).first()\n\n if not user:\n return HTTPException(404, 'User not found')\n else:\n user.delete_instance()\n\n return f\"User {user.username} deleted successfully\"", "def delete(self, user_id):\n\n user = User.objects.get_or_404(public_id=user_id)\n return user.delete()", "def remove(self, token):\n self.rpc.call(MsfRpcMethod.AuthTokenRemove, [token])", "def delete_user(self) -> None:\n table_dictionary = {\n 'Apple': {\n 'table': 'AppleReceipts',\n 'user_id': 'User_id'\n },\n 'ESL': {\n 'table': 'ESLReceipts',\n 'user_id': 'User_id'\n },\n 'Transactions': {\n 'table': 'Transactions',\n 'user_id': 'User_id'\n },\n 'Users': {\n 'table': 'Users',\n 'user_id': 'id'\n },\n }\n\n # delete the current user's information from the db.\n for key in table_dictionary:\n query = f\"\"\"\n DELETE\n FROM {table_dictionary[key]['table']}\n WHERE {table_dictionary[key]['user_id']}=?;\n \"\"\"\n self.db.commit(query, values=(self.id,))\n\n # perform a sign out\n self.sign_out()\n\n log(f\"User:{self.id} has deleted their account.\")", "def delete_user(self, user_name):\r\n params = {'UserName' : user_name}\r\n return self.get_response('DeleteUser', params)", "def delete_account(request):\n collected_values = {}\n \n if request.method != 'POST':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n \n uid = request.POST[\"user_id\"]\n token = request.POST[\"token\"]\n\n # Check auth\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n change_query = \"UPDATE linx_luser SET username = \\'{}\\' WHERE user_id = {}\".format(\"DELETE ME\", uid)\n with connection.cursor() as cursor:\n cursor.execute(change_query)\n\n collected_values[\"user_id\"] = uid\n collected_values[\"token\"] = token\n collected_values[\"executed_query\"] = change_query\n\n LOGGER.info(\"Delete account request: %s\", collected_values)\n return JsonResponse(collected_values, status=200)", "def logout(user_id):\n if request.method == 'POST':\n auth_header = request.headers.get('Authorization')\n auth_token = auth_header.split(\"Bearer \")[1]\n if auth_token and not TokenBlacklisting.verify_token(\n auth_token=auth_token):\n auth_data = User.decode_token(auth_token)\n if not isinstance(auth_data, str):\n blacklist_token = TokenBlacklisting(token=auth_token)\n try:\n blacklist_token.save_token()\n user = 
User.query.filter_by(id=user_id).first()\n return make_response(\n jsonify({\n 'message': 'see you soon {}, you have successfully logged out'.format(user.name)\n })), 200\n except Exception as e:\n return make_response(jsonify({\"message\": e})), 400\n return make_response(jsonify({\"message\": auth_data})), 404\n return make_response(\n jsonify({\n \"message\": \"Please provide a valid token\"\n })), 403\n return None", "def del_user(self, username):\n pass", "def revoke_token(token):\n token.delete_instance()", "def removeToken(self, token):\n self.__require_privilaged_access()\n with DBSession(self.__config_db) as session:\n # Check if the given token is a personal access token so it can be\n # removed.\n user = self.getLoggedInUser()\n num_of_removed = session.query(Session) \\\n .filter(Session.user_name == user) \\\n .filter(Session.token == token) \\\n .filter(Session.can_expire.is_(False)) \\\n .delete(synchronize_session=False)\n session.commit()\n\n if not num_of_removed:\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.DATABASE,\n \"Personal access token {0} was not found in the \"\n \"database.\".format(token))\n\n # Invalidate the local session by token.\n self.__manager.invalidate_local_session(token)\n\n LOG.info(\"Personal access token '%s...' has been removed by '%s'.\",\n token[:5], self.getLoggedInUser())\n\n return True", "def delete(self, token_id):\n sm = get_storage_manager()\n token = sm.get(models.Token, token_id, fail_silently=True)\n if token and _can_manage_token(token):\n sm.delete(token)\n return None, 204\n else:\n raise NotFoundError(f'Could not find token {token_id}')", "def delete_user(user_id):\n\n user = User.query.get(user_id)\n db.session.delete(user)\n db.session.commit()\n return", "def delete_user(request, user):\n\n if models.Group.created_by(user).count() > 0:\n raise UserDeletionError('Cannot delete user who is a group creator.')\n\n user.groups = []\n\n query = _all_user_annotations_query(request, user)\n annotations = es_helpers.scan(client=request.es.conn, query={'query': query})\n for annotation in annotations:\n storage.delete_annotation(request, annotation['_id'])\n\n request.db.delete(user)", "def userdel(pwfile, user):\n return __salt__[\"webutil.userdel\"](pwfile, user)", "def fusion_api_remove_user(self, name=None, uri=None, api=None, headers=None):\n return self.user.delete(name, uri, api, headers)", "def delete_user(user_id):\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect('/')", "def delete(self, id):\n\t\ttry:\n\t\t\tuser_service.delete(id)\n\t\texcept AssertionError as e:\n\t\t\tuser_space.abort(400, e.args[0], status = \"Could not delete user\", statusCode = \"400\")\n\t\texcept Exception as e:\n\t\t\tuser_space.abort(500, e.args[0], status = \"Could not delete user\", statusCode = \"500\")", "def delete_user(self,userid, cursor):\n sql=\"DELETE FROM users WHERE userid = %s\"\n cursor.execute(sql,(userid))", "def delete_user_by_xng_id(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/xngId/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['xngId'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v1\")", "def delete(user_id: int):\n usr = get_by_id(user_id)\n if not usr:\n raise UserNotFound\n\n db.session.delete(usr)\n db.session.commit()", "def delete(self, request):\n serializer = UserLogoutSerializer(data=request.data)\n if 
serializer.is_valid(raise_exception=True):\n token = RefreshToken(serializer.validated_data[\"refresh\"])\n token.blacklist()\n return Response(status=status.HTTP_204_NO_CONTENT)", "async def del_user(conn: LDAPConnection, user: dict, mailman: Client) -> None:\n await conn.delete(user[\"dn\"])\n uid = user[\"attributes\"][\"uid\"][0]\n rmtree(user[\"attributes\"][\"homeDirectory\"][0])\n rmtree(f\"/webtree/{uid[:1]}/{uid}\")\n mailing_list = mailman.get_list(\"announce-redbrick\")\n mailing_list.unsubscribe(f\"{uid}@redbrick.dcu.ie\")", "def deleteUser(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def remove(self, user_id):\n pass", "def delete_by_email(\n user_to_delete: schemas.UserDelete,\n db_session: Session = Depends(get_db),\n current_user: models.User = Depends(get_current_admin_user)\n):\n db_user = crud.get_by_email(db_session, user_to_delete.email)\n\n if not db_user:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f'User with email \"{user_to_delete.email}\" not found.'\n )\n\n crud.remove(db_session, db_user)", "def test_delete_u2ftoken(self):\n response = self.client.delete_u2ftoken(\"DU012345678901234567\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/u2ftokens/DU012345678901234567\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def delete_user(self, user_id):\n sql = 'update account_user set is_deleted = 1 where id = %s'\n with connection.cursor() as cursor:\n cursor.execute(sql, [user_id])\n row = cursor.fetchone()\n\n return row", "def del_user(user_id):\n log = current_app.log\n db = request.db\n Site = db.tables.Site\n Cred = db.tables.Cred\n auth_user_id = SiteService.get_current_uid()\n # Check the user is deleting their own items\n if auth_user_id != user_id:\n log.warn(\"User %u tried to delete sites belonging to user %u.\",\n auth_user_id, user_id)\n abort(404)\n sites = Site.query.filter_by(site_owner=auth_user_id).all()\n num_sites = len(sites)\n creds = Cred.query.filter_by(cred_owner=auth_user_id).all()\n num_creds = len(creds)\n with managed_session(request,\n message=\"Database error while deleting sites\",\n http_error_code=500) as session:\n for cred in creds:\n session.delete(cred)\n for site in sites:\n session.delete(site)\n log.info(\"Deleted all sites for user %u (%u sites, %u creds deleted).\",\n auth_user_id, num_sites, num_creds)\n return \"\"", "def delete(self, user_id):\n res = self._user.delete_user(user_id)\n\n if res:\n return {\n \"status\": 200,\n \"data\": [{\n \"id\": res[\"id\"],\n \"message\": \"user record has been deleted\"\n }]\n }, 200\n else:\n return {\n \"status\": 404,\n \"error\": \"Not found for id {}\".format(user_id)\n }, 404", "def delete_user(request):\n user_id = request.POST.get('user_id')\n User.objects.filter(id=user_id).delete()\n response = {'status': 1, 'status_message': 'Success'}\n return HttpResponse(json.dumps(response))", "async def delete(self, request, uid):\n return await super(User, self).delete_item(request.app.pool, 'user',\n uid)", "def delete(self, id):\n # Get the user from the auth header\n auth_username, auth_password = decode_basic_auth_info(request)\n auth_user = User.query.filter(User.username==auth_username).first()\n if not auth_user.admin:\n return Response(status=403)\n\n user = User.query.get(id)\n 
if user is None:\n return Response(status=400)\n db.session.delete(user)\n db.session.commit()\n return Response(status=202)", "def delete_user_by_id(user_id):\n return woo_request_helper().delete_details(wc_endpoint='customers/{}'.format(user_id))", "def logout(self, request, *args, **kwargs):\n token = get_object_or_404(Token, key=request.auth)\n token.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def delete_user(BrokerId=None, Username=None):\n pass", "def deauth(request):\n\n if(request.token):\n request.token.delete()\n return JsonResponse({'message': 'Your token is revoked'}) \n else:\n return HttpResponseBadRequest('It does not make sense to revoke a token ' +\n 'if no token are supplied to the request')", "def delete_user(user_id):\n\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect(\"/users\")", "def delete_user(user_id):\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect(\"/users\")", "def delete_user(self, user):\n\n if self.sql_read_only:\n return False\n\n if not self.check_prereqs():\n return False\n\n if not self.has_user(user):\n return False\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_delete_user_query,{'username_field':self.sql_username_field,'username':user})\n self.log.debug(\"sqlflexibleauthstore: delete_user: %s\" % (query,))\n cursor.execute(query)\n\n db.commit()\n del_user_attribute(self.env,username=user)\n return True" ]
[ "0.7637971", "0.7298944", "0.71500826", "0.71234906", "0.71165484", "0.71092767", "0.69556636", "0.6910611", "0.6849392", "0.68058825", "0.6793231", "0.6755561", "0.6754695", "0.6745995", "0.6745789", "0.6745456", "0.67416966", "0.66653407", "0.6662869", "0.6655789", "0.6644038", "0.66251963", "0.66034275", "0.6601392", "0.6585246", "0.65830773", "0.6576603", "0.6571786", "0.6571602", "0.6562602", "0.6560404", "0.655624", "0.65491235", "0.6541022", "0.65103924", "0.6505064", "0.65046304", "0.6501482", "0.64978683", "0.6497763", "0.6481882", "0.6473348", "0.6463594", "0.6444087", "0.6435311", "0.6430937", "0.64296705", "0.6421254", "0.641206", "0.6390621", "0.6379585", "0.6354915", "0.6354915", "0.6354915", "0.6342474", "0.63403916", "0.6338962", "0.63338864", "0.6330765", "0.63150513", "0.63101274", "0.6295021", "0.6292687", "0.62694955", "0.62672913", "0.6258886", "0.62586355", "0.6220041", "0.6205458", "0.6194279", "0.6189593", "0.6188011", "0.61835885", "0.6182893", "0.6180866", "0.616029", "0.6158733", "0.6153124", "0.6148964", "0.61363417", "0.6113673", "0.6103561", "0.6099578", "0.6089835", "0.60692686", "0.6059557", "0.6054081", "0.60494685", "0.60491353", "0.6039055", "0.6031664", "0.603018", "0.60296917", "0.6028425", "0.6018047", "0.6015318", "0.601053", "0.6003411", "0.60003346", "0.59943825" ]
0.6980072
6
Deletes the specified compartment. The compartment must be empty.
def delete_compartment(self, compartment_id, **kwargs):
    resource_path = "/compartments/{compartmentId}"
    method = "DELETE"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "if_match"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "delete_compartment got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "compartmentId": compartment_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeCompartment(self, *args):\n return _libsbml.Model_removeCompartment(self, *args)", "def removeCompartmentReference(self, *args):\n return _libsbml.MultiCompartmentPlugin_removeCompartmentReference(self, *args)", "def delcomponent(self,\n context=[],\n componentid=None):\n if componentid == None:\n raise ValueError, \"delcomponent: componentid is None\"\n return jsoncall.do_call(\"delcomponent\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password,\\\n 'context':context,\\\n 'componentid':componentid},\n self.connection)", "def delete(args, config):\n print('Deletes a selected HPC fleet with name \"{}\"'.format(args.fleet_name))", "def delete_address(self) -> object:\n self.delete_button.click()\n\n return DeletionModal(self).wait_for_component_to_be_present()", "def delete_pcb_component(self, comp_name):\n arg = [\"NAME:Selections\", \"Selections:=\", comp_name]\n\n self.modeler.oeditor.Delete(arg)\n return True", "def removeCompartmentType(self, *args):\n return _libsbml.Model_removeCompartmentType(self, *args)", "def removeCompartmentGlyph(self, *args):\n return _libsbml.Layout_removeCompartmentGlyph(self, *args)", "def delete_composed_node(cls, composed_node_uuid):\n cls.dbdriver.delete_composed_node(composed_node_uuid)", "def vertree_delete(self):\n if not self.attached:\n raise CastleCollectionNotAttachedException()\n raise Exception(\"TODO\")", "def _deleteElement(self, identifier):\n self._collection.removeByIdentifier(identifier)\n return Deleted()", "def delete(self):\n self.log.info('Deleting')\n self._state = PonPort.State.DELETING\n self._cancel_deferred()", "def unsetCompartment(self):\n return _libsbml.CompartmentReference_unsetCompartment(self)", "def delete(self):\n if not self.attached:\n raise CastleCollectionNotAttachedException()\n raise Exception(\"TODO\")", "def delete(self, department_id):\n department = get_department_by_id(department_id)\n db.session.delete(department)\n db.session.commit()\n return {}, 204", "def unsetCompartment(self):\n return _libsbml.Reaction_unsetCompartment(self)", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def delete(self, endpoint, content=None, params=None):\n\t\treturn self._call(\"DELETE\", endpoint, content, params)", "def delete(self, controller, virtual_drive='all'):\n return self.run('/c{} /v{} del force'.format(controller, virtual_drive))", "def DeleteJob(self, job_urn, token=None):\n aff4.FACTORY.Delete(job_urn, token=token)", "def delete(self, request, app_id, addon_name):\n addon = Addon.objects.get(app__app_id=app_id, display_name=addon_name)\n provider = get_provider_from_provider_name(addon.provider_name)\n result = provider.deprovision(addon.provider_uuid)\n manager = StateMachineManager()\n with manager.transition(addon.id, AddonEvent.deprovision_success):\n pass\n manager.start_task(addon.id)\n return self.respond({'message': result['message']})", "def delete(self):\r\n if self.__abstract__:\r\n raise ThunderdomeException('cant delete abstract elements')\r\n if self.eid is None:\r\n return self\r\n query = \"\"\"\r\n g.removeVertex(g.v(eid))\r\n g.stopTransaction(SUCCESS)\r\n \"\"\"\r\n results = execute_query(query, {'eid': self.eid})", "def delete(self, name):\n instance = self.get_one_instance('name', name)\n\n if type(instance) != self.Component:\n set_session_var('errors', str(instance))\n return None\n\n res = delete_in_db(instance)\n\n if res != 'deleted':\n set_session_var('errors', 
str(res))\n else:\n set_session_var('success', res)\n\n return True", "def delete(self, endpoint, params=None):\n params = params or dict()\n return self.request(verb=requests.delete, address=self.project_address + endpoint,\n params=params)", "def _delete(performer):\n if not isinstance(performer, helper._AelObjectPerformer):\n raise Exception('Invalid delete performer type')\n\n try:\n util.delete(\n obj=performer.getObject(), testmode=performer.isInTestMode()\n )\n except Exception as e:\n raise Exception('Failed to delete %s: %s' % (performer._name, str(e)))\n\n return", "def remove(self, component) -> None:\n pass", "def delete(self):\n\n headers = self._default_headers()\n\n return self._request(self.name,\n ok_status=None,\n data=None,\n headers=headers,\n method=\"DELETE\")", "def delete_provisioning(self, identifier):\n return self.client.call(\"SoftLayer_Provisioning_Hook\", \"deleteObject\", id=identifier)", "def delete(self, vehicle_id=None):\n raise NotImplementedError()", "def delete(self,\n tier1_id,\n segment_id,\n port_id,\n ):\n return self._invoke('delete',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n 'port_id': port_id,\n })", "def delete(self):\n del self.shx.atoms[self.index]", "def remove(self, *args):\n return _libsbml.ListOfCompartmentReferences_remove(self, *args)", "def delete(self, customerguid, jobguid=\"\", executionparams=None):", "def delete(self):\n self.manager.delete(self.name)", "def delete(self):\n self.manager.delete(self.name)", "def delete(self):\n self.current_revision.delete()", "def delete(self, location, data=None, headers={}):\n return self._communicate(vxg.core.request.DeleteRequest,\n location, data, headers)", "def delete_from_objectstore(container, object_name):\n return get_conn().delete_object(container, object_name)", "def delete_deployment(request, deployment, **_kwargs):\n pass", "def delete_department(department_id):\n\n department_obj = Department.query.get_or_404(department_id)\n db.session.delete(department_obj)\n db.session.commit()\n flash(f'Department {department_obj.name} successfully deleted.', 'success')\n return redirect(url_for('home'))", "def delete_coupled_el(self, el_to_del):\n try:\n self.coupled_el.remove(el_to_del)\n except ValueError:\n print(\"element is not in list\")", "def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")", "def delete_device(self):\n # PROTECTED REGION ID(CspSubElementSubarray.delete_device) ENABLED START #\n # PROTECTED REGION END # // CspSubElementSubarray.delete_device", "def delete(self):\n self._vertex_list.delete()\n self._vertex_list = None", "def delete(self):\n os.system(\"rm \"+self._name)", "def delete(self):\n\n raise NotImplementedError()", "def delete(self):\n self.oxdb.execute(DELETE, self.variable_name, commit=True)\n self._exists = None", "def delete_fleet(Name=None):\n pass", "def delete_contact(self, contact):\n self._delete('contacts', self._build_params(uuid=contact))", "def unsetCompartment(self):\n return _libsbml.Species_unsetCompartment(self)", "def delete(self, endpoint, params):\n\n return self._call(requests.delete, endpoint, params=params)", "def delete(self, **params):\n return self._api.delete_customer(self.id, **params)", "def delete(self):\n self.call('DELETE', expect=error.NO_CONTENT)", "def delX(self):\n del self.components[0]", "def delX(self):\n del self.components[0]", "def delete(self):\n self._client.delete(self)", "def deleteDevice(serial):\n swDB = switchdb.DB()\n swDB.deleteBySerial(serial)\n swDB.close()", "def 
do_command(self, args):\n compops = dbops.Completions()\n compops.delete(args)", "def delete(self):\r\n self.domain.delete_item(self)", "def delete(self, name):\n self.backend.delete(name)", "def do_destroy(self, arg):\n obj = self.verify(arg, 2)\n if obj:\n del storage.all()[obj]\n storage.save()", "def delete(self):\n pdbox._args.get(\"dryrun\") or shutil.rmtree(self.path)\n pdbox.info(\"Deleted %s/\" % self.path)", "def delete(self, obj=None):\n pass", "def remove_component(self, sCompName):\n del self._dComponents[sCompName]", "def unsetCompartment(self):\n return _libsbml.QualitativeSpecies_unsetCompartment(self)", "def delete(self, parameters = {}):\n\n self.__enforce_connected()\n self.collection.delete(self.identifier, parameters = parameters)", "def delete_dev_endpoint(self):\n self.glue_engine.delete_dev_endpoint(EndpointName=self.dev_endpoint_name)", "def delete(self):\n return self.parent.delete_instance(self.name)", "def delete(self):\n logger.info('Delete the port chain: %s' % self.name)\n # Delete port chain\n self.pc_client.delete('port_chain', self.name)\n\n logger.info('Delete the flow classifier.')\n self.pc_client.delete('flow_classifier', self.flow_conf['name'])\n\n # Delete all port pair groups\n logger.info('Delete port pair groups and port pairs.')\n srv_ppgrp_lst = self.srv_chain.get_srv_ppgrp_id()\n for grp_idx in range(len(srv_ppgrp_lst)):\n pp_grp_name = 'pp_grp_%s' % grp_idx\n self.pc_client.delete('port_pair_group', pp_grp_name)\n\n # Delete all port pairs\n for grp_idx, pp_grp in enumerate(srv_ppgrp_lst):\n for pp_idx in range(len(pp_grp)):\n pp_name = 'pp_%s_%s' % (grp_idx, pp_idx)\n self.pc_client.delete('port_pair', pp_name)", "def delete(self):\n return self.service.delete_one({\"_id\": self._id})", "def delete_container(self, container: Container):", "def delete(self):\n url = util.join_url(self.path, str(self['id']))\n new_attributes = self.api.delete(url)\n self.error = None\n self.merge(new_attributes)\n return self.success()", "def delete(self):\n self.model.remove_agents(self)", "def delete(self, psvm):\n self._delete('/os-psvm/%s' % (base.getid(psvm)))", "def delete(self, hDevicesList = consts.PRL_INVALID_HANDLE):\n\t\treturn Job(SDK.PrlVm_Delete(self.handle, conv_handle_arg(hDevicesList))[0])", "def delete(self, name=None):\n raise NotImplementedError", "def delete(self):\n if Model.data_connector:\n with Model.data_connector.u_lock:\n Model.data_connector.remove_object(self)", "def delete(device):\n delete_subject(device)\n return redirect_back('index')", "def delete(self):\n raise NotImplementedError", "def delete(self):\n self.storage.delete(basket=self)\n self.uncache()\n self._data = None\n self.dirty = False", "def delete_department(id):\r\n check_admin()\r\n\r\n department = Department.query.get_or_404(id)\r\n db.session.delete(department)\r\n db.session.commit()\r\n flash('You have successfully deleted the department.')\r\n\r\n # redirect to the departments page\r\n return redirect(url_for('admin.list_departments'))\r\n\r\n return render_template(title=\"Delete Department\")", "def remove_comp(self, component):\n if hasattr(component, 'name'):\n component = component.name\n try:\n del self['pore.mole_fraction.' 
+ component]\n except KeyError:\n pass", "def delete(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def delete():\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # The interatomic data.\n for interatom in interatomic_loop():\n # The data.\n if hasattr(interatom, 'j_coupling'):\n del interatom.j_coupling\n\n # The error.\n if hasattr(interatom, 'j_coupling_err'):\n del interatom.j_coupling_err", "def delete(self):\n\n uri = \"{0}/{1}\".format(self.base_uri, self.ip_or_ifname_or_group_name)\n\n try:\n response = self.session.request(\"DELETE\", uri)\n\n except Exception as e:\n raise ResponseError(\"DELETE\", e)\n\n if not utils._response_ok(response, \"DELETE\"):\n raise GenericOperationError(response.text, response.status_code)\n\n logging.info(\"SUCCESS: Deleting %s\", self)\n\n # Delete back reference from BGP_Routers\n for neighbor in self.__parent_bgp_router.bgp_neighbors:\n if (\n neighbor.ip_or_ifname_or_group_name\n == self.ip_or_ifname_or_group_name\n ):\n self.__parent_bgp_router.bgp_neighbors.remove(neighbor)\n\n # Delete object attributes\n utils.delete_attrs(self, self.config_attrs)", "def delete_thing(self, thing):\n try:\n self.things.remove(thing)\n except ValueError as e:\n print(e)\n print(\" in Environment delete_thing\")\n print(\" Thing to be removed: {} at {}\".format(thing, thing.location))\n print(\" from list: {}\".format([(thing, thing.location) for thing in self.things]))\n if thing in self.agents:\n self.agents.remove(thing)", "def delete_thing(self, thing):\n try:\n self.things.remove(thing)\n except ValueError as e:\n print(e)\n print(\" in Environment delete_thing\")\n print(\" Thing to be removed: {} at {}\".format(thing, thing.location))\n print(\" from list: {}\".format([(thing, thing.location) for thing in self.things]))\n if thing in self.agents:\n self.agents.remove(thing)", "def delete(self):\n pdbox._args.get(\"dryrun\") or os.remove(self.path)\n pdbox.info(\"Deleted %s\" % self.path)", "def delete(self):\n self.request().delete()", "def delete_container(self, account, container):\n \n pass", "def delete():", "def delete(self, index):\n del self.data[index]", "def delete_amenity_obj(amenity_id):\n amenity = storage.get(Amenity, amenity_id)\n if amenity:\n amenity.delete()\n storage.save()\n return jsonify({}), 200\n else:\n abort(404)", "def delete_thing(self, thing):\n try:\n self.things.remove(thing)\n except ValueError as e:\n print e\n print ' in Environment delete_thing'\n print ' Thing to be removed: %s at %s' % (thing, thing.location)\n print ' from list: %s' % [ (thing, thing.location) for thing in self.things ]\n\n if thing in self.agents:\n self.agents.remove(thing)", "def delete_controller(cls, args, config):\n # print \"MOLNSProvider.delete_provider(args={0}, config={1})\".format(args, config)\n if len(args) == 0:\n raise MOLNSException(\"USAGE: molns cluser delete name\")\n config.delete_object(name=args[0], kind='Controller')", "def delete(self, c_path):\n raise NotImplementedError", "def DELETE(self, req):\n account_partition, accounts, container_count = \\\n self.account_info(self.account_name, req)\n if not accounts:\n return HTTPNotFound(request=req)\n container_partition, containers = self.app.container_ring.get_nodes(\n self.account_name, self.container_name)\n headers = self._backend_requests(req, len(containers),\n account_partition, accounts)\n self._clear_container_info_cache(req)\n resp = 
self.make_requests(\n req, self.app.container_ring, container_partition, 'DELETE',\n req.swift_entity_path, headers)\n # Indicates no server had the container\n if resp.status_int == HTTP_ACCEPTED:\n return HTTPNotFound(request=req)\n return resp", "def delete(self):\n if self.parent:\n assert isinstance(self.parent, Collection) # only know how to delete from Collection parents\n self.parent.delete_child(self)\n else:\n self._mark_deleted()", "def delete_vm(self, tenant_id, vm_id):\n self.delete_vm_bulk(tenant_id, [vm_id])" ]
[ "0.6996225", "0.5918634", "0.5823131", "0.573744", "0.57213587", "0.571788", "0.5709205", "0.56610256", "0.56202364", "0.55895793", "0.5515105", "0.5480321", "0.5393466", "0.5318188", "0.5316087", "0.53012276", "0.5280208", "0.527711", "0.527472", "0.52587646", "0.5253273", "0.524758", "0.5220202", "0.51949716", "0.5167984", "0.5151166", "0.51484567", "0.5142402", "0.513271", "0.513119", "0.51286894", "0.51080537", "0.5098547", "0.5097509", "0.5097509", "0.5094285", "0.50864214", "0.50826687", "0.5057319", "0.5028532", "0.50267917", "0.502127", "0.50146234", "0.5012701", "0.50124025", "0.50053126", "0.5003556", "0.5000114", "0.49926504", "0.49919096", "0.4991871", "0.4988738", "0.49884978", "0.49862772", "0.49862772", "0.49716878", "0.4968557", "0.4968136", "0.49629208", "0.494342", "0.49406552", "0.49402833", "0.49367952", "0.4934206", "0.4927709", "0.49270913", "0.4923546", "0.49210626", "0.49187613", "0.4918581", "0.49049655", "0.49008647", "0.4898025", "0.48970437", "0.48899722", "0.48867658", "0.48855427", "0.488452", "0.48837966", "0.4882284", "0.48807925", "0.48746723", "0.4874196", "0.4873955", "0.48659307", "0.48658046", "0.4863432", "0.4863432", "0.48570007", "0.48564672", "0.48548347", "0.48538667", "0.485333", "0.4850514", "0.4850506", "0.4846199", "0.484355", "0.48392844", "0.4837746", "0.48366916" ]
0.6579754
1
Deletes the specified secret key for the specified user.
def delete_customer_secret_key(self, user_id, customer_secret_key_id, **kwargs):
    resource_path = "/users/{userId}/customerSecretKeys/{customerSecretKeyId}"
    method = "DELETE"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "if_match"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "delete_customer_secret_key got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "userId": user_id,
        "customerSecretKeyId": customer_secret_key_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_ssh_key(self, user_id, key_id):\n\n _gu = self.get_user(user_id)\n if _gu is None:\n return None\n\n # build URL and make request\n return self._delete('/users/{0}/keys/{1}'.format(_gu['id'], key_id))", "def Delete(self, user, key):\n return self.Remove(user, key)", "def delete_user(self, user):\n self.delete(user)", "def delete(key, **kwargs):\n cluster_call(\n \"secret_delete\",\n key=key,\n **kwargs,\n confirm=f\"Delete secret {key}\",\n prefix=f\"Deleting secret {key}...\",\n postfix=\"deleted.\",\n )", "def delete_access_key(self, access_key_id, user_name=None):\r\n params = {'AccessKeyId' : access_key_id}\r\n if user_name:\r\n params['UserName'] = user_name\r\n return self.get_response('DeleteAccessKey', params)", "def user_id_delete(user_id):\n user = storage.get(\"User\", user_id)\n\n if user is None:\n abort(404)\n user.delete()\n del user\n return make_response(jsonify({}), 200)", "def delete_user_key(self, key):\n return AlgoliaUtils_request(self.headers, self.write_hosts, \"DELETE\", \"/1/keys/%s\" % key, self.timeout)", "def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)", "def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass", "def delete(self, key: object):\n try:\n del self._user_data[key]\n except KeyError:\n pass", "def delete(self, user_id):\n return delete_user(user_id)", "def user_delete(user_id):\n user = storage.get('User', user_id)\n if user is None:\n abort(404)\n user.delete()\n storage.save()\n return jsonify({}), 200", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def user_delete(user_id=None):\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200", "def delete(user_id):\n assert isinstance(user_id, ObjectId)\n\n User.objects(id=user_id).delete()", "def delete_user(user_id):\n usr = storage.get(User, user_id)\n if usr:\n usr.delete(), storage.save()\n return {}\n else:\n abort(404)", "def userdel(pwfile, user):\n return __salt__[\"webutil.userdel\"](pwfile, user)", "def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])", "def delete_user(user_id=None):\n\n user = storage.get(\"User\", user_id)\n if user is None:\n abort(404)\n else:\n storage.delete(user)\n storage.save()\n return jsonify({}), 200", "def delete_user_key(self, key):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"DELETE\", \"/1/indexes/%s/keys/%s\" % (self.url_index_name, key), self.client.timeout)", "def delete(self, user_id):\r\n return delete_user(request, user_id)", "def deleteSecret(self, clientIP, not_before):\n\n return self._secret_table.delete_item(ip_address=clientIP,not_before=not_before)", "def delete(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def delete_user(user_id=None):\n obj = storage.get('User', user_id)\n if obj is None:\n abort(404)\n else:\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200", "def delete_user(user_id):\n temp = models.storage.get('User', user_id)\n if temp is None:\n abort(404)\n 
temp.delete()\n models.storage.save()\n return jsonify({})", "def del_user_id(user_id):\r\n obj = storage.get(User, user_id)\r\n if obj is None:\r\n abort(404)\r\n obj.delete()\r\n storage.save()\r\n return jsonify({}), 200", "def delete_key(uid):\n if request.method == 'POST':\n hl.deleteUser(uid)\n return redirect('/users')", "def delete_user(user_id):\n\n user = User.query.get(user_id)\n db.session.delete(user)\n db.session.commit()\n return", "def delete_user(user_id):\n user = storage.get(User, user_id)\n if user is None:\n abort(404)\n storage.delete(user)\n storage.save()\n return jsonify({}), 200", "def delete_user(user_id):\n user_obj = storage.get(\"User\", user_id)\n if user_obj:\n storage.delete(user_obj)\n storage.save()\n return jsonify({}), 200\n else:\n abort(404)", "def delete_keypair(self, username, access_key):\n msg = \"delete_keypair not implemented\"\n raise NotImplementedError(msg)", "def delete(user_id: int):\n usr = get_by_id(user_id)\n if not usr:\n raise UserNotFound\n\n db.session.delete(usr)\n db.session.commit()", "def delete_user():", "def delete_user(user_id):\n netAdminToolDB = app.config['DATABASE']\n user = netAdminToolDB.get_user(user_id)\n\n if user == None:\n return jsonify({'error': 'User_id not found'}), 404\n\n netAdminToolDB.delete_user(user_id)\n return jsonify({'result': True})", "def delete_user(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['id'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v1\")", "def delete_user(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.delete_user(user_id)", "def delete_user(self, user_name):\n user = self.get_user(user_name)\n return self.client.delete_resource(user.get('href'))", "def delete_user(self, userId):\n\n try:\n query = \"delete from user where userId = {}\".format(userId)\n print(query)\n cur = self.con.cursor()\n cur.execute(query)\n self.con.commit()\n\n logger.info(\"Deleted\")\n except Exception as e:\n logger.error(\"Error occured at data deletion..\", e)", "def delete_user(self, user_id):\n return self._delete('/users/{0}'.format(user_id))", "async def red_delete_data_for_user(self, *, requester, user_id):\n\t\tawait self.config.user_from_id(user_id).clear()", "def delete_user(self, user):\n name = utils.get_name(user)\n self._user_manager.delete(name)", "def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))", "def delete(self, user):\n q = \"DELETE FROM profiles WHERE user=?\"\n try:\n self._query(q, (user,), fetch='none')\n except Exception as e:\n raise e", "def delete_user(BrokerId=None, Username=None):\n pass", "def delete_key(iam_username):\n try:\n previous_secret_value = secretmanager.get_secret_value(\n SecretId=iam_username, VersionStage=\"AWSPREVIOUS\"\n )\n previous_secret_string = json.loads(previous_secret_value[\"SecretString\"])\n previous_access_key_id = previous_secret_string[\"AccessKey\"]\n print(f\"previous_access_key_id: {previous_access_key_id}\")\n keylist = iam.list_access_keys(UserName=iam_username)[\"AccessKeyMetadata\"]\n\n for key in keylist:\n key_status = key[\"Status\"]\n key_id = key[\"AccessKeyId\"]\n\n print(f\"key id: {key_id}\")\n print(f\"key status: {key_status}\")\n\n if key_status == \"Inactive\":\n if previous_access_key_id == key_id:\n 
print(\"Deleting previous access key from IAM user\")\n iam.delete_access_key(UserName=iam_username, AccessKeyId=key_id)\n print(\n f\"Previous access key: \"\n f\"{key_id} has been deleted for user \"\n f\" {iam_username}.\"\n )\n return {\"status\": 200}\n else:\n print(\n \"secret manager previous value doesn't match with \"\n \"inactive IAM key value\"\n )\n return {\"status\": 400}\n else:\n print(\"previous key is still active\")\n return {\"status\": 200}\n except ClientError as e:\n print(e)\n return {\"status\": 500}", "def delete(self, new_data, user_id):\n print(new_data)\n request_id = get_jwt_identity()\n user = user_crud.get(request_id)\n if not user.is_superuser:\n abort(401,\n message=\"You do not have permission to view this endpoint\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def remove(self, user_id):\n pass", "def delete_user():\n #TODO user delete\n pass", "def delete_user():\n token = request.args.get('token')\n data = jwt.decode(token, app.config['SECRET_KEY'])\n\n permit = functions.delete_user(data)\n if permit:\n return make_response(jsonify({'Delete': 'User Deleted Successfully'}), 201)\n else:\n return make_response(jsonify({'Delete Failed': 'Credentials not match or the user not exist'}), 201)", "def delete(self, user_id):\n user = User.query.get(user_id)\n \n if user is None:\n return abort(422, message=\"User does not exist\")\n \n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return abort(422, message=\"User is the only admin, there must be at least one admin in the system\")\n \n user.delete()\n \n return { 'message': \"User '{}' has been deleted\".format(user.id) }", "def delete_user_entitlement(self, user_id):\n route_values = {}\n if user_id is not None:\n route_values['userId'] = self._serialize.url('user_id', user_id, 'str')\n self._send(http_method='DELETE',\n location_id='8480c6eb-ce60-47e9-88df-eca3c801638b',\n version='6.0-preview.3',\n route_values=route_values)", "async def removeuser(self, ctx, user: discord.Member):\n\n if check_key(user.id):\n delete_key(user.id)\n await self.bot.say(\"{}, you are way out of this league.\".format(user.mention))\n else:\n await self.bot.say(\"That user does not exist in this league.\")", "def delete_password_in_keyring(username):\n return keyring.delete_password(KEYRING_SYSTEM, username,)", "def delete_user(self, user_id):\n\n # ask the model to delete the user\n um = User(self.settings)\n status = um.delete(user_id)\n\n # return\n return status", "def delete_user(username, user_id):\r\n global sql_cursor\r\n global database\r\n\r\n print(\"Are you absolutely sure that you want to delete your account.\")\r\n conf_del = input(\"(y/n) : \").lower()\r\n\r\n if conf_del == \"y\":\r\n\r\n print(\"Deleting...\")\r\n\r\n sql_cursor.execute(f\"DELETE FROM passwords WHERE user_id={user_id};\")\r\n sql_cursor.execute(f'DELETE FROM users WHERE username=\"{username}\";')\r\n database.commit()\r\n\r\n print(\"Account successfully deleted\")\r\n print(\"You need to start the program again\")\r\n print(\"Exiting now\")\r\n sleep(5)\r\n quit()\r\n\r\n else:\r\n print(\"Cancelling deletion ...\")\r\n return", "def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return 
http.Request('DELETE', url), parsers.parse_empty", "def delete(user_id):\n # Get the user requested\n user = User.query.filter(User.user_id == user_id).one_or_none()\n\n if user is not None:\n db.session.delete(user)\n db.session.commit()\n return (\n \"User {user_id} deleted\".format(user_id=user_id), 200\n )\n\n else:\n abort(\n 404,\n \"Person not found for Id: {user_id}\".format(user_id=user_id),\n )", "def deleteKey(self, key):\n key.delete()", "def deleteUser(user):\n delete_user(user)\n return redirect(url_for('login'))", "def delete_user(user_id):\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect('/')", "def delete_user(self, user_id):\n sql = 'update account_user set is_deleted = 1 where id = %s'\n with connection.cursor() as cursor:\n cursor.execute(sql, [user_id])\n row = cursor.fetchone()\n\n return row", "def delete_user(self,userid, cursor):\n sql=\"DELETE FROM users WHERE userid = %s\"\n cursor.execute(sql,(userid))", "async def delete_user(user_id):\n \n user = User.select().where(User.id == user_id).first()\n\n if not user:\n return HTTPException(404, 'User not found')\n else:\n user.delete_instance()\n\n return f\"User {user.username} deleted successfully\"", "def delete_access_key(self, username, accesskeyid):\n try:\n self.iam_client.delete_access_key(\n UserName=username,\n AccessKeyId=accesskeyid\n )\n except ClientError as error:\n if error.response['Error']['Code'] == 'NoSuchEntityException':\n pass", "def delete(self, user_id):\n\n user = User.objects.get_or_404(public_id=user_id)\n return user.delete()", "def deleteSecret(self, clientIP, not_before):\n\n return self._secretdb.execute('delete from %s where ip_address=:ip_address and not_before=:not_before' % self._table_name,\n {'ip_address': ip_address,\n 'not_before': not_before})", "def delete(self, user_id):\n user = User.query.get(user_id)\n\n if user is None:\n return mk_response(\"User does not exist\", 422)\n\n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return mk_response(\"User is the only admin, there must \" +\n \"be at least one admin in the system\", 422)\n\n user.delete()\n\n return mk_response(\"User '{}' has been deleted\".format(user.id))", "def delete_user(id):\n pass", "def delete_user(self, user_name):\r\n params = {'UserName' : user_name}\r\n return self.get_response('DeleteUser', params)", "def del_user(self, username):\n pass", "def del_user(user_id):\n log = current_app.log\n db = request.db\n Site = db.tables.Site\n Cred = db.tables.Cred\n auth_user_id = SiteService.get_current_uid()\n # Check the user is deleting their own items\n if auth_user_id != user_id:\n log.warn(\"User %u tried to delete sites belonging to user %u.\",\n auth_user_id, user_id)\n abort(404)\n sites = Site.query.filter_by(site_owner=auth_user_id).all()\n num_sites = len(sites)\n creds = Cred.query.filter_by(cred_owner=auth_user_id).all()\n num_creds = len(creds)\n with managed_session(request,\n message=\"Database error while deleting sites\",\n http_error_code=500) as session:\n for cred in creds:\n session.delete(cred)\n for site in sites:\n session.delete(site)\n log.info(\"Deleted all sites for user %u (%u sites, %u creds deleted).\",\n auth_user_id, num_sites, num_creds)\n return \"\"", "def del_user(item, username, passw):\n user = User.load_user_by_username(item, username)\n if not user:\n print(\"User does not exist!\")\n elif 
check_password(passw, user.hashed_password):\n user.delete(item)\n print(\"User deleted.\")\n else:\n print(\"Incorrect password!\")", "def delete(ctx: CLIContext, access_key):\n with Session() as session:\n try:\n data = session.KeyPair.delete(access_key)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='deletion',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='deletion',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n extra_info={\n 'access_key': access_key,\n },\n )", "def deleteKey(self):\n\n self.key_del_response = self.ec2.delete_key_pair(KeyName=self.key)", "def delete_secret_request(self, vault_name: str, secret_name: str) -> dict[str, Any]:\n url = f'https://{vault_name}{self.azure_cloud.suffixes.keyvault_dns}/secrets/{secret_name}'\n response = self.http_request(\n 'DELETE', full_url=url, resource=self.get_vault_resource())\n return response", "def delete_user(user_id):\n\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect(\"/users\")", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self) -> None:\n table_dictionary = {\n 'Apple': {\n 'table': 'AppleReceipts',\n 'user_id': 'User_id'\n },\n 'ESL': {\n 'table': 'ESLReceipts',\n 'user_id': 'User_id'\n },\n 'Transactions': {\n 'table': 'Transactions',\n 'user_id': 'User_id'\n },\n 'Users': {\n 'table': 'Users',\n 'user_id': 'id'\n },\n }\n\n # delete the current user's information from the db.\n for key in table_dictionary:\n query = f\"\"\"\n DELETE\n FROM {table_dictionary[key]['table']}\n WHERE {table_dictionary[key]['user_id']}=?;\n \"\"\"\n self.db.commit(query, values=(self.id,))\n\n # perform a sign out\n self.sign_out()\n\n log(f\"User:{self.id} has deleted their account.\")", "def delete_user_profile(IamUserArn=None):\n pass", "def delete(self, url, user):\n token = self.login(user)\n response = requests.delete(url_root + url, headers={\"access-token\": token})\n return response.json(), response.status_code", "def delete_user(user_id):\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect(\"/users\")", "def apply_deletion_policy(cls, user_id: str) -> None:\n keys = cls.query(datastore_services.any_of(\n cls.sender_id == user_id,\n )).fetch(keys_only=True)\n datastore_services.delete_multi(keys)", "def del_api_key_for_user(username):\n global db\n if db is None:\n init_db()\n user_model = Query()\n users = db.search(user_model.username == username)\n\n if not users:\n LOGGER.warning(\"User %s not found\", username)\n return False\n try:\n for user in users:\n user['api_key'] = None\n db.write_back(users)\n return True\n except Exception as e:\n LOGGER.exception(e)\n return False", "def _delete_credential(self, key):\n try:\n del self._data[key]\n except KeyError:\n pass\n self._write()", "def delete_user_process(user_id):\n\n db_user = User.query.get_or_404(user_id)\n\n db.session.delete(db_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def delete_hotdesk(self, account_id, user_id):\n return self.rest_request.delete('accounts/' + str(account_id) +\n '/users/' + str(user_id) + '/hotdesks')", "def delete_user(self):\n\n User.user_list.remove(self)", "def deleteUser(self, uID):\n\n cursor = self.conn.cursor()\n query = 
\"DELETE FROM Users CASCADE \" \\\n \"WHERE uID= %s RETURNING cID; \"\n cursor.execute(query, (uID,))\n cID = cursor.fetchone()[0]\n\n query = \"DELETE FROM Credential \" \\\n \"WHERE cID= %s; \"\n cursor.execute(query, (cID,))\n\n self.conn.commit()\n return", "def GetSecretKey(cls, user_id):\n uid = hashlib.sha256(str(user_id)).hexdigest()\n entity = ndb.Key(cls, uid).get()\n if not entity:\n entity = cls(id=uid, secret_key=GenerateRandomHexKey())\n entity.put()\n return entity.secret_key", "def del_user(request):\r\n mdict = request.matchdict\r\n\r\n # Submit a username.\r\n del_username = mdict.get('username', None)\r\n\r\n if del_username is None:\r\n LOG.error('No username to remove.')\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: No username to remove.',\r\n })\r\n\r\n u = UserMgr.get(username=del_username)\r\n\r\n if not u:\r\n LOG.error('Username not found.')\r\n request.response.status_int = 404\r\n return _api_response(request, {\r\n 'error': 'User not found.',\r\n })\r\n\r\n try:\r\n # First delete all the tag references for this user's bookmarks.\r\n res = DBSession.query(Bmark.bid).filter(Bmark.username == u.username)\r\n bids = [b[0] for b in res]\r\n\r\n qry = bmarks_tags.delete(bmarks_tags.c.bmark_id.in_(bids))\r\n qry.execute()\r\n\r\n # Delete all of the bmarks for this year.\r\n Bmark.query.filter(Bmark.username == u.username).delete()\r\n DBSession.delete(u)\r\n return _api_response(request, {\r\n 'success': True,\r\n 'message': 'Removed user: ' + del_username\r\n })\r\n except Exception, exc:\r\n # There might be cascade issues or something that causes us to fail in\r\n # removing.\r\n LOG.error(exc)\r\n request.response.status_int = 500\r\n return _api_response(request, {\r\n 'error': 'Bad Request: ' + str(exc)\r\n })", "async def red_delete_data_for_user(self, *, requester, user_id):\n return", "async def red_delete_data_for_user(self, *, requester, user_id):\n return", "def delete_user(user_id):\n current_user = get_jwt_identity()\n\n if not current_user:\n print('uri=/login error=\"Missing user\"', flush=True)\n return jsonify(message=\"Missing user\"), 400\n\n if not Administrator.is_administrator(current_user):\n print('non-admin user error', flush=True)\n return jsonify(message=\"You are not allowed to delete other users\"), 403\n\n if user_id == current_user:\n return jsonify(message=\"You are not allowed to delete yourself\"), 403\n\n try:\n User.delete(user_id)\n return jsonify(message=\"Delete succeeded\"), 200\n\n except Exception as e:\n print(e, flush=True)\n return jsonify(message='{}'.format(e)), 501", "def delete_user(user_id):\n try:\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM users WHERE user_id=%s\", user_id)\n conn.commit()\n cursor.close()\n conn.close()\n resp = jsonify(\"User deleted successfully!\")\n resp.status_code = 200\n return resp\n except Exception as exception:\n return jsonify(str(exception))", "def delete(bot, update):\n chatID = update.message.chat_id\n username = get_user_info(chatID)['PID']\n logger.info(\"Deleting user credentials for {}!\".format(username))\n Chat.query.filter(Chat.chatID == chatID).delete() # Delete the user's record referenced by their ChatID\n Misc.query.filter(Misc.chatID == chatID).delete()\n db_session.commit()\n messageContent = \"Your credentials have been deleted, {}\\nHope to see you back soon!\".format(username[3:-4].title())\n bot.sendMessage(chat_id=update.message.chat_id, text=messageContent)\n \n 
mp.track(username, 'User Left')\n mp.people_set(username, {'active': False })", "def delete(cls):\n user = user_schema.load(request.get_json(), partial=(\"email\",))\n\n current_identity = get_jwt_identity()\n db_user = UserModel.find_by_id(current_identity)\n logging.info(\n f\"Delete called by {db_user.id}: {db_user.username} with data: {user['username']}\"\n )\n if db_user.username == user['username']:\n if is_correct_password(db_user.pw_salt, db_user.pw_hash, user['password']):\n db_user.delete_from_db()\n return {\"message\": msgs.DELETED.format(db_user.username)}, 200\n else:\n return {\"error\": msgs.INVALID_PASSWORD}, 401\n return {\"error\": msgs.OWN_RECORD_ONLY}, 401", "def delete(self, request, user_id=None):\n data = json.loads(request.body.decode())\n authenticated = Account.check_credentials(request, data['email'], data['password'])\n user = {}\n user['account_id'] = authenticated.id\n\n if authenticated.check_admin(request, user):\n NLTKOutput.remove(request=request, pk=user_id)\n Account.remove(request=request, pk=user_id)\n return Response(json='Account and content deleted', status=204)\n\n return Response(json='Not Authorized', status=401)", "def delete_user_by_id(user_id):\n return woo_request_helper().delete_details(wc_endpoint='customers/{}'.format(user_id))" ]
[ "0.7391871", "0.73518765", "0.6827607", "0.6825235", "0.67515635", "0.6722836", "0.6718346", "0.66737527", "0.66446835", "0.6612921", "0.6582571", "0.6504976", "0.6503876", "0.64849615", "0.6463512", "0.64591646", "0.6441352", "0.64239824", "0.6421343", "0.64208347", "0.6404419", "0.6394177", "0.638943", "0.63835776", "0.6383379", "0.6382878", "0.6360666", "0.63556", "0.6347027", "0.6345699", "0.6336066", "0.62721163", "0.6269077", "0.62553465", "0.6244164", "0.6221903", "0.6208656", "0.6198497", "0.61945224", "0.61855227", "0.61814237", "0.6168835", "0.61607015", "0.61579025", "0.6153219", "0.60746044", "0.60678405", "0.606305", "0.6059959", "0.6059829", "0.60395664", "0.6038517", "0.6037018", "0.60085344", "0.6003947", "0.6001388", "0.59997857", "0.5994897", "0.5976911", "0.5972937", "0.5953959", "0.5953948", "0.59483135", "0.59402806", "0.59286857", "0.59173983", "0.59053487", "0.59046715", "0.59032726", "0.58989584", "0.5896697", "0.5896642", "0.5893645", "0.58824587", "0.5879424", "0.5858666", "0.5853891", "0.5853891", "0.5853891", "0.5850746", "0.58448637", "0.5837298", "0.5836129", "0.5832994", "0.58317304", "0.58312863", "0.5827662", "0.58267367", "0.5826282", "0.5796457", "0.5793028", "0.5792157", "0.57894963", "0.57894963", "0.57855904", "0.57800007", "0.5774128", "0.5770668", "0.576461", "0.57637835" ]
0.6096357
45
Deletes the specified dynamic group.
def delete_dynamic_group(self, dynamic_group_id, **kwargs):
    resource_path = "/dynamicGroups/{dynamicGroupId}"
    method = "DELETE"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "if_match"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "delete_dynamic_group got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "dynamicGroupId": dynamic_group_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)", "def delete_group(\n group_id: BSONObjectId,\n tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.delete_group\")\n grp: Group = Group.objects.get(pk=group_id)\n logging.debug(\"Deleting group %s (%s)\", grp.group_name, group_id)\n grp.delete()", "def delete_group(groupname):\n response = jsonify(admin.delete_group(current_app.scoped_session(), groupname))\n return response", "def do_del_group(dbsync, group):\n pass", "def delete_vm_group(session, cluster, vm_group):\n client_factory = session.vim.client.factory\n group_spec = client_factory.create('ns0:ClusterGroupSpec')\n groups = []\n\n group_spec.info = vm_group\n group_spec.operation = \"remove\"\n group_spec.removeKey = vm_group.name\n groups.append(group_spec)\n\n config_spec = client_factory.create('ns0:ClusterConfigSpecEx')\n config_spec.groupSpec = groups\n reconfigure_cluster(session, cluster, config_spec)", "def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE", "def delete_group(self, group_id: str):\n # If successful, this method returns 204 No Content response code.\n # It does not return anything in the response body.\n # Using resp_type=\"text\" to avoid parsing error in the calling method.\n self.ms_client.http_request(method='DELETE', url_suffix=f'groups/{group_id}', resp_type=\"text\")", "def delete_group(self, group_name):\r\n params = {'GroupName' : group_name}\r\n return self.get_response('DeleteGroup', params)", "def delete_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n client.delete_group(group_id)\n\n # get the group data from the context\n group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === \"{group_id}\")')\n if isinstance(group_data, list):\n group_data = group_data[0]\n\n # add a field that indicates that the group was deleted\n group_data['Deleted'] = True # add a field with the members to the group\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}\n\n human_readable = f'Group: \"{group_id}\" was deleted successfully.'\n return human_readable, entry_context, NO_OUTPUTS", "def delete_group(_request, group_id):\n group = models.UserGroup.get_by_id(int(group_id))\n group.delete()\n\n url = urlresolvers.reverse('views.admin.list_groups')\n return http.HttpResponseRedirect(url)", "def test_delete_group(self):\n response = self.client.delete_group(\"ABC123\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def delete_group(gid):\n if request.method == 'POST':\n hl.deleteGroup(gid)\n return redirect('/users')", "def del_group(self, group_id, group_type):\n self._mod_group(\n command=self.ofproto.OFPGC_DELETE,\n group_id=group_id,\n group_type=group_type,\n )", "def delete_group(self, group_id):\n url = self.groups_url + \"/%s\" % group_id\n return requests.delete(url, headers=self.headers)", "def delete():\n name = request.json['name']\n group = models.user.Group.get(name)\n if not group:\n raise Absent('Group does not exists.', deletion=False)\n else:\n 
models.db.session.delete(group)\n models.db.session.commit()\n return response(200, deletion=True)", "def test_delete_group(self):\n self.group.delete_group.return_value = succeed('del')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(result, 'del')", "def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1", "def delete_group(id, createdby):\n query = \"DELETE FROM groups WHERE group_id = {} AND createdby ='{}'\".format(id, createdby)\n cur.execute(query)", "def delete(self, consistencygroup, force=False):\n body = {'consistencygroup': {'force': force}}\n self.run_hooks('modify_body_for_action', body, 'consistencygroup')\n url = '/consistencygroups/%s/delete' % base.getid(consistencygroup)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def delete_group(self, group_o):\n class_query = ClassQuery('fvTenant')\n class_query.propFilter = 'eq(fvTenant.name, \"' + group_o.name + '\")'\n tenant_list = self.moDir.query(class_query)\n if len(tenant_list) > 0:\n tenant_list[0].delete()\n self.commit(tenant_list[0])", "def delete_TestGroup(test_case, override_group_name=null, override_headers=null, override_cookies=null):\n # type: (AnyMagpieTestCaseType, Optional[Str], Optional[HeadersType], Optional[CookiesType]) -> None\n app_or_url = get_app_or_url(test_case)\n headers = override_headers if override_headers is not null else test_case.json_headers\n cookies = override_cookies if override_cookies is not null else test_case.cookies\n groups = TestSetup.get_RegisteredGroupsList(test_case, override_headers=headers, override_cookies=cookies)\n group_name = override_group_name if override_group_name is not null else test_case.test_group_name\n # delete as required, skip if non-existing\n if group_name in groups:\n path = \"/groups/{grp}\".format(grp=group_name)\n resp = test_request(app_or_url, \"DELETE\", path, headers=headers, cookies=cookies)\n check_response_basic_info(resp, 200, expected_method=\"DELETE\")\n TestSetup.check_NonExistingTestGroup(test_case, override_group_name=group_name,\n override_headers=headers, override_cookies=cookies)", "def remove_group():\n _id = request.form['_id']\n data, code, message = FIELD_SERVICE.remove_group(_id)\n return __result(data, code, message)", "def delete(self, force_delete=False):\r\n return self.connection.delete_auto_scaling_group(self.name, force_delete)", "def delete(person_group_id):\n url = 'persongroups/{}'.format(person_group_id)\n\n return util.request('DELETE', url)", "def delete_group(dispatcher, log, trans_id, group, force):\n\n def check_and_delete(_group, state):\n if state.desired == 0:\n d = trigger_convergence_deletion(dispatcher, group, trans_id)\n return d.addCallback(lambda _: state)\n else:\n raise GroupNotEmptyError(group.tenant_id, group.uuid)\n\n if tenant_is_enabled(group.tenant_id, config_value):\n if force:\n # We don't care about servers in the group. 
So trigger deletion\n # since it will take precedence over other status\n d = trigger_convergence_deletion(dispatcher, group, trans_id)\n else:\n # Delete only if desired is 0 which must be done with a lock to\n # ensure desired is not getting modified by another thread/node\n # when executing policy\n d = group.modify_state(\n check_and_delete,\n modify_state_reason='delete_group')\n else:\n if force:\n d = empty_group(log, trans_id, group)\n d.addCallback(lambda _: group.delete_group())\n else:\n d = group.delete_group()\n return d", "def delete_placement_group(self, name):\r\n params = {'GroupName':name}\r\n return self.get_status('DeletePlacementGroup', params, verb='POST')", "def delete_group():\n incoming = request.get_json()\n Chatroom.delete_chatroom_with_room_id(incoming['room_id'])\n return jsonify(results = incoming['room_id'])", "def remove_inv_group(**kwargs):\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n gw = kwargs['gateway']\n group_id = kwargs['objectname']\n json_response_status_code = delete_inventory_group_json_response(proxy, sessiontoken, gw, group_id)\n if json_response_status_code == 200:\n print(\"The group \" + group_id + \" has been deleted\")\n else:\n print(\"Something went wrong - please check your syntax and try again.\")", "def delete(self,\n provider_id,\n group_id,\n ):\n return self._invoke('delete',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n })", "def delete_group(user):\n return 'do some magic!'", "def delete_group(args, p4, group_name, metrics):\n LOG.debug(\"delete_group() {}\".format(group_name))\n r = p4.fetch_group(group_name)\n if r and r.get('Owners') and p4gf_const.P4GF_USER in r.get('Owners'):\n print_verbose(args, _(\"Deleting group '{group_name}'...\").format(group_name=group_name))\n p4.run('group', '-a', '-d', group_name)\n metrics.groups += 1\n else:\n print_verbose(args, _(\"Not deleting group '{group}':\"\n \" Does not exist or '{user}' is not an owner.\")\n .format(group=group_name, user=p4gf_const.P4GF_USER))", "async def delete_contact_group(dbcon: DBConnection, contact_group_id: int) -> None:\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n q = \"\"\"delete from contact_groups where id=%s\"\"\"\n await dbcon.operation(q, (contact_group_id,))", "def delete_service_group(self, group_id):\r\n svc = self.client['Network_Application_Delivery_Controller_'\r\n 'LoadBalancer_VirtualServer']\r\n\r\n return svc.deleteObject(id=group_id)", "def product_group_delete(obj, name):\n client = get_client(obj)\n\n with Action('Deleting product_group: {}'.format(name), nl=True):\n pgs = client.product_group_list(name)\n\n client.product_group_delete(pgs[0]['uri'])", "def delete(ctx):\n user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),\n ctx.obj.get('group'))\n\n if not click.confirm(\"Are sure you want to delete experiment group `{}`\".format(_group)):\n click.echo('Existing without deleting experiment group.')\n sys.exit(0)\n\n try:\n response = PolyaxonClient().experiment_group.delete_experiment_group(\n user, project_name, _group)\n # Purge caching\n GroupManager.purge()\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not delete experiment group `{}`.'.format(_group))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n if response.status_code == 204:\n Printer.print_success(\"Experiment group `{}` was delete 
successfully\".format(_group))", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def delete_scaling_group(self, request):\n group = self.store.get_scaling_group(self.log, self.tenant_id,\n self.group_id)\n force = extract_bool_arg(request, 'force', False)\n return controller.delete_group(\n self.dispatcher, log, transaction_id(request), group, force)", "def after_delete(self, record):\n debug = logging.getLogger(__name__).debug\n debug('deleted group %r (%r)', record['name'], record['group_id'])\n audit('delete group', record['name'])", "def qos_policy_group_delete(self, policy_group):\n return self.request( \"qos-policy-group-delete\", {\n 'policy_group': [ policy_group, 'policy-group', [ basestring, 'None' ], False ],\n }, {\n } )", "def delete_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `delete_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def post_security_group_delete(self, resource_id, resource_dict):\n pass", "def delete_targetgroup(self, group_id):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).delete()\r\n self._db.commit()\r\n return result", "def delete_group_group_member(self, targetgroup, groupname):\n try:\n targetgroup = self.quote(targetgroup)\n groupname = self.quote(groupname)\n self.g.delete('groups/%s/groups/%s' % (targetgroup,\n groupname),\n headers={})\n except HTTPError as e:\n return self._manage_errors(e)", "def customer_group_delete(group_id):\n result = {\"success\" : 1, \"message\" : \"Customer can not be 
Deleted\"}\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n \n #clean up the user id\n group_id = db.escape_string(group_id)\n \n query = \"\"\"\n DELETE FROM `groups`\n WHERE `groups`.`group_id` = \"%s\"\n \"\"\" %(group_id)\n cursor = db.cursor()\n try:\n if (cursor.execute(query)) != 0:\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer Group Deleted Successfully\"}\n except Exception as customer_exp:\n result = {\"success\" : 1, \"message\" : \"Customer Group can not be Deleted \" + str(e)}\n finally:\n cursor.close()\n db.close()\n return result", "def delete_agent(self, group_name, id, quite=True):\n self._out.append(('_simulation', 0.5, (group_name, id, quite)))", "def slotDelete(self):\n item = self.groupListBox.item((self.groupListBox.currentItem()))\n group = item.text().ascii()\n Group.Sequencer().slotRemoveGlobalGroup(group)", "def group_delete(user_id, resource_type, resource_id):\n logging.info('Deleting %s %d...', resource_type, resource_id)\n soundcloud.delete('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))\n db.record_deletion(user_id, resource_type, resource_id)\n db.commit()", "def capacitygroup_delete(cmd_ctx, cpc, capacitygroup):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_delete(cmd_ctx, cpc, capacitygroup))", "def test_api_v1_groups_id_delete(self):\n pass", "def security_group_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_security_group(**kwargs)", "def test_delete_group_log_context(self):\n self.group.delete_group.return_value = succeed('del')\n expected_lookup = (matches(IsBoundWith(base_log=True, effectful=True)),\n '00', 'g1')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n expected_lookup, self.group,\n fallback_dispatcher=get_log_dispatcher(self.log,\n {'effectful': True}))\n self.assertEqual(result, 'del')", "def delete_salary_group(db:Session):\n pass", "def cli(env, identifier):\n mgr = SoftLayer.LoadBalancerManager(env.client)\n\n _, group_id = loadbal.parse_id(identifier)\n\n if env.skip_confirmations or formatting.confirm(\"This action will cancel \"\n \"a service group. \"\n \"Continue?\"):\n mgr.delete_service_group(group_id)\n return 'Service group %s is being deleted!' % identifier\n else:\n raise exceptions.CLIAbort('Aborted.')", "def security_group_rule_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_security_group_rule(**kwargs)", "def delete_entry_group(self, name):\n self.__datacatalog.delete_entry_group(name=name)", "def test_delete_resource_group(self):\n pass", "def delete_security_group(self, security_group):\r\n return self.delete(self.security_group_path % (security_group))", "async def delete_group(ctx, group_name: str, owner: str=None):\n\n if owner and owner != ctx.message.author.name:\n if ctx.message.author.id != bot.owner_id:\n await ctx.send(\"Sorry, you don't have permission to delete that group. 
Nerd.\")\n else:\n owner = ctx.message.author.name\n\n if bg_bot.manager.remove_group(owner, group_name):\n response = f'{group_name} successfully removed from {owner} groups!'\n else:\n response = f'Error in removing {group_name} from {owner} groups!'\n \n await ctx.send(response)", "def test_removeGroup(self):\n\t\tuser = User.objects.get(id=1)\n\t\tself.client.force_authenticate(user=user)\n\t\tgroup = Group.objects.create(admin=user, name='testGroup3', isPublic=True, \n\t\t\tdescription='This is another test group that just created.')\n\n\t\turl = \"/groups/3/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\t\turl = \"/groups/2/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def delete_group_with_http_info(self, bucket_id, group_id, **kwargs):\n\n all_params = ['bucket_id', 'group_id', 'if_match', 'if_none_match', 'fields']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_group\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'bucket_id' is set\n if ('bucket_id' not in params) or (params['bucket_id'] is None):\n raise ValueError(\"Missing the required parameter `bucket_id` when calling `delete_group`\")\n # verify the required parameter 'group_id' is set\n if ('group_id' not in params) or (params['group_id'] is None):\n raise ValueError(\"Missing the required parameter `group_id` when calling `delete_group`\")\n\n if 'if_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_match']):\n raise ValueError(\"Invalid value for parameter `if_match` when calling `delete_group`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n if 'if_none_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_none_match']):\n raise ValueError(\"Invalid value for parameter `if_none_match` when calling `delete_group`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n\n collection_formats = {}\n\n resource_path = '/buckets/{bucket_id}/groups/{group_id}'.replace('{format}', 'json')\n path_params = {}\n if 'bucket_id' in params:\n path_params['bucket_id'] = params['bucket_id']\n if 'group_id' in params:\n path_params['group_id'] = params['group_id']\n\n query_params = {}\n if 'fields' in params:\n query_params['_fields'] = params['fields']\n collection_formats['_fields'] = 'csv'\n\n header_params = {}\n if 'if_match' in params:\n header_params['If-Match'] = params['if_match']\n if 'if_none_match' in params:\n header_params['If-None-Match'] = params['if_none_match']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n 
files=local_var_files,\n response_type='Deleted',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def test_user_group_controller_delete(self):\n pass", "def test_delete_group(self, inventoryloader):\n cg = inventoryloader.count_groups()\n ch = inventoryloader.count_hosts()\n inventoryloader.del_group('glance_api')\n assert 'glance_api' not in inventoryloader.groups['glance_all'].children\n assert 'glance_api' not in inventoryloader.hosts['localhost'].groups\n assert 'glance_api' not in inventoryloader.groups\n assert inventoryloader.count_groups() == cg -1\n assert inventoryloader.count_hosts() == ch", "def test_delete(self):\n self.assertTrue(self.run_function(\"group.add\", [self._group]))\n\n # correct functionality\n self.assertTrue(self.run_function(\"group.delete\", [self._group]))\n\n # group does not exist\n self.assertFalse(self.run_function(\"group.delete\", [self._no_group]))", "def test_delete_groups(self):\n pass", "def test_groups_group_ref_delete(self):\n pass", "def deleteGroup(request):\n \n if request.method == 'POST':\n \n form = DeleteGroupForm(request.POST)\n \n if form.is_valid():\n \n cd = form.cleaned_data\n \n try:\n \n #Delete records from m2m of Users & Groups for selected groups\n for eachGroup in cd['group_id']:\n Group_User.objects.filter(group = eachGroup.id).delete()\n \n #Delete Group(s)\n for eachGroup in cd['group_id']:\n Group.objects.filter(id = eachGroup.id).delete()\n \n except:\n \n error = 'Unable to Delete Groups!'\n return render_to_response('deletegroup.html', \n {'form': form, 'error': error},\n context_instance=RequestContext(request))\n \n return HttpResponseRedirect('/deletegroup/success/')\n \n else:\n \n return render_to_response('deletegroup.html',\n {'form': form}, \n context_instance=RequestContext(request)) \n \n else:\n \n form = DeleteGroupForm()\n \n return render_to_response('deletegroup.html', \n {'form': form}, \n context_instance=RequestContext(request))", "def delete(self, security_group_id: str) -> None:\n\t\troute = f'{AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value}/{security_group_id}'\n\t\treturn self._delete(route=route)", "def DeleteGroup(self, group, dry_run=False, reason=None):\n query = []\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_DELETE,\n (\"/%s/groups/%s\" %\n (GANETI_RAPI_VERSION, group)), query, None)", "def delete(self, host_name, group_name): # noqa\n\n response = remove_host(host_name, group_name)\n return response.__dict__, self.state_to_http[response.status]", "def delete_group(self, group_id, **kwargs):\n resource_path = \"/groups/{groupId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"groupId\": group_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": 
\"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def delete_adcampaign_group(self, campaign_group_id, batch=False):\n path = '%s' % campaign_group_id\n return self.make_request(path, 'DELETE', batch=batch)", "def delete_group_cached(group_id, broker=None):\n if not broker:\n broker = get_broker()\n group_key = '{}:{}:keys'.format(broker.list_key, group_id)\n group_list = broker.cache.get(group_key)\n broker.cache.delete_many(group_list)\n broker.cache.delete(group_key)", "def deletecollection_namespaced_group(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method deletecollection_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/groups'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_delete_device_group_by_id(self):\n pass", "def remove_from_group(self, org, contact, group):\n pass", "def test_delete_group_by_id(self):\n # Create a user with 2 groups\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Delete one of those groups\n resp = self.app.delete('/groups/{}'.format(self.test_group1_groupid))\n assert resp.status_code == 200\n\n # Verify that the group is gone\n resp = self.app.get('/groups/{}'.format(self.test_group1_groupid))\n assert resp.status_code == 
404\n\n # Verify that the user's groups don't have that group listed\n resp = self.app.get('/users/{}'.format(self.test_user1_userid))\n assert resp.status_code == 200\n\n data = json.loads(resp.data)\n assert self.test_group1_groupid not in data['groups']", "def test_TC_44383_DELETE_Groups_Id(self, context):\n # Define a test step\n with pytest.allure.step(\"\"\"First create group using request POST /groups.\"\"\"):\n # Test case configuration\n edgeDeviceGroupDetails = context.sc.EdgeDeviceGroupDetails(\n configAdminCanEdit=True,\n configurations=[],\n deliveryLoadBalancePolicy='PROXIMITY_MATCHES',\n dnsName='10.1.25.46',\n edgeDeviceRoles=['EDGE', 'ORIGIN', 'DISTRIBUTION'],\n id='GroupD1',\n members=[{\n 'id': 'POST_veDevices_AllConfigAdminMulticastTrue'\n }],\n name='GroupD1',\n originLoadBalancePolicy='DNS_NAME',\n provisioningPolicy='ALL_MEMBERS',\n proximityDetails=None,\n visibleInAllConfigurations=True)\n\n # createEntity the Groups.\n # The `check` call validates return code\n # and some of the swagger schema.\n # Most schema checks are disabled.\n response = check(\n context.cl.Groups.createEntity(\n body=edgeDeviceGroupDetails\n )\n )\n\n\n # Define a test step\n with pytest.allure.step(\"\"\"Now verify that user is able to delete the group on providing 'Id' parameter using request DELETE /groups{id}.\"\"\"):\n\n # deleteEntity the Groups.\n # The `check` call validates return code\n # and some of the swagger schema.\n # Most schema checks are disabled.\n check(\n context.cl.Groups.deleteEntity(\n id='GroupD1'\n )\n )", "def delete_group_policy(self, group_name, policy_name):\r\n params = {'GroupName' : group_name,\r\n 'PolicyName' : policy_name}\r\n return self.get_response('DeleteGroupPolicy', params, verb='POST')", "def test_delete_device_group_member_by_id(self):\n pass", "def delete(self, sg_id):\n self.client.delete_security_group(sg_id)", "def delete_user_group(self, token, userGroup):\n requestUser = self.get_username_from_token(token)\n if self.check_user_has_owner_clearance(requestUser, userGroup):\n dataBase = self.read_database()\n if userGroup in dataBase['userGroups']:\n del dataBase['userGroups'][userGroup]\n self.write_database(dataBase)\n return\n else:\n raise GroupDoesNotExistException(\"Group does not exist\")\n else:\n raise UserPermissionException(\"User does not have write access\")", "def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))", "def delete_group(self, group_name):\n params = {\n 'name': group_name\n }\n\n self.sonarqube.make_call('post', API_USER_GROUPS_DELETE_ENDPOINT, **params)", "def __on_group_deleted(self, logger, *args):", "def destroy(self, context=None):\n self.dbapi.destroy_nodegroup(self.cluster_id, self.uuid)\n self.obj_reset_changes()", "def removeGroup(self, *args):\n return _libsbml.GroupsModelPlugin_removeGroup(self, *args)", "def delete_secgroup_from_etcd(self, session, secgroup_id):\n secgroup_path = self._secgroup_path(secgroup_id)\n db.journal_write(session, secgroup_path, None)", "def delete_group(self, group_name):\n params = {\n 'name': group_name\n }\n\n self.sonarqube._make_call('post', API_USER_GROUPS_DELETE, **params)", "def remove_group(args):\n\n # check config file is valid first\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"OIDC config file not valid, please use the verify function to debug\")\n return 1 \n\n result_remove_config_file = 
remove_group_from_json(args)\n result_remove_from_config = remove_group_config_file(args)\n\n if result_remove_config_file != 0 and result_remove_from_config != 0:\n print(\"Error. Group {} does not exist in DynaFed\".format(args.group))\n return 1\n\n if result_remove_config_file != 0 or result_remove_from_config != 0:\n print(\"Error while removing config for {}. Check {} is missing group and {}.conf is missing to ensure full removal.\".format(args.group, args.file, args.group))\n return 1\n return 0", "def test_delete_team_user_group(client):\n resp = client.delete_team_user_group(TEAM_ID, NEW_GROUP_ID)\n assert resp['team_id'] == TEAM_ID\n assert resp['group_deleted']", "def test_delete_group_exists():\n with patch(\"salt.modules.mac_group.info\", MagicMock(return_value={})):\n assert mac_group.delete(\"test\")", "def allowed_group_access_delete(user, group):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return (user.has_perm(\"vnswww.group_delete_any\")\n or (user.has_perm(\"vnswww.group_delete_org\")\n and group.org == up.org))", "def delete_nick_group(self, nick):\n nick = Identifier(nick)\n nick_id = self.get_nick_id(nick, False)\n session = self.ssession()\n try:\n session.query(Nicknames).filter(Nicknames.nick_id == nick_id).delete()\n session.query(NickValues).filter(NickValues.nick_id == nick_id).delete()\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n self.ssession.remove()", "def fusion_api_delete_group_role_assignment(self, name=None, uri=None, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.delete(name, uri, api, headers)", "def _delete_security_group(self, group_id):\n\n group_to_delete = self.get_resource()\n\n if not group_to_delete:\n raise NonRecoverableError(\n 'Unable to delete security group {0}, because the group '\n 'does not exist in the account'.format(group_id))\n\n try:\n self.execute(self.client.delete_security_group,\n dict(group_id=group_id), raise_on_falsy=True)\n except (exception.EC2ResponseError,\n exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))", "def delete_sec_group(ec2, sec_group_name):\n try:\n ec2.delete_security_group(sec_group_name)\n except EC2ResponseError as e:\n if e.error_code == 'InvalidGroup.NotFound':\n pass\n else:\n raise e", "def delete(self, oid):\n path = '%s/security-groups/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack security group: %s' % truncate(res))\n return res[0]" ]
[ "0.7652342", "0.7170237", "0.71552515", "0.715084", "0.71241444", "0.71064895", "0.7096797", "0.7081772", "0.7047306", "0.7031457", "0.70275164", "0.7024975", "0.6973406", "0.6965726", "0.6895656", "0.6874776", "0.68428373", "0.6803785", "0.6794903", "0.6792931", "0.67883205", "0.6776583", "0.6691023", "0.6690461", "0.66873735", "0.6686429", "0.66572475", "0.664387", "0.6635231", "0.6624522", "0.66219366", "0.66113", "0.65646183", "0.65623254", "0.65580213", "0.65248805", "0.65085435", "0.65085435", "0.64761543", "0.64618415", "0.6450193", "0.6445384", "0.6433275", "0.6431819", "0.6416023", "0.6401217", "0.63864446", "0.63694555", "0.63617444", "0.6358484", "0.6340563", "0.6337431", "0.63239604", "0.62521267", "0.6234246", "0.6233746", "0.6212873", "0.6208149", "0.61993414", "0.6183787", "0.618215", "0.61765903", "0.61355364", "0.6128137", "0.61270905", "0.61199266", "0.61076057", "0.6098521", "0.6096539", "0.6093045", "0.6074979", "0.60670984", "0.6062533", "0.60559714", "0.60121304", "0.6005428", "0.598799", "0.59863853", "0.596176", "0.59596014", "0.5946301", "0.5945725", "0.59372556", "0.5926155", "0.5918538", "0.58971906", "0.58757913", "0.5870598", "0.5864013", "0.58620656", "0.5842545", "0.5841042", "0.58209395", "0.5808242", "0.5791366", "0.578688", "0.5777019", "0.5773155", "0.5771951", "0.5760003" ]
0.71124274
5
Deletes the specified group. The group must be empty.
def delete_group(self, group_id, **kwargs): resource_path = "/groups/{groupId}" method = "DELETE" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "delete_group got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "groupId": group_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params) else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "def delete_group(self, group_name):\r\n params = {'GroupName' : group_name}\r\n return self.get_response('DeleteGroup', params)", "def delete_group(self, group_id: str):\n # If successful, this method returns 204 No Content response code.\n # It does not return anything in the response body.\n # Using resp_type=\"text\" to avoid parsing error in the calling method.\n self.ms_client.http_request(method='DELETE', url_suffix=f'groups/{group_id}', resp_type=\"text\")", "def delete_group(self, group_id):\n url = self.groups_url + \"/%s\" % group_id\n return requests.delete(url, headers=self.headers)", "def delete_group(\n group_id: BSONObjectId,\n tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.delete_group\")\n grp: Group = Group.objects.get(pk=group_id)\n logging.debug(\"Deleting group %s (%s)\", grp.group_name, group_id)\n grp.delete()", "def del_group(self, group_id, group_type):\n self._mod_group(\n command=self.ofproto.OFPGC_DELETE,\n group_id=group_id,\n group_type=group_type,\n )", "def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)", "def delete_group_group_member(self, targetgroup, groupname):\n try:\n targetgroup = self.quote(targetgroup)\n groupname = self.quote(groupname)\n self.g.delete('groups/%s/groups/%s' % (targetgroup,\n groupname),\n headers={})\n except HTTPError as e:\n return self._manage_errors(e)", "def delete_group(args, p4, group_name, metrics):\n LOG.debug(\"delete_group() {}\".format(group_name))\n r = p4.fetch_group(group_name)\n if r and r.get('Owners') and p4gf_const.P4GF_USER in r.get('Owners'):\n print_verbose(args, _(\"Deleting group '{group_name}'...\").format(group_name=group_name))\n p4.run('group', '-a', '-d', group_name)\n metrics.groups += 1\n else:\n print_verbose(args, _(\"Not deleting group '{group}':\"\n \" Does not exist or '{user}' is not an owner.\")\n .format(group=group_name, user=p4gf_const.P4GF_USER))", "def delete_group(self, group_name):\n params = {\n 'name': group_name\n }\n\n self.sonarqube.make_call('post', API_USER_GROUPS_DELETE_ENDPOINT, **params)", "def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1", "def delete_group(self, group_name):\n params = {\n 'name': group_name\n }\n\n self.sonarqube._make_call('post', API_USER_GROUPS_DELETE, **params)", "def delete_group(_request, group_id):\n group = models.UserGroup.get_by_id(int(group_id))\n group.delete()\n\n url = urlresolvers.reverse('views.admin.list_groups')\n return http.HttpResponseRedirect(url)", "def delete_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n client.delete_group(group_id)\n\n # get the group data from the context\n group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === \"{group_id}\")')\n if isinstance(group_data, list):\n group_data = group_data[0]\n\n # add a field that indicates that the group was deleted\n group_data['Deleted'] = True # add a field with the members to the group\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}\n\n human_readable = f'Group: \"{group_id}\" was deleted successfully.'\n return human_readable, entry_context, NO_OUTPUTS", "def 
delete_group(groupname):\n response = jsonify(admin.delete_group(current_app.scoped_session(), groupname))\n return response", "def DeleteGroup(self, group, dry_run=False, reason=None):\n query = []\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_DELETE,\n (\"/%s/groups/%s\" %\n (GANETI_RAPI_VERSION, group)), query, None)", "async def delete_contact_group(dbcon: DBConnection, contact_group_id: int) -> None:\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n q = \"\"\"delete from contact_groups where id=%s\"\"\"\n await dbcon.operation(q, (contact_group_id,))", "def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE", "def remove_group():\n _id = request.form['_id']\n data, code, message = FIELD_SERVICE.remove_group(_id)\n return __result(data, code, message)", "def delete_group(self, group_o):\n class_query = ClassQuery('fvTenant')\n class_query.propFilter = 'eq(fvTenant.name, \"' + group_o.name + '\")'\n tenant_list = self.moDir.query(class_query)\n if len(tenant_list) > 0:\n tenant_list[0].delete()\n self.commit(tenant_list[0])", "def test_delete_group(self):\n response = self.client.delete_group(\"ABC123\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def do_del_group(dbsync, group):\n pass", "def test_delete_group(self):\n self.group.delete_group.return_value = succeed('del')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(result, 'del')", "def removeGroup(self, group, defaultGroup=''):\n return self.pm_getUserManager().removeGroup(self._unbox(group), self._unbox(defaultGroup))", "def delete_vm_group(session, cluster, vm_group):\n client_factory = session.vim.client.factory\n group_spec = client_factory.create('ns0:ClusterGroupSpec')\n groups = []\n\n group_spec.info = vm_group\n group_spec.operation = \"remove\"\n group_spec.removeKey = vm_group.name\n groups.append(group_spec)\n\n config_spec = client_factory.create('ns0:ClusterConfigSpecEx')\n config_spec.groupSpec = groups\n reconfigure_cluster(session, cluster, config_spec)", "def delete_group(gid):\n if request.method == 'POST':\n hl.deleteGroup(gid)\n return redirect('/users')", "def delete():\n name = request.json['name']\n group = models.user.Group.get(name)\n if not group:\n raise Absent('Group does not exists.', deletion=False)\n else:\n models.db.session.delete(group)\n models.db.session.commit()\n return response(200, deletion=True)", "def delete(ctx):\n user, project_name, _group = get_project_group_or_local(ctx.obj.get('project'),\n ctx.obj.get('group'))\n\n if not click.confirm(\"Are sure you want to delete experiment group `{}`\".format(_group)):\n click.echo('Existing without deleting experiment group.')\n sys.exit(0)\n\n try:\n response = PolyaxonClient().experiment_group.delete_experiment_group(\n user, project_name, _group)\n # Purge caching\n GroupManager.purge()\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not delete experiment group `{}`.'.format(_group))\n Printer.print_error('Error 
message `{}`.'.format(e))\n sys.exit(1)\n\n if response.status_code == 204:\n Printer.print_success(\"Experiment group `{}` was delete successfully\".format(_group))", "def delete_TestGroup(test_case, override_group_name=null, override_headers=null, override_cookies=null):\n # type: (AnyMagpieTestCaseType, Optional[Str], Optional[HeadersType], Optional[CookiesType]) -> None\n app_or_url = get_app_or_url(test_case)\n headers = override_headers if override_headers is not null else test_case.json_headers\n cookies = override_cookies if override_cookies is not null else test_case.cookies\n groups = TestSetup.get_RegisteredGroupsList(test_case, override_headers=headers, override_cookies=cookies)\n group_name = override_group_name if override_group_name is not null else test_case.test_group_name\n # delete as required, skip if non-existing\n if group_name in groups:\n path = \"/groups/{grp}\".format(grp=group_name)\n resp = test_request(app_or_url, \"DELETE\", path, headers=headers, cookies=cookies)\n check_response_basic_info(resp, 200, expected_method=\"DELETE\")\n TestSetup.check_NonExistingTestGroup(test_case, override_group_name=group_name,\n override_headers=headers, override_cookies=cookies)", "def delete_group_with_http_info(self, bucket_id, group_id, **kwargs):\n\n all_params = ['bucket_id', 'group_id', 'if_match', 'if_none_match', 'fields']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_group\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'bucket_id' is set\n if ('bucket_id' not in params) or (params['bucket_id'] is None):\n raise ValueError(\"Missing the required parameter `bucket_id` when calling `delete_group`\")\n # verify the required parameter 'group_id' is set\n if ('group_id' not in params) or (params['group_id'] is None):\n raise ValueError(\"Missing the required parameter `group_id` when calling `delete_group`\")\n\n if 'if_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_match']):\n raise ValueError(\"Invalid value for parameter `if_match` when calling `delete_group`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n if 'if_none_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_none_match']):\n raise ValueError(\"Invalid value for parameter `if_none_match` when calling `delete_group`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n\n collection_formats = {}\n\n resource_path = '/buckets/{bucket_id}/groups/{group_id}'.replace('{format}', 'json')\n path_params = {}\n if 'bucket_id' in params:\n path_params['bucket_id'] = params['bucket_id']\n if 'group_id' in params:\n path_params['group_id'] = params['group_id']\n\n query_params = {}\n if 'fields' in params:\n query_params['_fields'] = params['fields']\n collection_formats['_fields'] = 'csv'\n\n header_params = {}\n if 'if_match' in params:\n header_params['If-Match'] = params['if_match']\n if 'if_none_match' in params:\n header_params['If-None-Match'] = params['if_none_match']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # 
HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Deleted',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def delete(self,\n provider_id,\n group_id,\n ):\n return self._invoke('delete',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n })", "def delete_targetgroup(self, group_id):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).delete()\r\n self._db.commit()\r\n return result", "def delete_group(dispatcher, log, trans_id, group, force):\n\n def check_and_delete(_group, state):\n if state.desired == 0:\n d = trigger_convergence_deletion(dispatcher, group, trans_id)\n return d.addCallback(lambda _: state)\n else:\n raise GroupNotEmptyError(group.tenant_id, group.uuid)\n\n if tenant_is_enabled(group.tenant_id, config_value):\n if force:\n # We don't care about servers in the group. So trigger deletion\n # since it will take precedence over other status\n d = trigger_convergence_deletion(dispatcher, group, trans_id)\n else:\n # Delete only if desired is 0 which must be done with a lock to\n # ensure desired is not getting modified by another thread/node\n # when executing policy\n d = group.modify_state(\n check_and_delete,\n modify_state_reason='delete_group')\n else:\n if force:\n d = empty_group(log, trans_id, group)\n d.addCallback(lambda _: group.delete_group())\n else:\n d = group.delete_group()\n return d", "def delete_adcampaign_group(self, campaign_group_id, batch=False):\n path = '%s' % campaign_group_id\n return self.make_request(path, 'DELETE', batch=batch)", "async def delete_group(ctx, group_name: str, owner: str=None):\n\n if owner and owner != ctx.message.author.name:\n if ctx.message.author.id != bot.owner_id:\n await ctx.send(\"Sorry, you don't have permission to delete that group. 
Nerd.\")\n else:\n owner = ctx.message.author.name\n\n if bg_bot.manager.remove_group(owner, group_name):\n response = f'{group_name} successfully removed from {owner} groups!'\n else:\n response = f'Error in removing {group_name} from {owner} groups!'\n \n await ctx.send(response)", "def delete_group():\n incoming = request.get_json()\n Chatroom.delete_chatroom_with_room_id(incoming['room_id'])\n return jsonify(results = incoming['room_id'])", "def removeGroup(self, *args):\n return _libsbml.GroupsModelPlugin_removeGroup(self, *args)", "def delete_user_group(self, token, userGroup):\n requestUser = self.get_username_from_token(token)\n if self.check_user_has_owner_clearance(requestUser, userGroup):\n dataBase = self.read_database()\n if userGroup in dataBase['userGroups']:\n del dataBase['userGroups'][userGroup]\n self.write_database(dataBase)\n return\n else:\n raise GroupDoesNotExistException(\"Group does not exist\")\n else:\n raise UserPermissionException(\"User does not have write access\")", "def delete_security_group(self, security_group):\r\n return self.delete(self.security_group_path % (security_group))", "def _delete_security_group(self, group_id):\n\n group_to_delete = self.get_resource()\n\n if not group_to_delete:\n raise NonRecoverableError(\n 'Unable to delete security group {0}, because the group '\n 'does not exist in the account'.format(group_id))\n\n try:\n self.execute(self.client.delete_security_group,\n dict(group_id=group_id), raise_on_falsy=True)\n except (exception.EC2ResponseError,\n exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))", "def delete(person_group_id):\n url = 'persongroups/{}'.format(person_group_id)\n\n return util.request('DELETE', url)", "def delete_security_group(self, name=None, group_id=None):\r\n params = {}\r\n\r\n if name is not None:\r\n params['GroupName'] = name\r\n elif group_id is not None:\r\n params['GroupId'] = group_id\r\n\r\n return self.get_status('DeleteSecurityGroup', params, verb='POST')", "def product_group_delete(obj, name):\n client = get_client(obj)\n\n with Action('Deleting product_group: {}'.format(name), nl=True):\n pgs = client.product_group_list(name)\n\n client.product_group_delete(pgs[0]['uri'])", "def remove_from_group(self, org, contact, group):\n pass", "def delete_group(id, createdby):\n query = \"DELETE FROM groups WHERE group_id = {} AND createdby ='{}'\".format(id, createdby)\n cur.execute(query)", "def delete_service_group(self, group_id):\r\n svc = self.client['Network_Application_Delivery_Controller_'\r\n 'LoadBalancer_VirtualServer']\r\n\r\n return svc.deleteObject(id=group_id)", "def delete_group(self, bucket_id, group_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_group_with_http_info(bucket_id, group_id, **kwargs)\n else:\n (data) = self.delete_group_with_http_info(bucket_id, group_id, **kwargs)\n return data", "def remove_inv_group(**kwargs):\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n gw = kwargs['gateway']\n group_id = kwargs['objectname']\n json_response_status_code = delete_inventory_group_json_response(proxy, sessiontoken, gw, group_id)\n if json_response_status_code == 200:\n print(\"The group \" + group_id + \" has been deleted\")\n else:\n print(\"Something went wrong - please check your syntax and try again.\")", "def customer_group_delete(group_id):\n result = {\"success\" : 1, \"message\" : \"Customer can not be Deleted\"}\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n 
\n #clean up the user id\n group_id = db.escape_string(group_id)\n \n query = \"\"\"\n DELETE FROM `groups`\n WHERE `groups`.`group_id` = \"%s\"\n \"\"\" %(group_id)\n cursor = db.cursor()\n try:\n if (cursor.execute(query)) != 0:\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer Group Deleted Successfully\"}\n except Exception as customer_exp:\n result = {\"success\" : 1, \"message\" : \"Customer Group can not be Deleted \" + str(e)}\n finally:\n cursor.close()\n db.close()\n return result", "def delete_group(\n self,\n name,\n recursive=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n # Wrap the transport method to add retry and timeout logic.\n if \"delete_group\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"delete_group\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.delete_group,\n default_retry=self._method_configs[\"DeleteGroup\"].retry,\n default_timeout=self._method_configs[\"DeleteGroup\"].timeout,\n client_info=self._client_info,\n )\n\n request = group_service_pb2.DeleteGroupRequest(name=name, recursive=recursive,)\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"name\", name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n self._inner_api_calls[\"delete_group\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", "def delete_placement_group(self, name):\r\n params = {'GroupName':name}\r\n return self.get_status('DeletePlacementGroup', params, verb='POST')", "def delete_group_member(self, group_id, member_id):\n url = self.groups_url + \"/%s/members/%s\" % (group_id, member_id)\n return requests.delete(url, headers=self.headers)", "def delete_all_group_member(self, group_id):\n url = self.groups_url + \"/%s/members\" % group_id\n return requests.delete(url, headers=self.headers)", "def remove_from_group(self, group):\n\n if self.in_group(group):\n self.secondary_groups.remove(group)\n return self", "def delete_sec_group(ec2, sec_group_name):\n try:\n ec2.delete_security_group(sec_group_name)\n except EC2ResponseError as e:\n if e.error_code == 'InvalidGroup.NotFound':\n pass\n else:\n raise e", "def delete_entry_group(self, name):\n self.__datacatalog.delete_entry_group(name=name)", "def test_delete_group_by_id(self):\n # Create a user with 2 groups\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Delete one of those groups\n resp = self.app.delete('/groups/{}'.format(self.test_group1_groupid))\n assert resp.status_code == 200\n\n # Verify that the group is gone\n resp = self.app.get('/groups/{}'.format(self.test_group1_groupid))\n assert resp.status_code == 404\n\n # Verify that the user's groups don't have that group listed\n resp = self.app.get('/users/{}'.format(self.test_user1_userid))\n assert resp.status_code == 200\n\n data = json.loads(resp.data)\n assert self.test_group1_groupid not in data['groups']", "def test_removeGroup(self):\n\t\tuser = User.objects.get(id=1)\n\t\tself.client.force_authenticate(user=user)\n\t\tgroup = Group.objects.create(admin=user, name='testGroup3', isPublic=True, \n\t\t\tdescription='This is another test group that just created.')\n\n\t\turl = \"/groups/3/\"\n\t\tresponse = 
self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\t\turl = \"/groups/2/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def delete(self, host_name, group_name): # noqa\n\n response = remove_host(host_name, group_name)\n return response.__dict__, self.state_to_http[response.status]", "def remove_group_from_customer(self,\n customer_id,\n group_id):\n\n # Prepare query URL\n _url_path = '/v2/customers/{customer_id}/groups/{group_id}'\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, {\n 'customer_id': customer_id,\n 'group_id': group_id\n })\n _query_builder = self.config.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n _headers = {\n 'accept': 'application/json'\n }\n\n # Prepare and execute request\n _request = self.config.http_client.delete(_query_url, headers=_headers)\n OAuth2.apply(self.config, _request)\n _response = self.execute_request(_request)\n\n decoded = APIHelper.json_deserialize(_response.text)\n if type(decoded) is dict:\n _errors = decoded.get('errors')\n else:\n _errors = None\n _result = ApiResponse(_response, body=decoded, errors=_errors)\n return _result", "def delete_group_user(self, group_id, user_id):\n resp, body = self.delete('groups/%s/users/%s' % (group_id, user_id))\n self.expected_success(204, resp.status)\n return rest_client.ResponseBody(resp, body)", "def test_delete_group_log_context(self):\n self.group.delete_group.return_value = succeed('del')\n expected_lookup = (matches(IsBoundWith(base_log=True, effectful=True)),\n '00', 'g1')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n expected_lookup, self.group,\n fallback_dispatcher=get_log_dispatcher(self.log,\n {'effectful': True}))\n self.assertEqual(result, 'del')", "def delete_group_user(self, group_id, user_id):\n resp, body = self.delete('groups/%s/users/%s' % (group_id, user_id))\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)", "async def delete_country_group_async(\n country_group_code: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = DeleteCountryGroup.create(\n country_group_code=country_group_code,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def delete(self, consistencygroup, force=False):\n body = {'consistencygroup': {'force': force}}\n self.run_hooks('modify_body_for_action', body, 'consistencygroup')\n url = '/consistencygroups/%s/delete' % base.getid(consistencygroup)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def security_group_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_security_group(**kwargs)", "def delete(self, id):\r\n return UserGroupService.removeUserGroup(self, id)", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def delete_scaling_group(self, request):\n group = self.store.get_scaling_group(self.log, self.tenant_id,\n self.group_id)\n force = extract_bool_arg(request, 'force', False)\n return controller.delete_group(\n 
self.dispatcher, log, transaction_id(request), group, force)", "async def delete_contact_from_contact_group(dbcon: DBConnection, contact_group_id: int, contact_id: int) -> None:\n q = \"\"\"delete from contact_group_contacts where contact_group_id=%s and contact_id=%s\"\"\"\n q_args = (contact_group_id, contact_id)\n await dbcon.operation(q, q_args)", "def delete_group(user):\n return 'do some magic!'", "def remove_from_group(user: User, group: Group) -> Result:\n if user.pw_name not in group.gr_mem:\n return Result(State.unchanged)\n command([\"/usr/sbin/deluser\", user.pw_name, group.gr_name])\n group.gr_mem.remove(user.pw_name)\n return Result(State.success)", "def delete_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `delete_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def delete_country_group(\n country_group_code: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = DeleteCountryGroup.create(\n country_group_code=country_group_code,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def qos_policy_group_delete(self, policy_group):\n return self.request( \"qos-policy-group-delete\", {\n 'policy_group': [ policy_group, 'policy-group', [ basestring, 'None' ], False ],\n }, {\n } )", "def deleteGroup(request):\n \n if request.method == 'POST':\n \n form = DeleteGroupForm(request.POST)\n \n if form.is_valid():\n \n cd = form.cleaned_data\n \n try:\n \n #Delete records from m2m of Users & Groups for selected groups\n for eachGroup in cd['group_id']:\n Group_User.objects.filter(group = eachGroup.id).delete()\n \n #Delete 
Group(s)\n for eachGroup in cd['group_id']:\n Group.objects.filter(id = eachGroup.id).delete()\n \n except:\n \n error = 'Unable to Delete Groups!'\n return render_to_response('deletegroup.html', \n {'form': form, 'error': error},\n context_instance=RequestContext(request))\n \n return HttpResponseRedirect('/deletegroup/success/')\n \n else:\n \n return render_to_response('deletegroup.html',\n {'form': form}, \n context_instance=RequestContext(request)) \n \n else:\n \n form = DeleteGroupForm()\n \n return render_to_response('deletegroup.html', \n {'form': form}, \n context_instance=RequestContext(request))", "def drop_groups(self, group_ids=None):\n return self.groups.delete(group_ids)", "def test_delete_team_user_group(client):\n resp = client.delete_team_user_group(TEAM_ID, NEW_GROUP_ID)\n assert resp['team_id'] == TEAM_ID\n assert resp['group_deleted']", "def disconnect_whole_group(self, id_group:int) -> bool:\n try:\n self.cursor.execute(f\"DELETE FROM {table_groups} WHERE id={id_group};\")\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We found problems with deletion of the whole group from the {table_groups} in database. Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def remove_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for membership in self.group_memberships:\n if membership.group.href == group.href:\n membership.delete()\n return\n\n raise StormpathError({\n 'developerMessage': 'This user is not part of Group %s.' % group.name,\n })", "def test_api_v1_groups_id_delete(self):\n pass", "def test_delete_device_group_member_by_id(self):\n pass", "def unlink_Group(self, group):\n\t\tself.__groups.remove(group.weakref)\n\t\tself._cli_invalidate()", "def del_user_from_group(self,username,groupname):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_del_user_from_group_query,{'username':username,'groupname':groupname,'username_field':self.sql_username_field,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: del_user_from_group: %s\" % (query,))\n\n cursor.execute(query)\n db.commit()\n return True", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def delete(self, security_group_id: str) -> None:\n\t\troute = f'{AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value}/{security_group_id}'\n\t\treturn self._delete(route=route)", "def removeUserFromGroup(self, user, group):\n return self.pm_getUserManager().removeUserFromGroup(self._unbox(user), self._unbox(group))", "def remove_member_from_group(self, group_id, member_id):\n route_values = {}\n if group_id is not None:\n route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')\n if member_id is not None:\n route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')\n self._send(http_method='DELETE',\n location_id='45a36e53-5286-4518-aa72-2d29f7acc5d8',\n version='6.0-preview.1',\n route_values=route_values)", "def clear_group(self):\n # Implemented from template for osid.resource.ResourceForm.clear_group_template\n if (self.get_group_metadata().is_read_only() or\n self.get_group_metadata().is_required()):\n raise errors.NoAccess()\n self._my_map['group'] = self._group_default", "def delete_secgroup_from_etcd(self, session, secgroup_id):\n secgroup_path = 
self._secgroup_path(secgroup_id)\n db.journal_write(session, secgroup_path, None)", "def capacitygroup_delete(cmd_ctx, cpc, capacitygroup):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_delete(cmd_ctx, cpc, capacitygroup))", "async def delete_contact_group_from_active_monitor(dbcon: DBConnection, contact_group_id: int, monitor_id: int) -> None:\n q = \"\"\"delete from active_monitor_contact_groups where active_monitor_id=%s and contact_group_id=%s\"\"\"\n q_args = (monitor_id, contact_group_id)\n await dbcon.operation(q, q_args)", "def action_remove_from_group(self, kwargs):\n user = kwargs[\"user\"]\n group = kwargs[\"group\"]\n\n if self.engine.remove_user_from_group(user, group):\n info(f\"User {user} sucessfully removed from {group}\")\n else:\n error(f\"Unable to remove {user} from {group}, check privileges or dn\")", "def delete_participant_groups_by_participant_group_ids(\n self,\n participant_group_ids: List[str] = None,\n ) -> None:\n if not participant_group_ids:\n return None\n\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n\n participant_group_ids_block = \"\"\n if participant_group_ids:\n task_run_ids_str = \",\".join([f'\"{pgi}\"' for pgi in participant_group_ids])\n participant_group_ids_block = (\n f\"AND prolific_participant_group_id IN ({task_run_ids_str})\"\n )\n\n c.execute(\n f\"\"\"\n DELETE FROM participant_groups\n WHERE {participant_group_ids_block};\n \"\"\"\n )\n return None", "def del_secgroup(self, args):\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"]\n message = MessageClass()\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n\n response = ec2.delete_security_group(GroupId=sgid)\n attachment = MessageAttachmentsClass()\n message.message_text = \"Security group deleted\"\n message.attach(attachment)\n\n return message.to_json()", "def fusion_api_delete_group_role_assignment(self, name=None, uri=None, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.delete(name, uri, api, headers)", "def delete_group_cached(group_id, broker=None):\n if not broker:\n broker = get_broker()\n group_key = '{}:{}:keys'.format(broker.list_key, group_id)\n group_list = broker.cache.get(group_key)\n broker.cache.delete_many(group_list)\n broker.cache.delete(group_key)", "def ex_destroy_resource_group(self, resource_group, location=None):\n if location is None:\n if self.default_location:\n location = self.default_location\n else:\n raise ValueError(\"location is required.\")\n\n target = \"/subscriptions/%s/resourcegroups/%s\" % (self.subscription_id, resource_group)\n params = {'api-version': '2016-09-01'}\n\n r = self.connection.request(action=target,\n params=params,\n method=\"DELETE\")\n\n return", "def test_delete_device_group_member_by_id1(self):\n pass" ]
[ "0.8661287", "0.8429421", "0.8341084", "0.83324933", "0.81745154", "0.80304027", "0.8005047", "0.7919353", "0.79183817", "0.7814903", "0.7808236", "0.7775581", "0.77595615", "0.77590305", "0.773771", "0.7719856", "0.75224054", "0.75213486", "0.7515984", "0.74853754", "0.7475058", "0.73940724", "0.7386386", "0.7382759", "0.7372839", "0.7326506", "0.72980404", "0.7236323", "0.71649826", "0.7122436", "0.71050227", "0.7100572", "0.7095239", "0.70788914", "0.7062481", "0.70442325", "0.70440656", "0.700622", "0.69493884", "0.691419", "0.69080585", "0.6907439", "0.68608665", "0.6851343", "0.68310666", "0.6806788", "0.67901176", "0.6774769", "0.67674863", "0.6765406", "0.67612654", "0.67289215", "0.67216253", "0.67185456", "0.66985506", "0.6660082", "0.6654692", "0.6635545", "0.66234255", "0.66218776", "0.6603849", "0.6598917", "0.659741", "0.65816444", "0.6569394", "0.6563157", "0.6557575", "0.6554462", "0.6554462", "0.654025", "0.6529362", "0.65163493", "0.6456246", "0.64508843", "0.6430289", "0.6422965", "0.63989824", "0.6395822", "0.6360098", "0.6326003", "0.6325525", "0.6314073", "0.63002837", "0.62911445", "0.62887853", "0.62818533", "0.6261399", "0.62571", "0.624486", "0.62342924", "0.6218952", "0.6203008", "0.6197826", "0.61861163", "0.61790204", "0.6177215", "0.6168119", "0.6159016", "0.61472917", "0.61353755" ]
0.68005127
46
Deletes the specified identity provider. The identity provider must not have
def delete_identity_provider(self, identity_provider_id, **kwargs): resource_path = "/identityProviders/{identityProviderId}" method = "DELETE" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "delete_identity_provider got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "identityProviderId": identity_provider_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params) else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_identity_provider(module, sdk, cloud, idp):\n\n if idp is None:\n return False\n\n if module.check_mode:\n return True\n\n try:\n cloud.identity.delete_identity_provider(idp)\n except sdk.exceptions.OpenStackCloudException as ex:\n module.fail_json(msg='Failed to delete identity provider: {0}'.format(str(ex)))\n return True", "def delete_provider(cls, args, config):\n # print \"MOLNSProvider.delete_provider(args={0}, config={1})\".format(args, config)\n if len(args) == 0:\n print \"USAGE: molns provider delete name\"\n return\n config.delete_object(name=args[0], kind='Provider')", "def delete_from_provider(self, builder, provider, credentials, target, parameters):", "def delete_cloud_provider(providername):\n response = jsonify(\n admin.delete_provider(current_app.scoped_session(), providername)\n )\n return response", "def delete(self,\n provider_id,\n service_instance_id,\n ):\n return self._invoke('delete',\n {\n 'provider_id': provider_id,\n 'service_instance_id': service_instance_id,\n })", "def delete(self,\n provider_id,\n service_instance_id,\n ):\n return self._invoke('delete',\n {\n 'provider_id': provider_id,\n 'service_instance_id': service_instance_id,\n })", "def delete(self,\n provider_id,\n provider_deployment_map_id,\n ):\n return self._invoke('delete',\n {\n 'provider_id': provider_id,\n 'provider_deployment_map_id': provider_deployment_map_id,\n })", "def delete(self,\n provider_id,\n group_id,\n ):\n return self._invoke('delete',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n })", "def delete(self, **kwargs) -> ProviderResult:\n return ProviderResult.NOT_IMPLEMENTED", "def delete(self,\n provider_id,\n l3vpn_id,\n ):\n return self._invoke('delete',\n {\n 'provider_id': provider_id,\n 'l3vpn_id': l3vpn_id,\n })", "def delete(self,\n provider_id,\n route_id,\n ):\n return self._invoke('delete',\n {\n 'provider_id': provider_id,\n 'route_id': route_id,\n })", "def remove_provider(self, provider):\n if isinstance(provider, LocalProvider):\n raise QISKitError(\"Cannot unregister 'local' provider.\")\n try:\n self.providers.remove(provider)\n except ValueError:\n raise QISKitError(\"'%s' provider is not registered.\")", "def unregister(provider):\n _DEFAULT_PROVIDER.remove_provider(provider)", "def delete_ldap_provider(self, id):\n try:\n self.logger.info('delete_ldap_provider called.')\n\n # Validate required parameters\n self.logger.info(\n 'Validating required parameters for delete_ldap_provider.')\n self.validate_parameters(id=id)\n\n # Prepare query URL\n self.logger.info('Preparing query URL for delete_ldap_provider.')\n _url_path = '/public/ldapProvider/{id}'\n _url_path = APIHelper.append_url_with_template_parameters(\n _url_path, {'id': id})\n _query_builder = self.config.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for delete_ldap_provider.')\n _request = self.http_client.delete(_query_url)\n AuthManager.apply(_request, self.config)\n _context = self.execute_request(_request,\n name='delete_ldap_provider')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info('Validating response for delete_ldap_provider.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise", "def remove_provider_network(network_id):\n session = 
db.get_session()\n pnet = (session.query(network_models_v2.ProviderNetwork).\n filter_by(network_id=network_id).first())\n if pnet:\n session.delete(pnet)\n session.flush()\n return network_id", "def destroy(self, request, *args, **kwargs):\n response = super(ProviderViewSet, self).destroy(request, *args, **kwargs)\n response.data = {'message': 'Proveedor ha sido eliminado'}\n return response", "def uninstall(self, provider):\n pass # pragma: no cover", "def test_delete_identity(self):\n pass", "def delete(self, uuid):\n try:\n pmanager = PushManager.query.filter_by(\n uuid=uuid\n ).one_or_none()\n if pmanager is None:\n raise GatlinException(\"App not exist\", 404)\n self._provider.delete_platform(pmanager.sns_arn)\n pmanager.delete()\n except GatlinException as exception:\n raise exception", "def delete_provisioning(self, identifier):\n return self.client.call(\"SoftLayer_Provisioning_Hook\", \"deleteObject\", id=identifier)", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def delete_ipsecpolicy(self, ipsecpolicy):\r\n return self.delete(self.ipsecpolicy_path % (ipsecpolicy))", "def delete_controller(cls, args, config):\n # print \"MOLNSProvider.delete_provider(args={0}, config={1})\".format(args, config)\n if len(args) == 0:\n raise MOLNSException(\"USAGE: molns cluser delete name\")\n config.delete_object(name=args[0], kind='Controller')", "def delete_user(UserName=None, AuthenticationType=None):\n pass", "def delete(self, identity, data=None, record=None, **kwargs):\n community_set = self._retrieve_set(kwargs.get(\"slug\") or record.slug)\n if not community_set:\n return\n\n # NOTE: will be removed from index via listener in oaiserver module\n db.session.delete(community_set)", "def delete_user():", "def delete_user():\r\n raise NotImplementedError()", "def delete_credential(credentials):\n credentials.delete_credentials()", "def deleteSocialAuthentication(self, network):\n\t\turl = \"https://habitica.com/api/v3/user/auth/social/\" + network\n\t\treturn(deleteUrl(url, self.credentials))", "def test_delete_payment_profile(self):\n self.cim.delete_payment_profile(\n customer_profile_id=u\"123\",\n customer_payment_profile_id=u\"432\"\n )", "def delete_user(id):\n pass", "def delete_payment(self):\r\n return delete_payment_by_id(self.__payment_id__)", "def test_delete_profile(self):\n self.cim.delete_profile(customer_profile_id=u\"123\")", "def delete_member():\n client = RequestManager()\n client.set_method(\"DELETE\")\n member_id = STORED_ID[\"member_id\"]\n client.set_endpoint(\"/accounts/{0}/memberships/{1}\".format(CONFIG_DATA['account_id'], member_id))\n client.execute_request()", "def delete(self, application_id):", "def delete_user():\n #TODO user delete\n pass", "def delete(self, type=None, name=None, identity=None):\n if name and identity:\n name = None # Only specify one\n request = self.request(operation='DELETE', type=type, name=name,\n identity=identity)\n self.call(request, expect=error.NO_CONTENT)", "def access_info_delete(context, storage_id):\n _access_info_get_query(context). 
\\\n filter_by(storage_id=storage_id).delete()", "def test_delete_user_identity_mapping(self):\n pass", "def delete_exploration(committer_id, exploration_id, force_deletion=False):\n exploration = get_exploration_by_id(exploration_id)\n if not force_deletion and not exploration.is_deletable_by(committer_id):\n raise Exception(\n 'User %s does not have permissions to delete exploration %s' %\n (committer_id, exploration_id))\n\n exploration_memcache_key = _get_exploration_memcache_key(exploration_id)\n memcache_services.delete(exploration_memcache_key)\n\n for state in exploration.states:\n delete_state_model(exploration_id, state.id)\n\n exploration_model = exp_models.ExplorationModel.get(exploration_id)\n exploration_model.delete()\n\n for snapshot in exp_models.ExplorationSnapshotModel.get_all():\n if snapshot.exploration_id == exploration_id:\n snapshot.delete()\n\n for snapshot in exp_models.ExplorationSnapshotContentModel.get_all():\n if snapshot.exploration_id == exploration_id:\n snapshot.delete()", "async def test_not_delete_with_account_token(self):\n provisioning_client = ProvisioningProfileClient(httpClient, 'token')\n try:\n await provisioning_client.delete_provisioning_profile('id')\n except Exception as err:\n assert err.__str__() == 'You can not invoke delete_provisioning_profile method, because you ' + \\\n 'have connected with account access token. Please use API access token from ' + \\\n 'https://app.metaapi.cloud/token page to invoke this method.'", "def delete_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover", "async def test_delete(self):\n rsps = respx.delete(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n .mock(return_value=Response(200))\n await provisioning_client.delete_provisioning_profile('id')\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'", "def disconnect_identity(identity):\n session.pop(\"cern_resource\", None)\n key = current_app.config.get(\n \"OAUTHCLIENT_CERN_OPENID_SESSION_KEY\",\n OAUTHCLIENT_CERN_OPENID_SESSION_KEY,\n )\n provides = session.pop(key, set())\n identity.provides -= provides", "def delete_customer(customer_id):\n try:\n remove_user = cm.Customers.get(cm.Customers.customer_id == customer_id)\n remove_user.delete_instance()\n except cm.DoesNotExist:\n logging.info(\"Customer successfully deleted from database.\")", "def delete_model(source: str, supported_model_name: str, model_id: str):\n connector = __get_connector(source)\n supported_model = __get_supported_model(supported_model_name)\n\n try:\n return connector.delete(supported_model, model_id)\n except Exception as e:\n abort(500, e)", "def delete(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def delete_code_repository(CodeRepositoryName=None):\n pass", "def delete_credential(self, context, id):\n return remove_credential(id)", "def test_anonymous_user_delete(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Unauthorized,\r\n getattr(require, 'token').delete,\r\n token)", "def delete(cls, aws_cloud_account_id: 
str):\n\t\tpass", "def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")", "def pin_delete(self, pin_id=None, path=None):\n if path == None:\n path = []\n if pin_id != None:\n path.insert(0, pin_id)\n path.insert(0, \"pin\")\n location = '/'.join(path)\n return self.send_delete(location,\n params={})", "def delete_entity(self, context, member):\n parent_pool_id = member.pool.id\n resource_path = \"%s/%s/%s/%s/%s\" % (RESOURCE_PREFIX,\n POOLS_RESOURCE,\n parent_pool_id,\n MEMBERS_RESOURCE,\n member.id)\n msg = _(\"NetScaler driver member removal: %s\") % member.id\n LOG.debug(msg)\n self.client.remove_resource(context.tenant_id, resource_path)", "def delete_access(request):\n # Get the submitted request parameters.\n\n params = urllib.parse.parse_qs(request.META['QUERY_STRING'])\n\n if \"global_id\" not in params or len(params['global_id']) != 1:\n return HttpResponseBadRequest()\n else:\n global_id = params['global_id'][0]\n\n # Get the GlobalID object for the supplied global ID, creating one if\n # necessary.\n\n global_id_rec,created = GlobalID.objects.get_or_create(global_id=global_id)\n\n # Delete the existing access credentials for this global ID, if it exists.\n\n AccessID.objects.filter(global_id=global_id_rec).delete()\n\n # Tell the caller that we succeeded.\n\n return HttpResponse(status=200)", "def delete(self,\n ike_profile_id,\n ):\n return self._invoke('delete',\n {\n 'ike_profile_id': ike_profile_id,\n })", "def delete_member(net_id):\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"DELETE FROM Member WHERE netID='\"+net_id+\"'\"\n cursor.execute(sql_string)\n connection.commit()", "def delete_model(ModelName=None):\n pass", "def test_consumer_delete_unauthed_with_proj_id(self):\n\n headers = {'X-Project-Id': dummy_project_id}\n resp, consumer_dat = self.consumer_behaviors.delete_consumer(\n None, self.container_ref, extra_headers=headers, use_auth=False\n )\n\n self.assertEqual(401, resp.status_code)", "def Delete(self):\n\n if self.network_id:\n self.cs.delete_network(self.network_id)\n\n if self.is_vpc and self.vpc_id:\n self.cs.delete_vpc(self.vpc_id)", "def delete_demo(exploration_id):\n exploration = get_exploration_by_id(exploration_id, strict=False)\n if not exploration:\n # This exploration does not exist, so it cannot be deleted.\n logging.info('Exploration with id %s was not deleted, because it '\n 'does not exist.' 
% exploration_id)\n else:\n delete_exploration(ADMIN_COMMITTER_ID, exploration_id)", "def test_delete_o_auth_access_token(self):\n pass", "def delete_vm(self, region: str, instance_id: str):\n raise NotImplementedError()", "def test_consumer_delete_unauthed_no_proj_id(self):\n\n resp, consumer_dat = self.consumer_behaviors.delete_consumer(\n None, self.container_ref, use_auth=False\n )\n\n self.assertEqual(401, resp.status_code)", "def delete(self, oid):\n path = '%s/security-groups/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack security group: %s' % truncate(res))\n return res[0]", "def test_authenticated_user_delete(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').delete,\r\n token)", "def delete_user(self):\n raise NotImplementedError(\"Function not yet implemented contact package creator\")", "def test_delete_collection_identity(self):\n pass", "def delete(self):\n self.deleted = True\n # Deactivate the user to disallow authentication and also\n # to let the user verify the email again after recovery.\n self.is_active = False\n self.save()\n self.history.create(user_id=self.pk, action=user_history.DELETION)", "def delete(seed):\n shutil.rmtree(os.path.join(DATA_DIR, seed))", "def delete_version(self):\n pass", "def delete(self):\n if self.iid is not None:\n self.db().remove(self.iid)", "def destroy(self,\n context,\n instance,\n network_info,\n block_device_info=None,\n destroy_disks=True,\n migrate_data=None):\n LOG.info(\"Destroying instance %s\" % instance.uuid)\n try:\n azure_name = self._get_omni_name_from_instance(instance)\n except exception.InstanceNotFound:\n LOG.error(\n \"Unable to find Azure mapping for instance %s\" % instance.uuid)\n return\n try:\n utils.delete_instance(self.compute_client, drv_conf.resource_group,\n azure_name)\n except utils.CloudError:\n LOG.error(\n \"Instance %s not found in Azure, removing from openstack.\" %\n (instance.uuid, ))\n # Delete disk handles exception if disk not found\n utils.delete_disk(self.compute_client, drv_conf.resource_group,\n azure_name)\n LOG.info(\"Destroy complete %s\" % instance.uuid)", "def delete_network_profile(arn=None):\n pass", "def test_delete_o_auth_authorize_token(self):\n pass", "def delete(self):\n\n raise NotImplementedError()", "def del_user(self, username):\n pass", "def delete(self, request, app_id, addon_name):\n addon = Addon.objects.get(app__app_id=app_id, display_name=addon_name)\n provider = get_provider_from_provider_name(addon.provider_name)\n result = provider.deprovision(addon.provider_uuid)\n manager = StateMachineManager()\n with manager.transition(addon.id, AddonEvent.deprovision_success):\n pass\n manager.start_task(addon.id)\n return self.respond({'message': result['message']})", "def deleteUser(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ida_delete(self, ip):\n\n id_url = self.api_url + 'delete-identity'\n data = {'shared-secret': self.secret, 'ip-address': ip, }\n try:\n r = requests.post(id_url, data=json.dumps(data), headers=self.headers, verify=False, timeout=5)\n r.raise_for_status()\n return r.status_code, json.loads(r.content)\n except requests.exceptions.ConnectionError as err:\n\n message_string = json.dumps({'message': 'connection error'})\n 
return json.loads(message_string)\n # wrong gateway IP, gateway does not allow connection, IDA blade is not enabled\n except requests.exceptions.HTTPError as err:\n\n if r.status_code == 500 and r.content:\n s_code = 400\n message = r.json()['message']\n\n else:\n message = json.loads(json.dumps({'message': 'wrong secret'}))\n s_code = r.status_code\n return s_code, message\n # wrong secret (404), wrong time-put value (500)", "def delete(self, userinformation):\n self.db.remove(userinformation)", "def test_delete_collection_o_auth_access_token(self):\n pass", "def delete_servicech(self, conf, phone_num):\n\t\tpass", "def delInfo(label: str):\r\n\r\n if not self.isClosed:\r\n if label in self.__identity_info.keys():\r\n del self.__identity_info[label]\r\n else:\r\n raise HDDOPermissionException('Tried to delete non-existing identity information in a HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to delete identity information from a closed HealthDominoDataObject.')", "def onUserDeletion(event):\n request = getRequest()\n if not IProductLayer.providedBy(request):\n return\n\n client = getUtility(IAdminClient)\n xmpp_users = getUtility(IXMPPUsers)\n\n principal_id = event.principal\n principal_jid = xmpp_users.getUserJID(principal_id)\n\n pass_storage = getUtility(IXMPPPasswordStorage)\n pass_storage.remove(principal_id)\n\n d = users.deletePrincipal(client, principal_jid)\n return d", "def storage_pool_delete_by_storage(context, storage_id):\n _storage_pool_get_query(context).filter_by(storage_id=storage_id).delete()", "def delete_algorithm(AlgorithmName=None):\n pass", "def delete_device_pool(arn=None):\n pass", "def delete(self):\n self.id = uuid4()\n DataStore.remove_instance(self)", "def delete(self,\n partner_id):\n partner = PartnerModel.query.get(partner_id)\n\n if partner is None:\n abort(404)\n\n if partner.type == PartnerType.admin and partner.is_active is True:\n admins = PartnerModel.query.filter_by(\n organization_id=partner.organization_id,\n type=PartnerType.admin,\n is_active=True).all()\n\n if len(admins) <= 1:\n abort(409, 'Cannot delete partner. 
There must be at least one '\n 'admin of an organization.')\n\n partner.type = PartnerType.member\n db.session.commit()\n\n return None, 204", "def delete_customer(customer_id):\n del_query = Customer.get(Customer.customer_id == customer_id)\n return bool(del_query.delete_instance())", "def delete_device(cls, device_uuid):\n cls.dbdriver.delete_device(device_uuid)", "def version_delete(self, version_id):\n try:\n castle_delete_version(self.conn, version_id)\n pycastle_log.info(\"Deleted version {0}\".format(version_id))\n except Exception, e:\n pycastle_log.error(str(self)+\" got exception {0}:{1}\".format(type(e), e))\n raise", "def delete(self, *args, **kwargs):\n # Delete the User and UserProfile objects associated with the\n # Member.\n user_profile = self.userprofile\n user = user_profile.user\n user_profile.delete()\n user.delete()\n # Delete the member itself\n super(Member, self).delete(*args, **kwargs)", "def delete_server(ServerName=None):\n pass", "def delete_region(self, region_id):\n raise exception.NotImplemented() # pragma: no cover", "def test_delete_device_token(self):\n pass", "def delete(self, **ctx_options):\n return self.delete_async(**ctx_options).get_result()", "def DeleteOIDCClient(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete(self, **params):\n return self._api.delete_customer(self.id, **params)" ]
[ "0.76473016", "0.6944449", "0.6763664", "0.6758632", "0.67521816", "0.67521816", "0.66426307", "0.6528002", "0.63240004", "0.62183905", "0.6177831", "0.6141892", "0.60588896", "0.59532607", "0.58217615", "0.55912703", "0.55306107", "0.5515687", "0.54976976", "0.53798723", "0.5353392", "0.5339011", "0.531505", "0.52996236", "0.5270049", "0.52688134", "0.52434385", "0.52221537", "0.51924646", "0.5152336", "0.5143034", "0.5140935", "0.51305497", "0.5129097", "0.5128877", "0.51228863", "0.5120891", "0.51129353", "0.5087379", "0.5068404", "0.50667864", "0.50586", "0.504225", "0.501112", "0.50089383", "0.50050896", "0.4980078", "0.49660003", "0.49640048", "0.495159", "0.49387082", "0.49262232", "0.4923088", "0.49079007", "0.49068943", "0.48955813", "0.48919293", "0.4890036", "0.4877895", "0.4876832", "0.48766822", "0.48748168", "0.4870204", "0.48694444", "0.48580325", "0.4854951", "0.485186", "0.48517185", "0.48489723", "0.48469496", "0.48438323", "0.4841839", "0.4840398", "0.481327", "0.4806068", "0.48059666", "0.48044607", "0.48037004", "0.4799232", "0.47923577", "0.47892943", "0.4786963", "0.4786559", "0.47779897", "0.47766972", "0.4773323", "0.47687638", "0.47636613", "0.4763356", "0.47629863", "0.47622374", "0.4759405", "0.47583994", "0.47561896", "0.47558728", "0.47484842", "0.47468254", "0.47448188", "0.4742318", "0.47351655" ]
0.6678267
6
Deletes the specified group mapping.
def delete_idp_group_mapping(self, identity_provider_id, mapping_id, **kwargs): resource_path = "/identityProviders/{identityProviderId}/groupMappings/{mappingId}" method = "DELETE" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "delete_idp_group_mapping got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "identityProviderId": identity_provider_id, "mappingId": mapping_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params) else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_group(self, group):\n raise NotImplementedError('delete_group')", "def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)", "def delete_group(gid):\n if request.method == 'POST':\n hl.deleteGroup(gid)\n return redirect('/users')", "def do_del_group(dbsync, group):\n pass", "def delete():\n name = request.json['name']\n group = models.user.Group.get(name)\n if not group:\n raise Absent('Group does not exists.', deletion=False)\n else:\n models.db.session.delete(group)\n models.db.session.commit()\n return response(200, deletion=True)", "def delete_group(_request, group_id):\n group = models.UserGroup.get_by_id(int(group_id))\n group.delete()\n\n url = urlresolvers.reverse('views.admin.list_groups')\n return http.HttpResponseRedirect(url)", "def fusion_api_delete_group_role_assignment(self, name=None, uri=None, api=None, headers=None):\n return self.LoginDomainsGroupToRoleMapping.delete(name, uri, api, headers)", "def delete_group(groupname):\n response = jsonify(admin.delete_group(current_app.scoped_session(), groupname))\n return response", "def delete_group(self, group_id):\n url = self.groups_url + \"/%s\" % group_id\n return requests.delete(url, headers=self.headers)", "def delete(person_group_id):\n url = 'persongroups/{}'.format(person_group_id)\n\n return util.request('DELETE', url)", "def delete_group():\n incoming = request.get_json()\n Chatroom.delete_chatroom_with_room_id(incoming['room_id'])\n return jsonify(results = incoming['room_id'])", "def remove_from_group(self, org, contact, group):\n pass", "def del_group(self, group_id, group_type):\n self._mod_group(\n command=self.ofproto.OFPGC_DELETE,\n group_id=group_id,\n group_type=group_type,\n )", "def test_delete_group(self):\n response = self.client.delete_group(\"ABC123\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def delete_group(self, group_name):\r\n params = {'GroupName' : group_name}\r\n return self.get_response('DeleteGroup', params)", "def remove_group():\n _id = request.form['_id']\n data, code, message = FIELD_SERVICE.remove_group(_id)\n return __result(data, code, message)", "def delete_group(self, group_id: str):\n # If successful, this method returns 204 No Content response code.\n # It does not return anything in the response body.\n # Using resp_type=\"text\" to avoid parsing error in the calling method.\n self.ms_client.http_request(method='DELETE', url_suffix=f'groups/{group_id}', resp_type=\"text\")", "def delete_group(self, group_o):\n class_query = ClassQuery('fvTenant')\n class_query.propFilter = 'eq(fvTenant.name, \"' + group_o.name + '\")'\n tenant_list = self.moDir.query(class_query)\n if len(tenant_list) > 0:\n tenant_list[0].delete()\n self.commit(tenant_list[0])", "def delete_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n group_id = str(args.get('group_id'))\n client.delete_group(group_id)\n\n # get the group data from the context\n group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === \"{group_id}\")')\n if isinstance(group_data, list):\n group_data = group_data[0]\n\n # add a field that indicates that the group was deleted\n group_data['Deleted'] = True # add a field with the members to the group\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}\n\n human_readable = f'Group: 
\"{group_id}\" was deleted successfully.'\n return human_readable, entry_context, NO_OUTPUTS", "def delete_group_group_member(self, targetgroup, groupname):\n try:\n targetgroup = self.quote(targetgroup)\n groupname = self.quote(groupname)\n self.g.delete('groups/%s/groups/%s' % (targetgroup,\n groupname),\n headers={})\n except HTTPError as e:\n return self._manage_errors(e)", "def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))", "def delete_vm_group(session, cluster, vm_group):\n client_factory = session.vim.client.factory\n group_spec = client_factory.create('ns0:ClusterGroupSpec')\n groups = []\n\n group_spec.info = vm_group\n group_spec.operation = \"remove\"\n group_spec.removeKey = vm_group.name\n groups.append(group_spec)\n\n config_spec = client_factory.create('ns0:ClusterConfigSpecEx')\n config_spec.groupSpec = groups\n reconfigure_cluster(session, cluster, config_spec)", "def delete_group(args, p4, group_name, metrics):\n LOG.debug(\"delete_group() {}\".format(group_name))\n r = p4.fetch_group(group_name)\n if r and r.get('Owners') and p4gf_const.P4GF_USER in r.get('Owners'):\n print_verbose(args, _(\"Deleting group '{group_name}'...\").format(group_name=group_name))\n p4.run('group', '-a', '-d', group_name)\n metrics.groups += 1\n else:\n print_verbose(args, _(\"Not deleting group '{group}':\"\n \" Does not exist or '{user}' is not an owner.\")\n .format(group=group_name, user=p4gf_const.P4GF_USER))", "def delete_placement_group(self, name):\r\n params = {'GroupName':name}\r\n return self.get_status('DeletePlacementGroup', params, verb='POST')", "def delete_entry_group(self, name):\n self.__datacatalog.delete_entry_group(name=name)", "def delete_group(\n group_id: BSONObjectId,\n tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.delete_group\")\n grp: Group = Group.objects.get(pk=group_id)\n logging.debug(\"Deleting group %s (%s)\", grp.group_name, group_id)\n grp.delete()", "def delete_mapping(project, img):\n with BMI(_username, _password, project) as bmi:\n ret = bmi.umount_image(img)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo('Success')\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1", "def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE", "def delete_targetgroup(self, group_id):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).delete()\r\n self._db.commit()\r\n return result", "def _delete_by_list(self, group_ids):\n path = '/members/%s/groups/remove' % self.member['member_id']\n data = {'group_ids': group_ids}\n if self.member.account.adapter.put(path, data):\n self._dict = dict(x for x in self._dict.items()\n if x[0] not in group_ids)", "async def delete_contact_group(dbcon: DBConnection, contact_group_id: int) -> None:\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n q = \"\"\"delete from contact_groups where 
id=%s\"\"\"\n await dbcon.operation(q, (contact_group_id,))", "def drop_groups(self, group_ids=None):\n return self.groups.delete(group_ids)", "def test_delete_group(self):\n self.group.delete_group.return_value = succeed('del')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n (self.log, '00', 'g1'), self.group)\n self.assertEqual(result, 'del')", "def post_security_group_delete(self, resource_id, resource_dict):\n pass", "def delete(self, key):\n self.map.pop(key, None)", "def delete(self,\n provider_id,\n group_id,\n ):\n return self._invoke('delete',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n })", "def deleteMappingSet(self,mappingSetId:str=None)->dict:\n if mappingSetId is None:\n raise ValueError(\"Require a mapping ID\")\n path = f\"/mappingSets/{mappingSetId}\"\n res = self.connector.deleteData(self.endpoint+path)\n return res", "def delete_scaling_group(self, request):\n group = self.store.get_scaling_group(self.log, self.tenant_id,\n self.group_id)\n force = extract_bool_arg(request, 'force', False)\n return controller.delete_group(\n self.dispatcher, log, transaction_id(request), group, force)", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def delete_group(id, createdby):\n query = \"DELETE FROM groups WHERE group_id = {} AND createdby ='{}'\".format(id, createdby)\n cur.execute(query)", "def remove_group_bucket():\n pass", "def test_api_v1_groups_id_delete(self):\n pass", "def test_delete_group_log_context(self):\n self.group.delete_group.return_value = succeed('del')\n expected_lookup = (matches(IsBoundWith(base_log=True, effectful=True)),\n '00', 'g1')\n result = self.perform_with_group(\n Effect(DeleteGroup(tenant_id='00', group_id='g1')),\n expected_lookup, self.group,\n fallback_dispatcher=get_log_dispatcher(self.log,\n {'effectful': True}))\n self.assertEqual(result, 'del')", "def test_delete():\n mock_ret = MagicMock(return_value=0)\n mock_group = {\"passwd\": \"*\", \"gid\": 0, \"name\": \"test\", \"members\": [\"root\"]}\n with patch.dict(mac_group.__salt__, {\"cmd.retcode\": mock_ret}), patch(\n \"salt.modules.mac_group.info\", MagicMock(return_value=mock_group)\n ):\n assert mac_group.delete(\"test\")", "def delete_salary_group(db:Session):\n pass", "def clear_group(self):\n # Implemented from template for osid.resource.ResourceForm.clear_group_template\n if (self.get_group_metadata().is_read_only() or\n self.get_group_metadata().is_required()):\n raise errors.NoAccess()\n self._my_map['group'] = self._group_default", "def delete(self, consistencygroup, force=False):\n body = {'consistencygroup': {'force': force}}\n self.run_hooks('modify_body_for_action', body, 'consistencygroup')\n url = '/consistencygroups/%s/delete' % base.getid(consistencygroup)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def delete_TestGroup(test_case, override_group_name=null, override_headers=null, override_cookies=null):\n # type: (AnyMagpieTestCaseType, Optional[Str], Optional[HeadersType], Optional[CookiesType]) -> None\n app_or_url = get_app_or_url(test_case)\n headers = override_headers if override_headers is not null else test_case.json_headers\n cookies = override_cookies if override_cookies is not null else test_case.cookies\n groups = TestSetup.get_RegisteredGroupsList(test_case, override_headers=headers, override_cookies=cookies)\n group_name = override_group_name if override_group_name is not null else 
test_case.test_group_name\n # delete as required, skip if non-existing\n if group_name in groups:\n path = \"/groups/{grp}\".format(grp=group_name)\n resp = test_request(app_or_url, \"DELETE\", path, headers=headers, cookies=cookies)\n check_response_basic_info(resp, 200, expected_method=\"DELETE\")\n TestSetup.check_NonExistingTestGroup(test_case, override_group_name=group_name,\n override_headers=headers, override_cookies=cookies)", "def deleteMappingSetMapping(self,mappingSetId:str=None,mappingId:str=None)->dict:\n if mappingSetId is None:\n raise ValueError(\"Require a mappingSet ID\")\n if mappingId is None:\n raise ValueError(\"Require a mapping ID\")\n path = f\"/mappingSets/{mappingSetId}/mappings/{mappingId}\"\n res = self.connector.deleteData(self.endpoint + path)\n return res", "def delete(self, key):\n app.logger.info('Request to Delete a map_object with key [%s]', key)\n map_object = Map.get_value_with_key(key)\n if map_object:\n map_object.delete()\n return 'Map deleted', status.HTTP_204_NO_CONTENT", "def test_groups_group_ref_delete(self):\n pass", "def delete_group_cached(group_id, broker=None):\n if not broker:\n broker = get_broker()\n group_key = '{}:{}:keys'.format(broker.list_key, group_id)\n group_list = broker.cache.get(group_key)\n broker.cache.delete_many(group_list)\n broker.cache.delete(group_key)", "def deleteGroup(request):\n \n if request.method == 'POST':\n \n form = DeleteGroupForm(request.POST)\n \n if form.is_valid():\n \n cd = form.cleaned_data\n \n try:\n \n #Delete records from m2m of Users & Groups for selected groups\n for eachGroup in cd['group_id']:\n Group_User.objects.filter(group = eachGroup.id).delete()\n \n #Delete Group(s)\n for eachGroup in cd['group_id']:\n Group.objects.filter(id = eachGroup.id).delete()\n \n except:\n \n error = 'Unable to Delete Groups!'\n return render_to_response('deletegroup.html', \n {'form': form, 'error': error},\n context_instance=RequestContext(request))\n \n return HttpResponseRedirect('/deletegroup/success/')\n \n else:\n \n return render_to_response('deletegroup.html',\n {'form': form}, \n context_instance=RequestContext(request)) \n \n else:\n \n form = DeleteGroupForm()\n \n return render_to_response('deletegroup.html', \n {'form': form}, \n context_instance=RequestContext(request))", "def removeGroup(self, group, defaultGroup=''):\n return self.pm_getUserManager().removeGroup(self._unbox(group), self._unbox(defaultGroup))", "def delete_api_mapping(self, ApiMappingId: str, DomainName: str):\n pass", "def unlink_Group(self, group):\n\t\tself.__groups.remove(group.weakref)\n\t\tself._cli_invalidate()", "def removeGroup(self, *args):\n return _libsbml.GroupsModelPlugin_removeGroup(self, *args)", "def del_from_groups(self, username, groups):\n pass", "def delete_all_group_member(self, group_id):\n url = self.groups_url + \"/%s/members\" % group_id\n return requests.delete(url, headers=self.headers)", "def remove_inv_group(**kwargs):\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n gw = kwargs['gateway']\n group_id = kwargs['objectname']\n json_response_status_code = delete_inventory_group_json_response(proxy, sessiontoken, gw, group_id)\n if json_response_status_code == 200:\n print(\"The group \" + group_id + \" has been deleted\")\n else:\n print(\"Something went wrong - please check your syntax and try again.\")", "def group_remove(group, board):\n for xy in group:\n board[xy[0]][xy[1]] = None\n return deepcopy(board)", "def test_delete_group_exists():\n with 
patch(\"salt.modules.mac_group.info\", MagicMock(return_value={})):\n assert mac_group.delete(\"test\")", "def test_delete_entry_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_entry_groups(id, group_id, topic_id)", "def test_delete_groups(self):\n pass", "def slotDelete(self):\n item = self.groupListBox.item((self.groupListBox.currentItem()))\n group = item.text().ascii()\n Group.Sequencer().slotRemoveGlobalGroup(group)", "def test_delete_device_group_member_by_id(self):\n pass", "def delete(self, mapitem_id: int):\n pass", "def after_delete(self, record):\n debug = logging.getLogger(__name__).debug\n debug('deleted group %r (%r)', record['name'], record['group_id'])\n audit('delete group', record['name'])", "def delete(self, force_delete=False):\r\n return self.connection.delete_auto_scaling_group(self.name, force_delete)", "def customer_group_delete(group_id):\n result = {\"success\" : 1, \"message\" : \"Customer can not be Deleted\"}\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n \n #clean up the user id\n group_id = db.escape_string(group_id)\n \n query = \"\"\"\n DELETE FROM `groups`\n WHERE `groups`.`group_id` = \"%s\"\n \"\"\" %(group_id)\n cursor = db.cursor()\n try:\n if (cursor.execute(query)) != 0:\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer Group Deleted Successfully\"}\n except Exception as customer_exp:\n result = {\"success\" : 1, \"message\" : \"Customer Group can not be Deleted \" + str(e)}\n finally:\n cursor.close()\n db.close()\n return result", "def delete(aMap, key):\n\tbucket = get_bucket(aMap, key)\n\tfor i in range(len(bucket)):\n\t\tk, v = bucket[i]\n\t\tif key == k :\n\t\t\tdel bucket[i]\n\t\t\tbreak", "def delete_adcampaign_group(self, campaign_group_id, batch=False):\n path = '%s' % campaign_group_id\n return self.make_request(path, 'DELETE', batch=batch)", "def delete_group(user):\n return 'do some magic!'", "def delete_nick_group(self, nick):\n nick = Identifier(nick)\n nick_id = self.get_nick_id(nick, False)\n session = self.ssession()\n try:\n session.query(Nicknames).filter(Nicknames.nick_id == nick_id).delete()\n session.query(NickValues).filter(NickValues.nick_id == nick_id).delete()\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n self.ssession.remove()", "def test_delete_resource_group(self):\n pass", "def delete(aMap, key):\n\t#get the bucket that they key is in, and sets it to bucket\n\tbucket = get_bucket(aMap, key)\n\n\tfor i in xrange(len(bucket)):\n\t\tk, v = bucket[i]\n\t\tif key == k:\n\t\t\tdel bucket[i]\n\t\t\t#we can break here, since we know there can be only one key/value pair\n\t\t\tbreak", "def remove_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for membership in self.group_memberships:\n if membership.group.href == group.href:\n membership.delete()\n return\n\n raise StormpathError({\n 'developerMessage': 'This user is not part of Group %s.' 
% group.name,\n })", "def delete(aMap, key):\n\tbucket = get_bucket(aMap, key)\n\n\tfor i in xrange(len(bucket)):\n\t\tk, v = bucket[i]\n\t\tif key == k:\n\t\t\tdel bucket[i]\n\t\t\tbreak", "def delete_security_group(self, security_group):\r\n return self.delete(self.security_group_path % (security_group))", "def remove_from_group(user: User, group: Group) -> Result:\n if user.pw_name not in group.gr_mem:\n return Result(State.unchanged)\n command([\"/usr/sbin/deluser\", user.pw_name, group.gr_name])\n group.gr_mem.remove(user.pw_name)\n return Result(State.success)", "def on_groups_deleted(event):\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_objects:\n group = change[\"old\"]\n bucket_id = event.payload[\"bucket_id\"]\n group_uri = utils.instance_uri(event.request, \"group\", bucket_id=bucket_id, id=group[\"id\"])\n\n permission_backend.remove_principal(group_uri)", "def delete_service_group(self, group_id):\r\n svc = self.client['Network_Application_Delivery_Controller_'\r\n 'LoadBalancer_VirtualServer']\r\n\r\n return svc.deleteObject(id=group_id)", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def fusion_api_del_role_from_group(self, domain=None, group=None, api=None, headers=None):\n return self.roles.del_role_from_group(domain, group, api=api, headers=headers)", "def capacitygroup_delete(cmd_ctx, cpc, capacitygroup):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_delete(cmd_ctx, cpc, capacitygroup))", "def delete_group(dispatcher, log, trans_id, group, force):\n\n def check_and_delete(_group, state):\n if state.desired == 0:\n d = trigger_convergence_deletion(dispatcher, group, trans_id)\n return d.addCallback(lambda _: state)\n else:\n raise GroupNotEmptyError(group.tenant_id, group.uuid)\n\n if tenant_is_enabled(group.tenant_id, config_value):\n if force:\n # We don't care about servers in the group. 
So trigger deletion\n # since it will take precedence over other status\n d = trigger_convergence_deletion(dispatcher, group, trans_id)\n else:\n # Delete only if desired is 0 which must be done with a lock to\n # ensure desired is not getting modified by another thread/node\n # when executing policy\n d = group.modify_state(\n check_and_delete,\n modify_state_reason='delete_group')\n else:\n if force:\n d = empty_group(log, trans_id, group)\n d.addCallback(lambda _: group.delete_group())\n else:\n d = group.delete_group()\n return d", "def test_delete_collection_group(self):\n pass", "def remove_from_group(self, group):\n\n if self.in_group(group):\n self.secondary_groups.remove(group)\n return self", "def delete_target_groups(ctx):\n self.delete_target_groups()\n ctx.info('Deleted target groups for the load balancer {}:'.format(self.get_balancer_name()))", "def DeleteProxyGroup(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteProxyGroup\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteProxyGroupResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "async def delete_contact_group_from_active_monitor(dbcon: DBConnection, contact_group_id: int, monitor_id: int) -> None:\n q = \"\"\"delete from active_monitor_contact_groups where active_monitor_id=%s and contact_group_id=%s\"\"\"\n q_args = (monitor_id, contact_group_id)\n await dbcon.operation(q, q_args)", "def delete(aMap,key):\n\tbucket=get_bucket(aMap,key)\n\t\n\tfor i in xrange(len(bucket)):\n\t\tk,v=bucket[i]\n\t\tif key==k:\n\t\t\tdel bucket[i]\n\t\t\tbreak", "def delete(self, id):\r\n return UserGroupService.removeUserGroup(self, id)", "def test_removeGroup(self):\n\t\tuser = User.objects.get(id=1)\n\t\tself.client.force_authenticate(user=user)\n\t\tgroup = Group.objects.create(admin=user, name='testGroup3', isPublic=True, \n\t\t\tdescription='This is another test group that just created.')\n\n\t\turl = \"/groups/3/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\t\turl = \"/groups/2/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def remove_group(self, index):\n group = self.get(index).group\n for stone_index in group.members:\n self.board[stone_index] = None", "def delete(self,\n provider_id,\n provider_deployment_map_id,\n ):\n return self._invoke('delete',\n {\n 'provider_id': provider_id,\n 'provider_deployment_map_id': provider_deployment_map_id,\n })", "def delete(self, host_name, group_name): # noqa\n\n response = remove_host(host_name, group_name)\n return response.__dict__, self.state_to_http[response.status]", "def remove_vpc_group(self, fabricExplicitGEp_dn):\n fabricExplicitGEp_mo = self.moDir.lookupByDn(fabricExplicitGEp_dn)\n fabricExplicitGEp_mo.delete()\n self.commit(fabricExplicitGEp_mo)" ]
[ "0.7407182", "0.6943236", "0.681764", "0.6814603", "0.66882336", "0.665547", "0.66528887", "0.6552498", "0.6507317", "0.65058494", "0.6426595", "0.64135116", "0.64025885", "0.6398474", "0.6387597", "0.63239", "0.62887627", "0.6258294", "0.62549347", "0.6252067", "0.62451065", "0.6242744", "0.61733633", "0.61513036", "0.6105671", "0.60856974", "0.60743237", "0.6050349", "0.60438263", "0.6033223", "0.60270065", "0.6021395", "0.6012769", "0.60093683", "0.59929955", "0.5988428", "0.59652483", "0.59651315", "0.5937022", "0.5920995", "0.5920995", "0.5898846", "0.5898245", "0.58857775", "0.5881625", "0.587192", "0.5859649", "0.58526754", "0.5846938", "0.5822521", "0.5815293", "0.58082616", "0.58066857", "0.5804288", "0.57860553", "0.5779069", "0.57734746", "0.5771558", "0.575996", "0.5748292", "0.5744677", "0.57332826", "0.5732146", "0.5717506", "0.57083935", "0.57051307", "0.5703675", "0.5681584", "0.5679743", "0.5679487", "0.5671042", "0.5662364", "0.563412", "0.5629954", "0.5582142", "0.5580814", "0.5578593", "0.5576844", "0.55758685", "0.55598646", "0.5557126", "0.5557063", "0.55569696", "0.5554907", "0.5548189", "0.55477214", "0.5545515", "0.55408156", "0.55362517", "0.55292034", "0.5527528", "0.5526387", "0.5521349", "0.55199045", "0.5518879", "0.5518115", "0.5514882", "0.55141276", "0.55104893", "0.55093277" ]
0.5965616
36
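Likewise, a hedged sketch of calling the delete_idp_group_mapping method from the row above via the OCI Python SDK; both OCIDs below are placeholder assumptions.

# Hypothetical usage sketch; both identifiers are placeholders.
import oci

identity = oci.identity.IdentityClient(oci.config.from_file())

# Removes a single IdP-to-IAM-group mapping without deleting the identity provider itself.
identity.delete_idp_group_mapping(
    identity_provider_id="ocid1.saml2idp.oc1..exampleuniqueID",
    mapping_id="ocid1.idpgroupmapping.oc1..exampleuniqueID",
)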
Deletes the specified MFA TOTP device for the specified user.
def delete_mfa_totp_device(self, user_id, mfa_totp_device_id, **kwargs): resource_path = "/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}" method = "DELETE" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "delete_mfa_totp_device got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id, "mfaTotpDeviceId": mfa_totp_device_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params) else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_user(self, user):\n self.delete(user)", "def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)", "def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass", "def delete(self, user_id):\r\n return delete_user(request, user_id)", "def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])", "def delete(self, user_id):\n return delete_user(user_id)", "def delete_user():", "def delete_user(self, user):\n name = utils.get_name(user)\n self._user_manager.delete(name)", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def test_delete_device_user(self):\n pass", "def delete_user():\n #TODO user delete\n pass", "def delete_user(self) -> None:\n table_dictionary = {\n 'Apple': {\n 'table': 'AppleReceipts',\n 'user_id': 'User_id'\n },\n 'ESL': {\n 'table': 'ESLReceipts',\n 'user_id': 'User_id'\n },\n 'Transactions': {\n 'table': 'Transactions',\n 'user_id': 'User_id'\n },\n 'Users': {\n 'table': 'Users',\n 'user_id': 'id'\n },\n }\n\n # delete the current user's information from the db.\n for key in table_dictionary:\n query = f\"\"\"\n DELETE\n FROM {table_dictionary[key]['table']}\n WHERE {table_dictionary[key]['user_id']}=?;\n \"\"\"\n self.db.commit(query, values=(self.id,))\n\n # perform a sign out\n self.sign_out()\n\n log(f\"User:{self.id} has deleted their account.\")", "def delete_user(self, user_id):\n\n # ask the model to delete the user\n um = User(self.settings)\n status = um.delete(user_id)\n\n # return\n return status", "def deleteUser(user):\n delete_user(user)\n return redirect(url_for('login'))", "def delete_user_entitlement(self, user_id):\n route_values = {}\n if user_id is not None:\n route_values['userId'] = self._serialize.url('user_id', user_id, 'str')\n self._send(http_method='DELETE',\n location_id='8480c6eb-ce60-47e9-88df-eca3c801638b',\n version='6.0-preview.3',\n route_values=route_values)", "def delete(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def delete_user(self, user_id):\n return self._delete('/users/{0}'.format(user_id))", "def userdel(pwfile, user):\n return __salt__[\"webutil.userdel\"](pwfile, user)", "def user_id_delete(user_id):\n user = storage.get(\"User\", user_id)\n\n if user is None:\n abort(404)\n user.delete()\n del user\n return make_response(jsonify({}), 200)", "def delete_user(id):\n pass", "def delete(user_id):\n assert isinstance(user_id, ObjectId)\n\n User.objects(id=user_id).delete()", "def delete_user(self, user_name):\n user = self.get_user(user_name)\n return self.client.delete_resource(user.get('href'))", "def delete_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"delete_user\")", "def user_delete(user_id=None):\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200", "def delete_user(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['id'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, 
version=\"v1\")", "def delete_user(self):\n raise NotImplementedError(\"Function not yet implemented contact package creator\")", "def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))", "def user_delete(user_id):\n user = storage.get('User', user_id)\n if user is None:\n abort(404)\n user.delete()\n storage.save()\n return jsonify({}), 200", "def delete_user_by_xng_id(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/xngId/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['xngId'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v1\")", "def delete_user(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.delete_user(user_id)", "def delete(self, user: 'UserCondensed'):\n self._delete(entity=user)", "def delete_user(user_id):\n temp = models.storage.get('User', user_id)\n if temp is None:\n abort(404)\n temp.delete()\n models.storage.save()\n return jsonify({})", "async def red_delete_data_for_user(self, *, requester, user_id):\n\t\tawait self.config.user_from_id(user_id).clear()", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "async def del_user(conn: LDAPConnection, user: dict, mailman: Client) -> None:\n await conn.delete(user[\"dn\"])\n uid = user[\"attributes\"][\"uid\"][0]\n rmtree(user[\"attributes\"][\"homeDirectory\"][0])\n rmtree(f\"/webtree/{uid[:1]}/{uid}\")\n mailing_list = mailman.get_list(\"announce-redbrick\")\n mailing_list.unsubscribe(f\"{uid}@redbrick.dcu.ie\")", "def delete_user(self):\n\n User.user_list.remove(self)", "def delete_user(user_id=None):\n obj = storage.get('User', user_id)\n if obj is None:\n abort(404)\n else:\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200", "def delete(self, user_id):\n\n user = User.objects.get_or_404(public_id=user_id)\n return user.delete()", "def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def delete_user(user_id):\n usr = storage.get(User, user_id)\n if usr:\n usr.delete(), storage.save()\n return {}\n else:\n abort(404)", "def delete_user(user_id=None):\n\n user = storage.get(\"User\", user_id)\n if user is None:\n abort(404)\n else:\n storage.delete(user)\n storage.save()\n return jsonify({}), 200", "def del_user_id(user_id):\r\n obj = storage.get(User, user_id)\r\n if obj is None:\r\n abort(404)\r\n obj.delete()\r\n storage.save()\r\n return jsonify({}), 200", "def test_delete_device_token(self):\n pass", "def delete_user(self, user, instance_m):\n from resela.model.User import authenticate\n if user:\n mikrotik_m = MikrotikManager()\n lab_m = LabManager(current_user.session)\n group_m = GroupManager(current_user.session)\n user_m = UserManager(current_user.session)\n\n # Remove router conf\n mikrotik_m.unbind_vpn_to_vlan(user.email)\n mikrotik_m.delete_vpn_user(user.email)\n\n instance_list = instance_m.list(\n detailed=True,\n search_opts={'all_tenants': True, 'user_id': user.id}\n )\n\n for instance in instance_list:\n instance_name = instance.name.split('|')\n lab_name = instance_name[0] + '|' + instance_name[1]\n lab = lab_m.find(name=lab_name)\n instance_m.delete_instance(\n user_m=self,\n session=current_user.session,\n lab=lab,\n 
instance_id=instance.id\n )\n\n teacher_group = group_m.find(name='teachers')\n\n try:\n user_m.check_in_group(user=user, group=teacher_group)\n snapshot_factory = lab_m.find(\n name='snapshotFactory|{}'.format(user.email))\n\n session = authenticate(\n credentials=current_user.token,\n project_domain_name='snapshotFactory',\n project_name=snapshot_factory.name\n )\n\n security_handler = SecurityGroupHandler(session=session)\n\n for sec_group in security_handler.list()['security_groups']:\n if sec_group['tenant_id'] == snapshot_factory.id and \\\n 'internet' in sec_group['name']:\n security_handler.delete(sec_group['id'])\n\n lab_m.delete(snapshot_factory)\n\n except ksa_exceptions.NotFound:\n # Removing students will cause en exception as they are not found.\n # Does not need to be handled.\n pass\n\n # Remove user from db\n try:\n user_model = UserModel.query.get(user.id)\n DATABASE.session.delete(user_model)\n DATABASE.session.commit()\n except Exception:\n # Ignore user not in database\n pass\n\n # Remove user from openstack\n removed = self.delete(user)\n\n if not removed:\n print('User was not deleted:', user.id)\n raise Exception(' user not deleted')", "def delete(self, user):\n q = \"DELETE FROM profiles WHERE user=?\"\n try:\n self._query(q, (user,), fetch='none')\n except Exception as e:\n raise e", "def _delete_user(self, user):\n if User.delete_user(user):\n self.session.output({'deleted': 'user {} and their related accounts'.format(user)})\n return True\n else:\n self.session.output({'invalid_user': 'please enter valid user ID!\\n'}, '[ Fail to delete user ]')\n return False", "def deauthentication_from_user(self):\n # disassociate\n self.a.nxapi_disassociate_req(self.ap1.macaddr)\n \n # expect a deauth frame\n mpdu = self.ap1.rx_mpdu(wifi.AIR_MGMT)\n \n # sanity checks\n assert(mpdu.typesubtype == wifi.fctl_deauthentication)\n \n # expect a disassociation confirmation with a correct status\n assert(self.a.nxapi_disassociate_cfm() == True)\n \n # generate a random frame\n msdu = self.host.tx_msdu(da=self.ap1.macaddr, length=1000, prio=1)\n \n # wait for data send confirmation (not in the air)\n self.a.host_send_data_cfm(msdu)", "def delete_user_by_id(user_id):\n return woo_request_helper().delete_details(wc_endpoint='customers/{}'.format(user_id))", "def delete_user(user_id):\n user_obj = storage.get(\"User\", user_id)\n if user_obj:\n storage.delete(user_obj)\n storage.save()\n return jsonify({}), 200\n else:\n abort(404)", "def delete_user(UserName=None, AuthenticationType=None):\n pass", "def test_delete_useruser_uuid_post(self):\n pass", "def delete_user(user_id):\n user = storage.get(User, user_id)\n if user is None:\n abort(404)\n storage.delete(user)\n storage.save()\n return jsonify({}), 200", "def delete_user():\r\n raise NotImplementedError()", "def delete_user(self, _id):\n return self.make_request(\"DELETE\", \"users/\"+_id, {})", "def delete(self, request, user_id=None):\n data = json.loads(request.body.decode())\n authenticated = Account.check_credentials(request, data['email'], data['password'])\n user = {}\n user['account_id'] = authenticated.id\n\n if authenticated.check_admin(request, user):\n NLTKOutput.remove(request=request, pk=user_id)\n Account.remove(request=request, pk=user_id)\n return Response(json='Account and content deleted', status=204)\n\n return Response(json='Not Authorized', status=401)", "def delete_user():\n token = request.args.get('token')\n data = jwt.decode(token, app.config['SECRET_KEY'])\n\n permit = functions.delete_user(data)\n if 
permit:\n return make_response(jsonify({'Delete': 'User Deleted Successfully'}), 201)\n else:\n return make_response(jsonify({'Delete Failed': 'Credentials not match or the user not exist'}), 201)", "def fusion_api_delete_appliance_snmpv3_trap_forwarding_user(self, id=None, api=None, headers=None): # pylint: disable=W0622\n return self.snmpv3user.delete(id=id, api=api, headers=headers)", "def remove_user(user):\n # user.confirmed = False\n # user = get_user_by_phone(phone_num)\n db.session.delete(user)\n db.session.commit()\n\n return user\n # DELETE FROM users WHERE user.phone_num == phone)", "def delete_device(cls, device_id, token):\n\n tenant = init_tenant_context(token, db)\n orm_device = assert_device_exists(device_id)\n data = serialize_full_device(orm_device, tenant)\n\n kafka_handler_instance = cls.kafka.getInstance(cls.kafka.kafkaNotifier)\n kafka_handler_instance.remove(data, meta={\"service\": tenant})\n\n db.session.delete(orm_device)\n db.session.commit()\n\n results = {'result': 'ok', 'removed_device': data}\n return results", "def delete_device(self):\n # PROTECTED REGION ID(AsyncTabata.delete_device) ENABLED START #\n # PROTECTED REGION END # // AsyncTabata.delete_device", "def delete_malb(user_id):\n return db.session.query(UserToAnime)\\\n .filter(UserToAnime.userId == user_id)\\\n .delete()", "def delete_user(BrokerId=None, Username=None):\n pass", "def delete_user(user_id):\n\n user = User.query.get(user_id)\n db.session.delete(user)\n db.session.commit()\n return", "def delete_user(self, user_id):\n sql = 'update account_user set is_deleted = 1 where id = %s'\n with connection.cursor() as cursor:\n cursor.execute(sql, [user_id])\n row = cursor.fetchone()\n\n return row", "def delete(self, new_data, user_id):\n print(new_data)\n request_id = get_jwt_identity()\n user = user_crud.get(request_id)\n if not user.is_superuser:\n abort(401,\n message=\"You do not have permission to view this endpoint\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def delete_device(self, device_id, give_json=False):\n\n url = Constants.BASE_URL + 'users/devices'\n response = requests.delete(url=url, params={'key': self.user_access_token, 'device_id': device_id})\n\n if give_json:\n return response.json()\n else:\n return response.text", "def delete(self, user_id):\n user = User.query.get(user_id)\n \n if user is None:\n return abort(422, message=\"User does not exist\")\n \n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return abort(422, message=\"User is the only admin, there must be at least one admin in the system\")\n \n user.delete()\n \n return { 'message': \"User '{}' has been deleted\".format(user.id) }", "def delete_user(self, user_name):\r\n params = {'UserName' : user_name}\r\n return self.get_response('DeleteUser', params)", "def create_mfa_totp_device(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = 
{\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")", "def delete(self, id):\n\t\ttry:\n\t\t\tuser_service.delete(id)\n\t\texcept AssertionError as e:\n\t\t\tuser_space.abort(400, e.args[0], status = \"Could not delete user\", statusCode = \"400\")\n\t\texcept Exception as e:\n\t\t\tuser_space.abort(500, e.args[0], status = \"Could not delete user\", statusCode = \"500\")", "def delete_user(payload, user_id):\n user = User.query.get(user_id)\n # exception for non existing id\n if user is None:\n abort(404)\n # set error status\n error = False\n # delete the user\n try:\n user.delete()\n except Exception:\n user.rollback()\n error = True\n print(sys.exc_info())\n finally:\n user.close_session()\n\n if error:\n abort(422)\n\n return jsonify({\n 'success': True,\n 'deleted': user_id\n })", "def delete_user(self, user):\n\n if self.sql_read_only:\n return False\n\n if not self.check_prereqs():\n return False\n\n if not self.has_user(user):\n return False\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_delete_user_query,{'username_field':self.sql_username_field,'username':user})\n self.log.debug(\"sqlflexibleauthstore: delete_user: %s\" % (query,))\n cursor.execute(query)\n\n db.commit()\n del_user_attribute(self.env,username=user)\n return True", "def delete(self, user_id):\n user = User.query.get(user_id)\n\n if user is None:\n return mk_response(\"User does not exist\", 422)\n\n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return mk_response(\"User is the only admin, there must \" +\n \"be at least one admin in the system\", 422)\n\n user.delete()\n\n return mk_response(\"User '{}' has been deleted\".format(user.id))", "def remove(self, user_id):\n pass", "def deleteUser(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete_user(user_id):\n netAdminToolDB = app.config['DATABASE']\n user = netAdminToolDB.get_user(user_id)\n\n if user == None:\n return jsonify({'error': 'User_id not found'}), 404\n\n netAdminToolDB.delete_user(user_id)\n return jsonify({'result': True})", "def 
test_080_user_delete(self):\n\n testflow.step(RMV_USR_MSG, TEST_GROUP_DELETE)\n assert USER_CLI.run('delete', TEST_USER_DELETE)[0]", "def delete(device):\n delete_subject(device)\n return redirect_back('index')", "def delete(user_id):\n # Get the user requested\n user = User.query.filter(User.user_id == user_id).one_or_none()\n\n if user is not None:\n db.session.delete(user)\n db.session.commit()\n return (\n \"User {user_id} deleted\".format(user_id=user_id), 200\n )\n\n else:\n abort(\n 404,\n \"Person not found for Id: {user_id}\".format(user_id=user_id),\n )", "def delete_device(cls, device_uuid):\n cls.dbdriver.delete_device(device_uuid)", "def delete(user_id: int):\n usr = get_by_id(user_id)\n if not usr:\n raise UserNotFound\n\n db.session.delete(usr)\n db.session.commit()", "def delete_user(user_id):\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect('/')", "async def red_delete_data_for_user(self, *, requester, user_id):\n return", "async def red_delete_data_for_user(self, *, requester, user_id):\n return", "def delete(self, userinformation):\n self.db.remove(userinformation)", "def delete(self, url, user):\n token = self.login(user)\n response = requests.delete(url_root + url, headers={\"access-token\": token})\n return response.json(), response.status_code", "def del_user(item, username, passw):\n user = User.load_user_by_username(item, username)\n if not user:\n print(\"User does not exist!\")\n elif check_password(passw, user.hashed_password):\n user.delete(item)\n print(\"User deleted.\")\n else:\n print(\"Incorrect password!\")", "def delete_user_process(user_id):\n\n db_user = User.query.get_or_404(user_id)\n\n db.session.delete(db_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def test_delete_device_users(self):\n pass", "def unclaim_device(self, user_id, device_id):\n if user_id is None:\n self.log_error(MongoDatabase.unclaim_device.__name__ + \"Unexpected empty object: user_id\")\n return False\n if device_id is None:\n self.log_error(MongoDatabase.unclaim_device.__name__ + \"Unexpected empty object: device_id\")\n return False\n\n try:\n user_id_obj = ObjectId(user_id)\n user = self.users_collection.find_one({\"_id\": user_id_obj})\n if user is not None:\n device_list = []\n if 'devices' in user:\n device_list = user['devices']\n if device_id in device_list:\n device_list.remove(device_id)\n user['devices'] = device_list\n self.users_collection.save(user)\n return True\n except:\n traceback.print_exc(file=sys.stdout)\n self.log_error(sys.exc_info()[0])\n return False", "def KLP_User_Delete(request, user_id):\n\n # get logged in user\n\n user = request.user\n if user.id:\n\n # check logged in user permissions to delete user\n\n KLP_user_Perm(request.user, 'Users', None)\n import random\n import string\n rangeNum = 8\n\n # generate random string to replace existing password.\n\n randomStr = ''.join(random.choice(string.ascii_uppercase\n + string.digits) for x in range(rangeNum))\n\n # get user object\n\n userObj = User.objects.get(pk=user_id)\n userObj.is_active = 0 # deactivate user\n\n # ........userObj.set_password(randomStr) # replace password with random string\n\n userObj.save() # save user object\n return render_to_response('viewtemplates/userAction_done.html',\n {\n 'user': request.user,\n 'selUser': userObj,\n 'message': 'User Deletion Successful',\n 'legend': 'Karnataka Learning Partnership',\n 'entry': 'Add',\n }, context_instance=RequestContext(request))\n else:\n\n # if user is not 
logged in redirect to login page\n\n return HttpResponseRedirect('/login/')", "def deleteUser(self):\r\n #Find name and ID column\r\n userData = self.getCurrentUserData()\r\n\r\n #Prompt for confirmation\r\n deleteChoice = QMessageBox.question(self.view, 'Confirm user deletion', \r\n 'Are you sure you want to delete user ' \r\n + userData['Name'] + \" with ID \" + userData['User_ID'] + \r\n \" from database permanently?\", \r\n QMessageBox.Yes | QMessageBox.No)\r\n \r\n if (deleteChoice == QMessageBox.Yes):\r\n DBController().deleteUser(userData['User_ID'] )\r\n self.updateUserTable() #Re-fill table\r", "def delete_user(self, instance, name):\n return instance.delete_user(name)", "def delete_from_all(self, user_id):\n self.execute(TABELLE['id_users']['delete'], (user_id,))\n self.execute(TABELLE['users']['delete'], (user_id,))\n self.execute(TABELLE['punteggio']['delete'], (user_id,))\n self.execute(TABELLE['items']['delete'], (user_id,))", "def delete(bot, update):\n chatID = update.message.chat_id\n username = get_user_info(chatID)['PID']\n logger.info(\"Deleting user credentials for {}!\".format(username))\n Chat.query.filter(Chat.chatID == chatID).delete() # Delete the user's record referenced by their ChatID\n Misc.query.filter(Misc.chatID == chatID).delete()\n db_session.commit()\n messageContent = \"Your credentials have been deleted, {}\\nHope to see you back soon!\".format(username[3:-4].title())\n bot.sendMessage(chat_id=update.message.chat_id, text=messageContent)\n \n mp.track(username, 'User Left')\n mp.people_set(username, {'active': False })", "def delete_user(self, userId):\n\n try:\n query = \"delete from user where userId = {}\".format(userId)\n print(query)\n cur = self.con.cursor()\n cur.execute(query)\n self.con.commit()\n\n logger.info(\"Deleted\")\n except Exception as e:\n logger.error(\"Error occured at data deletion..\", e)", "def delete(self, user_id):\n\n try:\n self.get(user_id)\n url = \"{0}/users/{1}\".format(self.base_url, user_id)\n url = self._add_token_to_url(url)\n self.session.headers.update({\"Content-Type\": \"application/x-www-form-urlencoded\"})\n self.logger.debug(\"Deleting user with ID: <{0}>\".format(user_id))\n response = self.session.delete(url)\n self.logger.debug(\"Received response code {0} with reason {1}\"\n .format(response.status_code, response.reason))\n if response.status_code == 200:\n self.logger.debug(\"User successfully deleted\")\n else:\n raise InvalidResponseCodeException(\"Response code invalid, the expected response code is {0}, \"\n \"the actual response code is {1}\".format(200, response.status_code))\n return None\n except UserNotFoundException as err:\n self.logger.debug(\"User not found, error {0}\".format(err))", "def delete(khoros_object, user_id, return_json=False):\n # TODO: Allow other identifiers (e.g. login, email, etc.) to be provided instead of just the User ID\n query_url = f\"{khoros_object.core_settings['v2_base']}/users/{user_id}\"\n response = api.delete(query_url, return_json, auth_dict=khoros_object.auth)\n if response.status_code == 403 and 'Feature is not configured' in response.text:\n try:\n identifier = response.text.split('identifier: ')[1].split('\"')[0]\n raise errors.exceptions.FeatureNotConfiguredError(identifier=identifier)\n except IndexError:\n raise errors.exceptions.FeatureNotConfiguredError()\n if return_json:\n response = response.json()\n return response" ]
[ "0.7167678", "0.69209623", "0.6781475", "0.6747502", "0.67389566", "0.66825885", "0.66802096", "0.66421336", "0.66130745", "0.6571461", "0.64496547", "0.64242226", "0.63735026", "0.63403934", "0.6330194", "0.63215464", "0.63176155", "0.63172036", "0.63106817", "0.62828964", "0.6236682", "0.6191388", "0.617884", "0.6178165", "0.6170782", "0.61678034", "0.6151824", "0.61508304", "0.6147104", "0.613341", "0.6110389", "0.6102412", "0.6089452", "0.60840636", "0.60840636", "0.60840636", "0.6081775", "0.60812104", "0.60795575", "0.6072897", "0.6072087", "0.6071191", "0.60707575", "0.6048834", "0.6027242", "0.60268927", "0.60252345", "0.60226834", "0.6021249", "0.6016962", "0.599222", "0.59862995", "0.598323", "0.59745", "0.5968044", "0.59655887", "0.5962185", "0.59605736", "0.595831", "0.5955385", "0.59258854", "0.59002346", "0.58816", "0.58815277", "0.5876657", "0.58688766", "0.5866507", "0.58659405", "0.58536875", "0.5845489", "0.58424413", "0.583315", "0.5824462", "0.5816046", "0.58157885", "0.58143777", "0.5814177", "0.58075017", "0.5806503", "0.5779029", "0.57575536", "0.5752479", "0.5744049", "0.5739748", "0.5733469", "0.5733469", "0.5729544", "0.5714389", "0.570796", "0.5706626", "0.5698788", "0.5697204", "0.56953585", "0.5695142", "0.5687429", "0.5681429", "0.56743944", "0.5666343", "0.5658656", "0.5657599" ]
0.69395584
1
Deletes the specified network source
def delete_network_source(self, network_source_id, **kwargs): resource_path = "/networkSources/{networkSourceId}" method = "DELETE" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "delete_network_source got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "networkSourceId": network_source_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params) else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, source):\n _source = self._source_prefix+source\n assert _source in self.cache.keys()\n del self.cache[_source]", "def delete_source(self, src_name: SourceName) -> None:\n while True:\n try:\n response = self.genes.query(\n IndexName=\"src_index\",\n KeyConditionExpression=Key(\"src_name\").eq(src_name.value),\n )\n except ClientError as e:\n raise DatabaseReadException(e)\n records = response[\"Items\"]\n if not records:\n break\n with self.genes.batch_writer(\n overwrite_by_pkeys=[\"label_and_type\", \"concept_id\"]\n ) as batch:\n for record in records:\n try:\n batch.delete_item(\n Key={\n \"label_and_type\": record[\"label_and_type\"],\n \"concept_id\": record[\"concept_id\"],\n }\n )\n except ClientError as e:\n raise DatabaseWriteException(e)\n\n try:\n self.metadata.delete_item(Key={\"src_name\": src_name.value})\n except ClientError as e:\n raise DatabaseWriteException(e)", "def RemoveSource(self,source):\n self._sources.RemoveSource(source)", "def RemoveSource(self, source):\n self._sources.remove(source)", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def delete_source(username, id, force, token=None):\n if not force:\n click.confirm(\n \"Are you sure you want to delete {0} {1}?\".format(username, id), abort=True\n )\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}/{2}?access_token={3}\".format(\n mapbox_api, username, id, mapbox_token\n )\n r = requests.delete(url)\n if r.status_code == 204:\n click.echo(\"Source deleted.\")\n else:\n raise errors.TilesetsError(r.text)", "def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)", "def remove(self, source, graph, dest):\n return self.server.execute(self._execute_operation(\n source, graph, dest,\n ttypes.ExecuteOperationType.Remove))", "def fusion_api_delete_network_set(self, name=None, uri=None, api=None, headers=None):\n return self.network_set.delete(name, uri, api, headers)", "def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)", "def fusion_api_delete_fc_network(self, name=None, uri=None, api=None, headers=None):\n return self.fc_network.delete(name, uri, api, headers)", "def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]", "def delete(self, dest, source=None):\n raise NotImplementedYet()", "def dcnm_network_delete_event(self, network_info):\n seg_id = network_info.get('segmentation_id')\n if not seg_id:\n LOG.error(_LE('Failed to delete network. 
Invalid network '\n 'info %s.'), network_info)\n query_net = self.get_network_by_segid(seg_id)\n if not query_net:\n LOG.info(_LI('dcnm_network_delete_event: network %(segid)s '\n 'does not exist.'), {'segid': seg_id})\n return\n if self.fw_api.is_network_source_fw(query_net, query_net.name):\n LOG.info(_LI(\"Service network %s, returning\"), query_net.name)\n return\n # Send network delete request to neutron\n try:\n del_net = self.network.pop(query_net.network_id)\n self.neutronclient.delete_network(query_net.network_id)\n self.delete_network_db(query_net.network_id)\n except Exception as exc:\n # Failed to delete network.\n # Put back the entry to the local cache???\n self.network[query_net.network_id] = del_net\n LOG.exception(_LE('dcnm_network_delete_event: Failed to delete '\n '%(network)s. Reason %(err)s.'),\n {'network': query_net.name, 'err': str(exc)})", "def _delete_network_vm(args):\n libvirtConn = libvirt.openReadOnly(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n net = None\n try:\n net = libvirtConn.networkLookupByName(args.network_name)\n except libvirt.libvirtError:\n print('Cannot find network named [%s]' % args.network_name, file=sys.stderr)\n return 1\n print('Network found:\\n')\n print(xml.dom.minidom.parseString(net.XMLDesc()).toprettyxml(indent=\" \", newl=''))\n print('')\n\n if not args.yes:\n if not input('Really destroy this network ?').strip().lower() in ('y', 'yes'):\n return 1\n return oci_utils.kvm.virt.delete_virtual_network(network_name=args.network_name)", "def remove_source(self, name):\n logger.warning('You are deleting a source. This could have unintended \\\n side effects. If you are replacing values, use get_source(name) \\\n and modify it instead.')\n source = self._sources[name]\n self._pattern_reg.remove_usage(source.strength_timeseries.pattern_name, (source.name, 'Source'))\n self._node_reg.remove_usage(source.node_name, (source.name, 'Source')) \n del self._sources[name]", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def test_delete_network(self):\n pass", "def src_delete(state):\n _lib.src_delete(state)", "def remove(self, name, source):\n self.m.path.assert_absolute(source)\n self._run(name, ['remove', source])\n self.m.path.mock_remove_paths(source)", "def delete(self): \n params = {'command':'deleteNetwork',\n 'id':self.id}\n \n self.logger.debug('Remove network %s' % self.name)\n \n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deletenetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'deleteNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)", "def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)", "def delete_network_segment(context, segment_id):\n with db_api.context_manager.writer.using(context):\n network_obj.NetworkSegment.delete_objects(context, id=segment_id)", "def delete_network_profile(arn=None):\n pass", "def deleteRig(self):\n\n allNodes = cmds.ls(\"*\")\n for node in allNodes:\n if cmds.objExists(node + \".sourceModule\"):\n 
cmds.lockNode(node, lock=False)\n source = cmds.getAttr(node + \".sourceModule\")\n if source == self.name:\n try:\n cmds.delete(node)\n except:\n pass", "def Delete(self):\n\n if self.network_id:\n self.cs.delete_network(self.network_id)\n\n if self.is_vpc and self.vpc_id:\n self.cs.delete_vpc(self.vpc_id)", "def removeModelSource(self, modelSource):\n self._modelSources.remove(modelSource)\n if modelSource.isLoaded():\n self._reload()", "def delete(self):\n \n logging.info(\"Deleting network %s\" % self.cloudnet)\n # res = cn.delete(self.cloudnet)\n res = self.cloudnet.delete()\n return res", "def delete_order_source(self, order_source_id, **kwargs):\n\n all_params = ['order_source_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_order_source\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'order_source_id' is set\n if ('order_source_id' not in params) or (params['order_source_id'] is None):\n raise ValueError(\"Missing the required parameter `order_source_id` when calling `delete_order_source`\")\n\n resource_path = '/beta/orderSource/{orderSourceId}'.replace('{format}', 'json')\n path_params = {}\n if 'order_source_id' in params:\n path_params['orderSourceId'] = params['order_source_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def deleteNetwork(self, session: Session, id_: str):\n try:\n return NetworkManager().deleteNetwork(session, id_)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def delete_source_from_s3(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"delete_source_from_s3\")", "def delete_source_from_s3(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"delete_source_from_s3\")", "def network_delete_event(self, network_info):\n\n net_id = network_info['network_id']\n if net_id not in self.network:\n LOG.error(_LE('network_delete_event: net_id %s does not exist.'),\n net_id)\n return\n\n segid = self.network[net_id].get('segmentation_id')\n tenant_id = self.network[net_id].get('tenant_id')\n tenant_name = self.get_project_name(tenant_id)\n net = utils.Dict2Obj(self.network[net_id])\n if not tenant_name:\n LOG.error(_LE('Project %(tenant_id)s does not exist.'),\n {'tenant_id': tenant_id})\n self.update_network_db(net.id, constants.DELETE_FAIL)\n return\n\n try:\n self.dcnm_client.delete_network(tenant_name, net)\n # Put back the segmentation id into the pool.\n self.seg_drvr.release_segmentation_id(segid)\n\n # Remove entry from database and cache.\n self.delete_network_db(net_id)\n del self.network[net_id]\n snets = [k for k in 
self.subnet if (\n self.subnet[k].get('network_id') == net_id)]\n [self.subnet.pop(s) for s in snets]\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE('Failed to create network %(net)s.'),\n {'net': net.name})\n self.update_network_db(net_id, constants.DELETE_FAIL)\n # deleting all related VMs\n instances = self.get_vms()\n instances_related = [k for k in instances if k.network_id == net_id]\n for vm in instances_related:\n LOG.debug(\"deleting vm %s because network is deleted\", vm.name)\n self.delete_vm_function(vm.port_id, vm)\n self.network_del_notif(tenant_id, tenant_name, net_id)", "def paths_revoke_network_block(ctx, network, destination, source, port):\n source_block = cloudless.paths.CidrBlock(source)\n destination_service = get_service_for_cli(ctx, network, destination)\n ctx.obj['CLIENT'].paths.remove(source_block, destination_service, port)\n click.echo('Removed path from %s to %s in network %s for port %s' % (source, destination,\n network, port))", "def delete_network(self, tenant_id, network_id, network_segments):\n self.delete_network_segments(tenant_id, network_segments)\n self.delete_network_bulk(tenant_id, [network_id])", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def remove_connection(self, source, target):\r\n\r\n connection = (self.coalesce_node(source), self.coalesce_node(target))\r\n self.connections.discard(connection)", "def remove_edge(self, source: n, destination: n):\n self._graph[source].remove(destination)\n if not self._directed:\n self._graph[destination].remove(source)", "def test_networking_project_network_delete(self):\n pass", "def delete_source(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_source_with_http_info(id, **kwargs)\n else:\n (data) = self.delete_source_with_http_info(id, **kwargs)\n return data", "def delete_network_profile(self, profile):\r\n return self.delete(self.network_profile_path % profile)", "def delete_model(source: str, supported_model_name: str, model_id: str):\n connector = __get_connector(source)\n supported_model = __get_supported_model(supported_model_name)\n\n try:\n return connector.delete(supported_model, model_id)\n except Exception as e:\n abort(500, e)", "def delete_source_with_http_info(self, id, **kwargs):\n\n all_params = ['id']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_source\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `delete_source`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # Authentication setting\n auth_settings = ['Using HTTP Header', 'Using URL Query Parameter']\n\n return self.api_client.call_api('/sources/{id}', 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'),\n 
_return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def delete_clone(self, dest, source=None):\n raise NotImplementedYet()", "def delete(self, src):\n\n if self.noop:\n logger.info(\"No-Op Delete: %s.tar\" % self.bucket + src)\n else:\n logger.info(\"Trying to delete %s.tar\" % self.bucket + src)\n self.client.delete(self.bucket + src + \".tar\")", "def deleteNodeNetworkConfig(self,node):\n data = self.connect('delete',\"nodes/%s/network\" % (node),None)\n return data", "def remove(self, source, destination, port):\n logger.info('Removing path from %s to %s on port %s',\n source, destination, port)\n\n firewall_name = \"bu-%s-%s-%s\" % (destination.network.name, destination.name, port)\n\n def remove_from_ranges(to_remove, address_ranges):\n logger.info(\"Removing %s from %s\", to_remove, address_ranges)\n resulting_ranges = []\n if not address_ranges:\n return None\n for address_range in address_ranges:\n remove_net = ipaddress.IPv4Network(to_remove)\n address_range_network = ipaddress.IPv4Network(address_range)\n if remove_net.overlaps(address_range_network):\n if remove_net.prefixlen > address_range_network.prefixlen:\n new_range_networks = address_range_network.address_exclude(remove_net)\n resulting_ranges.extend([str(new_range_network) for new_range_network\n in new_range_networks])\n else:\n resulting_ranges.extend([str(address_range_network)])\n logger.info(\"New ranges: %s\", resulting_ranges)\n return resulting_ranges\n\n try:\n firewall = self.driver.ex_get_firewall(firewall_name)\n if isinstance(source, CidrBlock):\n firewall.source_ranges = remove_from_ranges(source.cidr_block,\n firewall.source_ranges)\n else:\n source_tag = \"%s-%s\" % (source.network.name, source.name)\n if firewall.source_tags:\n firewall.source_tags = [tag for tag in firewall.source_tags\n if tag != source_tag]\n except ResourceNotFoundError:\n logger.info(\"Firewall %s doesn't exist\", firewall_name)\n return None\n\n # We need this because the default is to add \"0.0.0.0/0\" if these aren't set, which is bad.\n if not firewall.source_tags and not firewall.source_ranges:\n return self.driver.ex_destroy_firewall(firewall)\n return self.driver.ex_update_firewall(firewall)", "def delete_sources(image_sources):\n index = np.where(image_sources[:, 3] == 0.0)\n active_sources = np.delete(image_sources, index, 0)\n return(active_sources)", "def delete_network_object(session, key):\n # type: (Session, Text) -> None\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_OBJECTS\n )\n return _delete(session, url_tail, {CoordConstsV2.QP_KEY: key})", "def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def delSource(A,bSize,comp):\n sA,comp = delSink(A.T,bSize,comp)\n return sA.T,comp", "def remove_provider_network(network_id):\n session = db.get_session()\n pnet = (session.query(network_models_v2.ProviderNetwork).\n filter_by(network_id=network_id).first())\n if pnet:\n session.delete(pnet)\n session.flush()\n return network_id", "def delete_net(self, net_id):\n LOG_OBJ.debug(\"Deleting network %s\" % net_id)\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks/\" + \\\n net_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"DELETE\", _url, _headers, _body)\n if response 
is None:\n LOG_OBJ.error(\"No response from Server while deleting net:%s\" %\n net_id)\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Deletion of Network Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Deleted the network : %s \" % net_id)\n return True", "def unlink(self, uuid_, source_uuids=None):\n if isinstance(source_uuids, uuid.UUID):\n source_uuids = [source_uuids]\n\n self._backend.unlink(uuid_, source_uuids)", "def delete(self, source_index):\r\n click_css(self, 'a.delete-button', source_index, require_notification=False)\r\n # Click the confirmation dialog button\r\n click_css(self, 'a.button.action-primary', 0)", "def test_delete_network_from_dhcp_agent(self):\n network_id = self._create_and_prepare_network_for_agent(\n self.agent['id'])\n self.agents_client.add_dhcp_agent_to_network(\n self.agent['id'], network_id=network_id)\n # Clean up is not necessary and might result in 409 being raised.\n\n with self.override_role():\n self.agents_client.delete_network_from_dhcp_agent(\n self.agent['id'], network_id=network_id)", "def remove(self, destination: n):\n try:\n self.connections.pop(destination)\n except KeyError:\n pass", "def delete_snapshot(self, dest, source=None):\n raise NotImplementedYet()", "def office_delete_adjoint_sources_for_iteration(parser, args, params):\n parser.parse_known_args(args)\n control.delete_adjoint_sources_for_iteration(params)", "def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True", "def test_delete_cluster_network(self):\n pass", "def delete_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.delete_network(network)\n except:\n pass", "def fusion_api_delete_fcoe_network(self, name=None, uri=None, api=None, headers=None):\n return self.fcoe_network.delete(name, uri, api, 
headers)", "def test_delete_source_volume_in_migration(self):\n self._test_delete_volume_in_migration('migrating')", "def DeleteTarget(self, target_instance_id):", "def delete_network_segments(self, tenant_id, network_segments):", "def delete_conf(src_ip):\n return delete_route(src_ip)", "def delete_network_bulk(self, tenant_id, network_id_list, sync=False):", "def delete_dataset_without_original_url():\n logging.warning(\n \"*** deleting all netex files created by transport.data.gouv.fr ***\"\n )\n r = requests.get(\"https://transport.data.gouv.fr/api/datasets\")\n r.raise_for_status()\n datasets = r.json()\n\n print_resource = lambda r: f\"\\n\\t*[url = {r['url']} | extras = {r.get('extras')}]\"\n print_resources = lambda rs: [print_resource(r) for r in rs]\n\n for d in datasets:\n dataset_name = d[\"title\"]\n if d[\"type\"] != \"public-transit\":\n continue\n\n dataset_id = d[\"id\"]\n\n community_resources = _find_community_resources(dataset_id)\n logging.info(\"community ressources : %s\", print_resources(community_resources))\n old_community_resources = [\n r\n for r in community_resources\n if \"transport:original_resource_url\" not in r.get(\"extras\", {})\n ]\n if old_community_resources:\n logging.info(\n \"old community ressources : %s\",\n print_resources(old_community_resources),\n )\n _delete_community_resources(dataset_id, old_community_resources)\n logging.info(\"deleted community resource for the dataset %s\", dataset_id)", "def delete_route(src_ip):\n import os\n # no while loop (is better...)\n try:\n os.system(\"\"\"\n count=`/usr/bin/sudo /sbin/iptables -t mangle -nv --list PREROUTING | grep \" %s \" | wc -l`\n for i in `seq 1 $count`; do\n a=`/usr/bin/sudo /sbin/iptables --line-numbers -t mangle -nv --list PREROUTING | grep \" %s \" | cut -d\" \" -f 1 | head -n 1`;\n [ \"$a\" ] && /usr/bin/sudo /sbin/iptables -t mangle -D PREROUTING $a;\n done\n \"\"\" % (src_ip, src_ip))\n except:\n raise iptExc(\"Could not delete route from src_ip %s in iptables\" % (src_ip))\n return True", "def test_delete_cost_model_metrics_maps_source_filter(self):\n url = reverse(\"metrics\")\n client = APIClient()\n\n params = {\"source_type\": Provider.PROVIDER_OCP}\n url = url + \"?\" + urlencode(params, quote_via=quote_plus)\n response = client.delete(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def del_edge (self, src, dst):\n raise NotImplementedError", "def delete_order_source_tag(self, order_source_id, order_source_tag, **kwargs):\n\n all_params = ['order_source_id', 'order_source_tag']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_order_source_tag\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'order_source_id' is set\n if ('order_source_id' not in params) or (params['order_source_id'] is None):\n raise ValueError(\"Missing the required parameter `order_source_id` when calling `delete_order_source_tag`\")\n # verify the required parameter 'order_source_tag' is set\n if ('order_source_tag' not in params) or (params['order_source_tag'] is None):\n raise ValueError(\"Missing the required parameter `order_source_tag` when calling `delete_order_source_tag`\")\n\n 
resource_path = '/beta/orderSource/{orderSourceId}/tag/{orderSourceTag}'.replace('{format}', 'json')\n path_params = {}\n if 'order_source_id' in params:\n path_params['orderSourceId'] = params['order_source_id']\n if 'order_source_tag' in params:\n path_params['orderSourceTag'] = params['order_source_tag']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.delete(args)", "def delete_provider(cls, args, config):\n # print \"MOLNSProvider.delete_provider(args={0}, config={1})\".format(args, config)\n if len(args) == 0:\n print \"USAGE: molns provider delete name\"\n return\n config.delete_object(name=args[0], kind='Provider')", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)", "def command_remove(arguments):\n global current_name\n tag, target, *rest = arguments[0], arguments[1]\n inverse_tag = rest[0] if rest else Network.reciprocal(tag)\n try:\n network.unlink(current_name, tag, target, inverse_tag)\n return 'Removed link \"' + tag + \": \" + target + '\"'\n except ValueError:\n return \"No such link.\"", "def delete_overlay_network(self, name=NETWORK_NAME):\n try:\n # An overlay network is usually created in host belonging to a swarm\n self.leave_swarm()\n network = self.docker_client.networks.get(name)\n network.remove()\n except docker.errors.NotFound as nf:\n print(\"Network \"+name+\" not found\")\n except docker.errors.APIError as de:\n print(\"Error deleting overlay network\")\n print de\n exit(1)\n return", "def rmtree(self, name, source):\n self.m.path.assert_absolute(source)\n self._run(name, ['rmtree', source])\n self.m.path.mock_remove_paths(str(source))", "def remove_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n segment_name = kwargs[\"objectname\"]\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n segment_path = segment['results'][0]['path']\n status = remove_segment_json(proxy, sessiontoken, segment_path)\n if status == 200:\n print(f'The following network has been removed: {segment_name}')\n else:\n print(\"The segment was not removed. 
Please check your syntax and try again.\")\n sys.exit(1)\n else:\n print(\"The segment does not exist.\")", "def test_delete_collection_cluster_network(self):\n pass", "def removeNeighbor(self, neighborID):", "def remove_network_from_dhcp_agent(self, dhcp_agent, network_id):\r\n return self.delete((self.agent_path + self.DHCP_NETS + \"/%s\") % (\r\n dhcp_agent, network_id))", "def rpc_remove_connection(client, source, dest,\n rpc_user=BTC_RPC_USER, rpc_password=BTC_RPC_PASSWD, rpc_port=BTC_RPC_PORT):\n try:\n rpc_server = get_ip_by_unknown(client, source)\n dest = get_ip_by_unknown(client, dest)\n rpc_connection = AuthServiceProxy(\"http://%s:%s@%s:%s\" % (rpc_user, rpc_password, rpc_server, rpc_port))\n rpc_connection.addnode(dest, \"remove\")\n return True\n except JSONRPCException as err:\n print(err)\n return False", "def test_remove_share(self):\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam\", status=400)\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam&source=gsiftp://source\", status=204)", "def remove(src):\n if os.path.isfile(src):\n os.remove(src)\n elif os.path.isdir(src):\n shutil.rmtree(src)", "def DeleteLayer(self, event):\n pass", "def request_workspace_delete(self, request):\n unique_id = request['uuid']\n# print('###', user_id)\n# print('###', alias)\n# print('###', source_uuid)\n \n uuid_mapping = self._get_uuid_mapping_object(user_id)\n alias = uuid_mapping.get_alias(unique_id)\n self.delete_workspace(unique_id=unique_id)\n \n response = {'alias': alias, \n 'uuid': unique_id}\n \n return response", "def deleteNodeInterface(self,node,interface):\n data = self.connect('delete',\"nodes/%s/network/%s\" % (node,interface),None)\n return data", "def test_data_source_soaps_id_delete(self):\n pass", "def remove_gateway(self, network_ref):\n raise NotImplementedError()", "def remove_virtualsource(self, name):\n self._auraliser.remove_object(name)", "def pre_virtual_network_delete(self, resource_id):\n pass", "def delete(self, remote):\n self.target.ttbd_iface_call(\"store\", \"file\", method = \"DELETE\",\n file_path = remote)", "def delete_user(network, user):\n if user in network:\n del network[user]\n for u in network:\n connections = get_connections(network, u)\n if user in connections:\n i = connections.index(user)\n del connections[i]\n return network", "def test_networking_project_network_tag_delete(self):\n pass", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = wn.synset_by_id(source_synset).lex_name\n entry = wn.entry_by_id(source_entry)\n if change_list:\n change_list.change_entry(wn, entry)\n sense = [sense for sense in entry.senses if sense.id == source][0]\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]", "def delete_rel(source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source.id, target.id))\n ss = source\n source.synset_relations = [\n r for r in ss.synset_relations if r.target != target.id]\n if change_list:\n change_list.change_synset(source)", "def delete(self, host, file):" ]
[ "0.69649154", "0.67236555", "0.67101824", "0.6658213", "0.66125065", "0.64707583", "0.6187282", "0.6125395", "0.6124346", "0.6093754", "0.6084731", "0.60805976", "0.6080252", "0.6072282", "0.6040917", "0.6035688", "0.6021448", "0.60198027", "0.60127157", "0.6012108", "0.599546", "0.59650165", "0.58980197", "0.5889944", "0.58734", "0.5866181", "0.5849423", "0.58390343", "0.5818585", "0.5817115", "0.58140457", "0.58140457", "0.5793951", "0.5779924", "0.57465863", "0.571973", "0.57157105", "0.5683042", "0.5679839", "0.5658753", "0.5629521", "0.562124", "0.56154794", "0.56150734", "0.5612496", "0.56012", "0.55778843", "0.5572225", "0.5567229", "0.5545624", "0.5530644", "0.55117047", "0.55092674", "0.5488419", "0.54536074", "0.54373634", "0.54366344", "0.5412063", "0.5388323", "0.53825516", "0.5381819", "0.53759867", "0.5353963", "0.5351348", "0.53318936", "0.5331046", "0.531705", "0.5313302", "0.53098655", "0.5307518", "0.5284556", "0.5282733", "0.52683145", "0.52677727", "0.52625436", "0.5248607", "0.5237514", "0.52371335", "0.5226508", "0.5225309", "0.52174056", "0.5209776", "0.51987845", "0.5188571", "0.51830596", "0.5175165", "0.5170293", "0.516776", "0.51530737", "0.51505834", "0.51495284", "0.5149157", "0.51368743", "0.5129338", "0.512776", "0.51166016", "0.51126516", "0.51065695", "0.51030844", "0.5102227" ]
0.69568324
1
Delete OAuth token for the user
def delete_o_auth_client_credential(self, user_id, oauth2_client_credential_id, **kwargs): resource_path = "/users/{userId}/oauth2ClientCredentials/{oauth2ClientCredentialId}" method = "DELETE" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "delete_o_auth_client_credential got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id, "oauth2ClientCredentialId": oauth2_client_credential_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params) else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n token = data.get(\"token\")\n\n valid, message = is_token_valid(token, address)\n if not valid:\n return jsonify(error=message), 400\n\n force_expire_token(token)\n\n return jsonify(success=\"Token has been deactivated.\")", "def logout(request):\n request.user.auth_token.delete()\n return Response({}, status=status.HTTP_200_OK)", "def revoke_token(token):\n token.delete_instance()", "def deauth(request):\n\n if(request.token):\n request.token.delete()\n return JsonResponse({'message': 'Your token is revoked'}) \n else:\n return HttpResponseBadRequest('It does not make sense to revoke a token ' +\n 'if no token are supplied to the request')", "def test_delete_o_auth_access_token(self):\n pass", "def revoke_token(self, token, token_type_hint, request, *args, **kwargs):\n if token_type_hint:\n tok = self._tokengetter(**{token_type_hint: token})\n else:\n tok = self._tokengetter(access_token=token)\n if not tok:\n tok = self._tokengetter(refresh_token=token)\n\n if tok and tok.client_id == request.client.client_id:\n request.client_id = tok.client_id\n request.user = tok.user\n tok.delete()\n return True\n\n msg = 'Invalid token supplied.'\n log.debug(msg)\n request.error_message = msg\n return False", "def logout(self, request, *args, **kwargs):\n token = get_object_or_404(Token, key=request.auth)\n token.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def delete(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def delete(self, request):\n serializer = UserLogoutSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n token = RefreshToken(serializer.validated_data[\"refresh\"])\n token.blacklist()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def test_delete_o_auth_authorize_token(self):\n pass", "def logout():\n body = request.json\n user_id = body.get('user_id')\n user = User.get(User.id == user_id).username\n clear_token(user)\n return HTTPResponse(status=200, body={\"message\":\"Log out succesful.\"})", "def delete_token(self):\n config.update(outlook_token=None)", "def delete_user():\n token = request.args.get('token')\n data = jwt.decode(token, app.config['SECRET_KEY'])\n\n permit = functions.delete_user(data)\n if permit:\n return make_response(jsonify({'Delete': 'User Deleted Successfully'}), 201)\n else:\n return make_response(jsonify({'Delete Failed': 'Credentials not match or the user not exist'}), 201)", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def delete_user():", "async def revoke_token(self, request: Request, token: str) -> None:\n token_record = ...\n token_record.revoked = True\n token_record.save()", "def delete(self):\n return self.request.delete_cookie('token')", "def revoke_token():\n json_request = request.json\n refresh_token = json_request.get('refresh_token')\n if not refresh_token:\n return msg.errors.bad_request(\n 'You should provide refresh token for this call')\n RefreshToken.revoke(refresh_token)\n db.session.commit()\n return msg.success('Token is successfully revoked')", "def logout(request):\n if request.method == 
'POST':\n request.token.delete()\n return json_response({\n 'status': 'success'\n })\n elif request.method == 'OPTIONS':\n return json_response({})\n else:\n return json_response({\n 'error': 'Invalid Method'\n }, status=405)", "def logout(client):\n\n return client.post('/v1/auth/revoke')", "def post(self, request):\n if 'person_id' in self.request.POST:\n user = User.objects.get(person__id=self.request.POST['person_id'])\n if AccessToken.objects.filter(user=user).exists():\n tokens = AccessToken.objects.filter(user=user)\n for token in tokens:\n token.revoke()\n logout(request)\n return Response({'status': True})\n return Response({'status': False})", "def delete(self, url, user):\n token = self.login(user)\n response = requests.delete(url_root + url, headers={\"access-token\": token})\n return response.json(), response.status_code", "def logout_other(self, request):\n tokens_to_delete = request.user.auth_token_set.exclude(\n pk=request.auth[1].pk)\n num = tokens_to_delete.delete()\n return Response({\"deleted_sessions\": num[0]})", "def test_user_logout(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.get_token())\n response = self.client.delete(reverse('accounts:user-logout'))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def service_token_delete(self):\n\n self._client.delete(\n \"{}/servicetoken\".format(LKECluster.api_endpoint), model=self\n )", "def logout(_host, _token):\n url = _host + '/api/v1/users/logout'\n headers = {\n 'Content-Type': 'application/json',\n }\n params = {\n 'access_token': _token\n }\n response = requests.post(url, headers=headers, params=params)\n if not response.status_code == 204:\n raise ValueError('Logout failed')", "def test_logout_user_without_token(client, url):\n response = client.delete(\"/auth/logout/\")\n payload = response.get_json()\n assert response.status_code == HTTPStatus.UNAUTHORIZED\n assert payload[\"msg\"] == \"Missing Authorization Header\"", "def logout_with_google():\n access_token = login_session.get('access_token')\n if access_token is None:\n print 'Access Token is None'\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n print 'In gdisconnect access token is %s', access_token\n print 'User name is: '\n print login_session['username']\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \\\n % login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n print 'result is '\n print result\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['user_id']\n del login_session['email']\n del login_session['picture']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return redirect(url_for('login'))\n else:\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response", "def delete_user():\n #TODO user delete\n pass", "def logout(self, request):\n request.auth[1].delete()\n return Response(None, status=status.HTTP_204_NO_CONTENT)", "def delete_user_access_token(self, user_id, user_password, user_access_token, give_json=False):\n\n url = Constants.BASE_URL + 'domains/users/accesstokens'\n response = requests.delete(url=url,\n params={'key': self.api_key, 'user_id': user_id, 
'user_password': user_password})\n if give_json:\n return response.json()\n else:\n return response.text", "def delete_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover", "def __del__(self):\n self.token_revoke()", "def test_delete_collection_o_auth_access_token(self):\n pass", "def gdisconnect():\n\taccess_token = session.get('access_token')\n\tuname = session.get('username')\n\n\tif not access_token:\n\t\tresponse = make_response(\n\t\t\tjson.dumps('Current user not connected.'), 401)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\t\treturn response\n\n\turl = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n\th = httplib2.Http()\n\tresult = h.request(url, 'GET')[0]\n\n\tif result['status'] != '200':\n\t\t# For whatever reason, the given token was invalid.\n\t\tresponse = make_response(\n\t\t\tjson.dumps('Failed to revoke token for given user.'), 400)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\t\t[session.pop(k, None) for k, _ in session.items()]\n\t\treturn response\n\t#Clearing out session data\n\t[session.pop(k, None) for k, _ in session.items()]\n\treturn redirect(request.referrer)", "def writeas_logout(token):\n writeas_logout_url = 'https://write.as/api/auth/me'\n writeas_logout_header = {\n 'Authorization': token,\n 'Content-Type': 'application/json'\n }\n r = requests.delete(\n writeas_logout_url,\n headers=writeas_logout_header)\n\n if r.status_code == 204:\n print('User with token {} successfully logged out!'.format(token))\n else:\n print('Logout FAILED. Response: {}'.format(r.text))\n sys.exit(1)", "def auth_logout(request):\n\n \"\"\"\n user = getattr(request, 'user', None)\n if hasattr(user, 'is_authenticated') and not user.is_authenticated():\n user = None\n user_logged_out.send(sender=user.__class__, request=request, user=user)\n \"\"\"\n request.session.flush()\n \"\"\"\n if hasattr(request, 'user'):\n from django.contrib.auth.models import AnonymousUser\n request.user = AnonymousUser()\n \"\"\"\n ri = rest_interface(opensso_url=OPEN_AM_SERVER_URL)\n\n if OPENAM_COOKIE_NAME_FOR_TOKEN in request.COOKIES:\n unsigned_token = request.COOKIES[OPENAM_COOKIE_NAME_FOR_TOKEN]\n print('logout: token ='+request.COOKIES[OPENAM_COOKIE_NAME_FOR_TOKEN])\n print('logout: unsigned_token ='+unsigned_token)\n ri.do_logout(subject_id=unsigned_token)\n #del request.COOKIES[OPENAM_COOKIE_NAME_FOR_TOKEN]\n #request.COOKIES[OPENAM_COOKIE_NAME_FOR_TOKEN] = 'logged_out'\n ##ssouser = SSOUser(False)\n ##request.ssouser = ssouser", "def invalidate_existing_tokens(self, client_id, user):\n\n app = Application.objects.get(client_id=client_id)\n tokens = AccessToken.objects.filter(user=user, application=app)\n tokens.delete()", "def revoke(self):\n if self.access_token is None:\n raise InvalidInvocation('no token available to revoke')\n\n self._authenticator.revoke_token(self.access_token, 'access_token')\n self._clear_access_token()", "def removeToken(self, token):\n self.__require_privilaged_access()\n with DBSession(self.__config_db) as session:\n # Check if the given token is a personal access token so it can be\n # removed.\n user = self.getLoggedInUser()\n num_of_removed = session.query(Session) \\\n .filter(Session.user_name == user) \\\n .filter(Session.token == token) \\\n .filter(Session.can_expire.is_(False)) \\\n .delete(synchronize_session=False)\n session.commit()\n\n if not num_of_removed:\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.DATABASE,\n \"Personal 
access token {0} was not found in the \"\n \"database.\".format(token))\n\n # Invalidate the local session by token.\n self.__manager.invalidate_local_session(token)\n\n LOG.info(\"Personal access token '%s...' has been removed by '%s'.\",\n token[:5], self.getLoggedInUser())\n\n return True", "def test_logout_user_with_valid_access_token(auth_client):\n response = auth_client.delete(\"/auth/logout/\")\n payload = response.get_json()\n assert response.status_code == HTTPStatus.OK\n assert payload[\"msg\"] == \"Successfully logged out\"", "def revoke_access_token(cls, jti: str) -> None:\n redis = cls._conn_redis(cls)\n expired_time = int(timedelta(minutes=cls._ACCESS_TOKEN_EXPIRES).total_seconds())\n redis.setex(jti,expired_time,'true')", "def logout_all(self, request):\n request.user.auth_token_set.all().delete()\n return Response(None, status=status.HTTP_204_NO_CONTENT)", "def revoke_token():\n return server.create_endpoint_response(RevocationEndpoint.ENDPOINT_NAME)", "def remove(self, token):\n self.rpc.call(MsfRpcMethod.AuthTokenRemove, [token])", "def logout(self, revoke_token=False):\n if revoke_token:\n self.revoke_self_token()\n\n self.token = None", "def test_delete_u2ftoken(self):\n response = self.client.delete_u2ftoken(\"DU012345678901234567\")\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"DELETE\")\n self.assertEqual(uri, \"/admin/v1/u2ftokens/DU012345678901234567\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def sign_out(token):\n session = get_session_by_token(token)\n if not session['success']:\n return {'success': False, 'message': 'You are not signed in.', 'code': 401}\n\n query_db('DELETE FROM ActiveUsers WHERE token_hash = ?', [session['data']['token_hash']])\n return {'success': True, 'message': 'Successfully signed out.', 'code': 200}", "def test_authenticated_user_delete(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').delete,\r\n token)", "def test_delete_user_with_valid_input_using_token(self):\n # setup\n user = self.generate_username_password()\n resp1 = self.create_user(user)\n try:\n assert resp1.status_code == 201\n assert resp1.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp1.request)\n self.pprint_response(resp1)\n resp_body1 = resp1.json()\n uuid_ = resp_body1[\"userID\"]\n resp2 = self.generate_token(user)\n try:\n assert resp2.status_code == 200\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp2.request)\n self.pprint_response(resp2)\n resp_body2 = resp2.json()\n token = resp_body2[\"token\"]\n\n # test\n resp3 = self.delete_user_token(uuid_, token)\n try:\n assert resp3.status_code == 204\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp3.request)\n self.pprint_response(resp3)\n\n # teardown: none", "def deleteUser(user):\n delete_user(user)\n return redirect(url_for('login'))", "def delete_access_token_file():\n if os.path.isfile(AccessData.ACCESS_TOKEN_FILE):\n os.remove(AccessData.ACCESS_TOKEN_FILE)\n logger.info('deleted file %s' % (AccessData.ACCESS_TOKEN_FILE))", "def deltoken(confirm, name):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n mph.wallet.removeTokenFromPublicName(name)\n set_shared_morphene_instance(stm)", "def 
test_delete_device_token(self):\n pass", "async def reset_token(request: Request) -> Response:\n await auth.reset_user_token(request.state.db_conn, request.state.user_id)\n\n return Response(status_code=204)", "def delete(request):\n # user_name == user_id\n required_fields = ['user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # check for not allowed characters\n if check_special_characters(str(data['user_id'])) or check_special_characters(str(data['token'])):\n return Response({'error': str('Unaccepted character passed!')},\n status=status.HTTP_400_BAD_REQUEST)\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Here remove the user's account from the database\n if not db.remove_user(data['user_id']):\n return Response({'error': str('Error when removing the user account!')}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def test_anonymous_user_delete(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Unauthorized,\r\n getattr(require, 'token').delete,\r\n token)", "def revoke_access_token(self):\n response = self._telegraph.method('revokeAccessToken')\n\n self._telegraph.access_token = response.get('access_token')\n\n return response", "def delete_user(id):\n pass", "def closeaccount(request):\n get_user_model().objects.get(username=request.user.get_username()).delete()\n return Response({}, status=status.HTTP_200_OK)", "def gdisconnect():\r\n access_token = login_session.get('access_token')\r\n if access_token is None:\r\n print('Access Token is None')\r\n flash('Current user not connected.')\r\n return redirect(url_for('showCategories'))\r\n # print('Got access token for the user: {}'.\r\n # format(login_session['username']))\r\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' %\\\r\n login_session['access_token']\r\n h = httplib2.Http()\r\n result = h.request(url, 'GET')[0]\r\n # print('Access token revoke result:{}'.format(result))\r\n if result['status'] == '200':\r\n del login_session['access_token']\r\n del login_session['gplus_id']\r\n del login_session['username']\r\n del login_session['email']\r\n del login_session['picture']\r\n flash('Successfully logged out')\r\n return redirect(url_for('showCategories'))\r\n else:\r\n flash('Failed to revoke token for given user.')\r\n return redirect(url_for('showCategories'))", "def revoke_refresh_token(request):\n request_data = get_request_data(request.body)\n if request_data is None:\n return error_response(\"Invalid or malformed request data\")\n\n payload = get_refresh_token_payload_if_active(request_data.refresh_token)\n if payload is None:\n return error_response(\"Refresh token is invalid\")\n\n success = delete_refresh_token(token_id=payload[\"jti\"])\n if not success:\n logger.info(\n f\"Attempt to delete non-existent token: sub={payload['sub']}, jti={payload['jti']}\"\n )\n\n return success_response(status=204)", "def clear_token(user):\n query = User.update(token='').where(User.username == user)\n query.execute()\n usr = User.get(User.username == user)\n print(model_to_dict(usr))", "def test_without_token(self):\n 
resp = DeleteTest.client.post('/api/deleteuser/',{\"email\":\"umut@gmail.com\"})\n self.assertEqual(json.loads(resp.content),\"No token found.\",\"No Token Found\")", "def delete(self, request, user_id=None):\n data = json.loads(request.body.decode())\n authenticated = Account.check_credentials(request, data['email'], data['password'])\n user = {}\n user['account_id'] = authenticated.id\n\n if authenticated.check_admin(request, user):\n NLTKOutput.remove(request=request, pk=user_id)\n Account.remove(request=request, pk=user_id)\n return Response(json='Account and content deleted', status=204)\n\n return Response(json='Not Authorized', status=401)", "def fb_deauth(self, request):\n signed_request = request.data.get('signed_request')\n if signed_request:\n parsed_signed_request = facebook_controller.parse_signed_request(signed_request)\n facebook_user_id = parsed_signed_request.get('user_id')\n if facebook_user_id:\n facebook_controller.delete_linked_facebook_account(facebook_user_id)\n return Response('OK')", "def delete(self, url, token=None):\n return self.app.delete(url,\n headers=_token_header(token))", "def delete(path: str):\n token = get_token()\n headers = {\n \"Authorization\": f\"Bearer {token}\"\n }\n return requests.delete(get_base_url() + path, headers=headers)", "def user_id_delete(user_id):\n user = storage.get(\"User\", user_id)\n\n if user is None:\n abort(404)\n user.delete()\n del user\n return make_response(jsonify({}), 200)", "def delete(self, new_data, user_id):\n print(new_data)\n request_id = get_jwt_identity()\n user = user_crud.get(request_id)\n if not user.is_superuser:\n abort(401,\n message=\"You do not have permission to view this endpoint\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def test_user_logout(self):\n reply = self.admin_register()\n user = dict(\n username='jonnie',\n password='Andela8'\n )\n resp = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Login sucessful!')\n self.assertTrue(reply['token'])\n self.assertEqual(resp.status_code, 200)\n\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)", "def test_cannot_delete_user_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/users/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def delete_account(request):\n collected_values = {}\n \n if request.method != 'POST':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return 
JsonResponse(collected_values, status=400)\n \n uid = request.POST[\"user_id\"]\n token = request.POST[\"token\"]\n\n # Check auth\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n change_query = \"UPDATE linx_luser SET username = \\'{}\\' WHERE user_id = {}\".format(\"DELETE ME\", uid)\n with connection.cursor() as cursor:\n cursor.execute(change_query)\n\n collected_values[\"user_id\"] = uid\n collected_values[\"token\"] = token\n collected_values[\"executed_query\"] = change_query\n\n LOGGER.info(\"Delete account request: %s\", collected_values)\n return JsonResponse(collected_values, status=200)", "def logout(user_id):\n if request.method == 'POST':\n auth_header = request.headers.get('Authorization')\n auth_token = auth_header.split(\"Bearer \")[1]\n if auth_token and not TokenBlacklisting.verify_token(\n auth_token=auth_token):\n auth_data = User.decode_token(auth_token)\n if not isinstance(auth_data, str):\n blacklist_token = TokenBlacklisting(token=auth_token)\n try:\n blacklist_token.save_token()\n user = User.query.filter_by(id=user_id).first()\n return make_response(\n jsonify({\n 'message': 'see you soon {}, you have successfully logged out'.format(user.name)\n })), 200\n except Exception as e:\n return make_response(jsonify({\"message\": e})), 400\n return make_response(jsonify({\"message\": auth_data})), 404\n return make_response(\n jsonify({\n \"message\": \"Please provide a valid token\"\n })), 403\n return None", "def test_delete_collection_o_auth_authorize_token(self):\n pass", "def delete_user_credentials(connection, api_url):\n\n body = {\n 'endpoint': api_url,\n 'user': '',\n 'password': '',\n 'token': '',\n 'type': 'none'\n }\n\n connection.post_obj_as_json('user/credentials', body)", "def logout(self):\n kwargs = {}\n r = self._token_id_request(urljoin(self._url, Client._logout_resource), **kwargs)", "def logout_user(session):\n del session['user']", "def fusion_api_remove_user(self, name=None, uri=None, api=None, headers=None):\n return self.user.delete(name, uri, api, headers)", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def logout(request):\n # user_name == user_id\n required_fields = ['user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # check for not allowed characters\n if check_special_characters(str(data['user_id'])) or check_special_characters(str(data['token'])):\n return Response({'error': str('Unaccepted character passed!')},\n status=status.HTTP_400_BAD_REQUEST)\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Here let db know we are logging out by removing user's token\n if not db.remove_token(data['user_id']):\n return Response({'error': str('Error when logging out!')}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def delete(self, token_id):\n sm = get_storage_manager()\n token = sm.get(models.Token, token_id, fail_silently=True)\n if token and 
_can_manage_token(token):\n sm.delete(token)\n return None, 204\n else:\n raise NotFoundError(f'Could not find token {token_id}')", "def revoke(self, only_access=False):\n if only_access or self.refresh_token is None:\n super(Authorizer, self).revoke()\n else:\n self._authenticator.revoke_token(self.refresh_token,\n 'refresh_token')\n self._clear_access_token()\n self.refresh_token = None", "def logout_user(request):\n if 'access_token' in request.POST:\n try:\n cache = LoginCache.objects.get(user_hash=request.POST['access_token'])\n except:\n pass\n else:\n logger.debug(\"Logging out user with user id: \"+str(cache.user_id))\n cache.delete()\n else:\n \n res = {\n 'status' : 'error',\n 'error_code' : -1,\n 'error_msg': 'User not logged in',\n 'data': ''\n }\n logger.debug(\"User not logged in\")\n return JsonResponse(res)\n\n \n res = {\n 'status' : 'success',\n 'error_code' : 0,\n 'error_msg': '',\n 'data': ''\n }\n return JsonResponse(res)", "def del_user_id(user_id):\r\n obj = storage.get(User, user_id)\r\n if obj is None:\r\n abort(404)\r\n obj.delete()\r\n storage.save()\r\n return jsonify({}), 200", "def delete_user(self, user):\n self.delete(user)", "def delete(self, user_id):\r\n return delete_user(request, user_id)", "def auth_logout(token):\n if verify_token(token):\n return { \"is_success\": True }\n else:\n raise AccessError(description=\"Logout failed. Token is invalid\")", "def revoke_token(self, token, token_type=None):\n data = {'token': token}\n if token_type is not None:\n data['token_type_hint'] = token_type\n url = self._requestor.reddit_url + const.REVOKE_TOKEN_PATH\n self._post(url, success_status=codes['no_content'], **data)", "def delete(cls):\n user = user_schema.load(request.get_json(), partial=(\"email\",))\n\n current_identity = get_jwt_identity()\n db_user = UserModel.find_by_id(current_identity)\n logging.info(\n f\"Delete called by {db_user.id}: {db_user.username} with data: {user['username']}\"\n )\n if db_user.username == user['username']:\n if is_correct_password(db_user.pw_salt, db_user.pw_hash, user['password']):\n db_user.delete_from_db()\n return {\"message\": msgs.DELETED.format(db_user.username)}, 200\n else:\n return {\"error\": msgs.INVALID_PASSWORD}, 401\n return {\"error\": msgs.OWN_RECORD_ONLY}, 401", "def delete_user(UserName=None, AuthenticationType=None):\n pass", "def disconnect(self):\r\n self._apiSession.close()\r\n self._oAuthSession.close()\r\n \r\n # Check the access token and refresh if expired\r", "def gdisconnect():\n access_token = login_session['access_token']\n print 'In gdisconnect access token is %s', access_token\n print 'User name is: '\n print login_session['username']\n if access_token is None:\n print 'Access Token is None'\n response = make_response(\n json.dumps('Current user not connected.'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \\\n % login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n print 'result is '\n print result\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n flash('Successfully disconnected.', 'alert-success')\n return redirect(url_for('showStyles'))\n else:\n response = make_response(json.dumps(\n 'Failed to revoke token for given user.',\n 400))\n response.headers['Content-Type'] = 'application/json'\n return response", 
"def logout(self, username: str, token: str) -> bool:\n\n cursor = self._db_connection.cursor()\n\n # Get UID from user's username\n uid = self.get_uid(username=username)\n\n # Remove associated token\n cursor.execute('''DELETE FROM tokens WHERE uid = ? AND token_content = ?''', (uid, token))\n self._db_connection.commit()\n\n # Return success\n return True", "def organization_del_expired_token(self, client, expired_token, id):\n assert client.delete('/organizations/' + id, headers={\n 'Authorization': 'Bearer ' + expired_token},\n data={}).status == '401 UNAUTHORIZED'", "def deauthorize():\n\tPAYLOAD_HEADERS.pop('Authorization', None)", "def signout():\n session.pop('oauth2_state', None)\n session.pop('oauth2_token', None)\n session.pop('discord_user', None)\n return redirect('/')", "def revoke_token(token_jti, user):\r\n try:\r\n token = TokenBlacklist.query.filter_by(jti=token_jti, user_id=user).one()\r\n token.revoked = True\r\n db.session.commit()\r\n except NoResultFound:\r\n raise Exception(\"Could not find the token {}\".format(token_jti))", "def test_delete_o_auth_client_authorization(self):\n pass", "def revoke(self, token):\n client = self.connect(VAULT_TOKEN)\n client.revoke_token(token)", "def del_user(self, username):\n pass" ]
[ "0.80231464", "0.77013326", "0.75746226", "0.75228196", "0.75083303", "0.7371194", "0.7269982", "0.7242659", "0.7240688", "0.7185005", "0.69856465", "0.6977537", "0.6966745", "0.6926778", "0.6911425", "0.690824", "0.6878121", "0.68549204", "0.6834548", "0.6783257", "0.67678005", "0.6759102", "0.6757248", "0.67473793", "0.6745798", "0.67311823", "0.6691899", "0.6675792", "0.6659943", "0.6640693", "0.6637002", "0.66310793", "0.662955", "0.66259414", "0.6619135", "0.6593401", "0.65710735", "0.656979", "0.6568226", "0.65621567", "0.6545278", "0.6545274", "0.6540875", "0.6514716", "0.65128493", "0.6509379", "0.6491284", "0.64818364", "0.64805305", "0.64658505", "0.6456364", "0.6451841", "0.64261734", "0.64187866", "0.64182436", "0.64152646", "0.6404695", "0.63935953", "0.6393429", "0.63555604", "0.6354217", "0.6347973", "0.6342615", "0.63384897", "0.63371134", "0.63344824", "0.6326623", "0.63246715", "0.6320347", "0.6318319", "0.63072056", "0.62978375", "0.6288658", "0.6281473", "0.6278501", "0.62768656", "0.6272095", "0.62519306", "0.6247209", "0.6239089", "0.62367195", "0.62322104", "0.6228359", "0.6225319", "0.6218943", "0.6214524", "0.6209854", "0.6203417", "0.61896837", "0.61887443", "0.6182341", "0.6182005", "0.61751187", "0.6160879", "0.61482024", "0.6145374", "0.61353683", "0.61309505", "0.61253166", "0.6124231", "0.61235917" ]
0.0
-1